Merge branch 'master' into 20.03-LTS

Author: Noah
Date:   2021-03-23 23:49:24 +08:00
Commit: cc9328432c

17 changed files with 3366 additions and 1100 deletions


@@ -0,0 +1,858 @@
From 65e9f0b4c719146b0958cb3c01fd31e11e49ec37 Mon Sep 17 00:00:00 2001
Date: Tue, 16 Mar 2021 07:09:57 +0000
Subject: [PATCH 4/4] backport JDK-8214535 to support Jmap parallel
---
src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 25 ++++
src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 4 +
.../gc/parallel/parallelScavengeHeap.cpp | 64 +++++++++++
.../gc/parallel/parallelScavengeHeap.hpp | 22 +++-
src/hotspot/share/gc/parallel/psOldGen.cpp | 32 ++++++
src/hotspot/share/gc/parallel/psOldGen.hpp | 11 ++
src/hotspot/share/gc/shared/collectedHeap.hpp | 11 ++
.../share/gc/shared/vmGCOperations.cpp | 2 +-
.../share/gc/shared/vmGCOperations.hpp | 5 +-
src/hotspot/share/gc/shared/workgroup.hpp | 21 ++++
src/hotspot/share/memory/heapInspection.cpp | 108 ++++++++++++++++--
src/hotspot/share/memory/heapInspection.hpp | 44 ++++++-
src/hotspot/share/runtime/arguments.hpp | 12 +-
src/hotspot/share/services/attachListener.cpp | 15 ++-
.../share/classes/sun/tools/jmap/JMap.java | 41 +++++--
test/jdk/sun/tools/jmap/BasicJMapTest.java | 55 +++++++++
16 files changed, 442 insertions(+), 30 deletions(-)
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 7e9c6254c..fd2da14a3 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -77,6 +77,7 @@
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
+#include "memory/heapInspection.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
@@ -2208,6 +2209,30 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
heap_region_iterate(&blk);
}
+class G1ParallelObjectIterator : public ParallelObjectIterator {
+private:
+ G1CollectedHeap* _heap;
+ HeapRegionClaimer _claimer;
+
+public:
+ G1ParallelObjectIterator(uint thread_num) :
+ _heap(G1CollectedHeap::heap()),
+ _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {}
+
+ virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
+ _heap->object_iterate_parallel(cl, worker_id, &_claimer);
+ }
+};
+
+ParallelObjectIterator* G1CollectedHeap::parallel_object_iterator(uint thread_num) {
+ return new G1ParallelObjectIterator(thread_num);
+}
+
+void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
+ IterateObjectClosureRegionClosure blk(cl);
+ heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
+}
+
void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
_hrm.iterate(cl);
}
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index bb46cae83..82f59d69b 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -1125,9 +1125,13 @@ public:
// Iteration functions.
+ void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
+
// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl);
+ virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
+
virtual void safe_object_iterate(ObjectClosure* cl) {
object_iterate(cl);
}
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 29f967fb3..66e1b32a6 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -523,6 +523,70 @@ void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
old_gen()->object_iterate(cl);
}
+// The HeapBlockClaimer is used during parallel iteration over the heap,
+// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
+// The eden and survivor spaces are treated as single blocks as it is hard to divide
+// these spaces.
+// The old space is divided into fixed-size blocks.
+class HeapBlockClaimer : public StackObj {
+ size_t _claimed_index;
+
+public:
+ static const size_t InvalidIndex = SIZE_MAX;
+ static const size_t EdenIndex = 0;
+ static const size_t SurvivorIndex = 1;
+ static const size_t NumNonOldGenClaims = 2;
+
+ HeapBlockClaimer() : _claimed_index(EdenIndex) { }
+ // Claim the block and get the block index.
+ size_t claim_and_get_block() {
+ size_t block_index;
+ block_index = Atomic::add(1u, &_claimed_index) - 1; // Note: upstream uses Atomic::fetch_and_add(&_claimed_index, 1u), which is not available here.
+
+ PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
+ size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;
+
+ return block_index < num_claims ? block_index : InvalidIndex;
+ }
+};
+
+void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
+ HeapBlockClaimer* claimer) {
+ size_t block_index = claimer->claim_and_get_block();
+ // Iterate until all blocks are claimed
+ if (block_index == HeapBlockClaimer::EdenIndex) {
+ young_gen()->eden_space()->object_iterate(cl);
+ block_index = claimer->claim_and_get_block();
+ }
+ if (block_index == HeapBlockClaimer::SurvivorIndex) {
+ young_gen()->from_space()->object_iterate(cl);
+ young_gen()->to_space()->object_iterate(cl);
+ block_index = claimer->claim_and_get_block();
+ }
+ while (block_index != HeapBlockClaimer::InvalidIndex) {
+ old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
+ block_index = claimer->claim_and_get_block();
+ }
+}
+
+class PSScavengeParallelObjectIterator : public ParallelObjectIterator {
+private:
+ ParallelScavengeHeap* _heap;
+ HeapBlockClaimer _claimer;
+
+public:
+ PSScavengeParallelObjectIterator() :
+ _heap(ParallelScavengeHeap::heap()),
+ _claimer() {}
+
+ virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
+ _heap->object_iterate_parallel(cl, &_claimer);
+ }
+};
+
+ParallelObjectIterator* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
+ return new PSScavengeParallelObjectIterator();
+}
HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
if (young_gen()->is_in_reserved(addr)) {
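
The claim-and-get-block scheme above hands out work through a single atomic counter: indices 0 and 1 stand for the eden and survivor spaces, the remaining indices map to fixed-size old-gen blocks, and each index is returned to exactly one worker. Below is a minimal, self-contained sketch of the same pattern using std::atomic and std::thread; it is illustrative only, does not use the HotSpot types, and mirrors the Atomic::add(1u, ...) - 1 workaround the patch uses because the upstream Atomic::fetch_and_add is not available here.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

constexpr std::size_t kInvalidBlock = static_cast<std::size_t>(-1);

// Worker-shared claimer: one atomic counter hands out block indices
// (eden = 0, survivor = 1, old-gen blocks follow), mirroring HeapBlockClaimer.
struct BlockClaimer {
  std::atomic<std::size_t> next{0};
  std::size_t num_blocks;

  explicit BlockClaimer(std::size_t n) : num_blocks(n) {}

  // Same effect as Atomic::add(1u, &_claimed_index) - 1: return the previous value.
  std::size_t claim() {
    std::size_t idx = next.fetch_add(1);
    return idx < num_blocks ? idx : kInvalidBlock;
  }
};

int main() {
  BlockClaimer claimer(16);  // e.g. eden + survivors + 14 old-gen blocks
  std::vector<std::thread> workers;
  for (int w = 0; w < 4; w++) {
    workers.emplace_back([&claimer, w] {
      for (std::size_t b = claimer.claim(); b != kInvalidBlock; b = claimer.claim()) {
        std::printf("worker %d iterates block %zu\n", w, b);  // object iteration goes here
      }
    });
  }
  for (auto& t : workers) { t.join(); }
  return 0;
}
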
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index 5d18efb92..0a9b7bd3f 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -44,6 +44,7 @@
class AdjoiningGenerations;
class GCHeapSummary;
class GCTaskManager;
+class HeapBlockClaimer;
class MemoryManager;
class MemoryPool;
class PSAdaptiveSizePolicy;
@@ -79,6 +80,8 @@ class ParallelScavengeHeap : public CollectedHeap {
MemoryPool* _survivor_pool;
MemoryPool* _old_pool;
+ WorkGang _workers;
+
virtual void initialize_serviceability();
void trace_heap(GCWhen::Type when, const GCTracer* tracer);
@@ -93,7 +96,20 @@ class ParallelScavengeHeap : public CollectedHeap {
public:
ParallelScavengeHeap(GenerationSizer* policy) :
- CollectedHeap(), _collector_policy(policy), _death_march_count(0) { }
+ CollectedHeap(),
+ _collector_policy(policy),
+ _death_march_count(0),
+ _young_manager(NULL),
+ _old_manager(NULL),
+ _eden_pool(NULL),
+ _survivor_pool(NULL),
+ _old_pool(NULL),
+ _workers("GC Thread",
+ ParallelGCThreads,
+ true /* are_GC_task_threads */,
+ false /* are_ConcurrentGC_threads */) {
+ _workers.initialize_workers();
+ }
// For use by VM operations
enum CollectionType {
@@ -217,6 +233,8 @@ class ParallelScavengeHeap : public CollectedHeap {
void object_iterate(ObjectClosure* cl);
void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
+ void object_iterate_parallel(ObjectClosure* cl, HeapBlockClaimer* claimer);
+ virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);
HeapWord* block_start(const void* addr) const;
size_t block_size(const HeapWord* addr) const;
@@ -232,6 +250,8 @@ class ParallelScavengeHeap : public CollectedHeap {
virtual void gc_threads_do(ThreadClosure* tc) const;
virtual void print_tracing_info() const;
+ virtual WorkGang* get_safepoint_workers() { return &_workers; }
+
void verify(VerifyOption option /* ignored */);
// Resize the young generation. The reserved space for the
diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp
index 35844b14b..dbb5148fd 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp
@@ -213,6 +213,38 @@ HeapWord* PSOldGen::allocate(size_t word_size) {
return res;
}
+size_t PSOldGen::num_iterable_blocks() const {
+ return (object_space()->used_in_bytes() + IterateBlockSize - 1) / IterateBlockSize;
+}
+
+void PSOldGen::object_iterate_block(ObjectClosure* cl, size_t block_index) {
+ size_t block_word_size = IterateBlockSize / HeapWordSize;
+ assert((block_word_size % (ObjectStartArray::block_size)) == 0,
+ "Block size not a multiple of start_array block");
+
+ MutableSpace *space = object_space();
+
+ HeapWord* begin = space->bottom() + block_index * block_word_size;
+ HeapWord* end = MIN2(space->top(), begin + block_word_size);
+
+ if (!start_array()->object_starts_in_range(begin, end)) {
+ return;
+ }
+
+ // Get object starting at or reaching into this block.
+ HeapWord* start = start_array()->object_start(begin);
+ if (start < begin) {
+ start += oop(start)->size();
+ }
+ assert(start >= begin,
+ "Object address" PTR_FORMAT " must be larger or equal to block address at " PTR_FORMAT,
+ p2i(start), p2i(begin));
+ // Iterate all objects until the end.
+ for (HeapWord* p = start; p < end; p += oop(p)->size()) {
+ cl->do_object(oop(p));
+ }
+}
+
HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
expand(word_size*HeapWordSize);
if (GCExpandToAllocateDelayMillis > 0) {
diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp
index fa27f5a04..fa6e4849b 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.hpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.hpp
@@ -59,6 +59,9 @@ class PSOldGen : public CHeapObj<mtGC> {
const size_t _min_gen_size;
const size_t _max_gen_size;
+ // Block size for parallel iteration
+ static const size_t IterateBlockSize = 1024 * 1024;
+
// Used when initializing the _name field.
static inline const char* select_name();
@@ -195,6 +198,14 @@ class PSOldGen : public CHeapObj<mtGC> {
void oop_iterate(OopIterateClosure* cl) { object_space()->oop_iterate(cl); }
void object_iterate(ObjectClosure* cl) { object_space()->object_iterate(cl); }
+ // Number of blocks to be iterated over in the used part of old gen.
+ size_t num_iterable_blocks() const;
+ // Iterate the objects starting in block block_index within [bottom, top) of the
+ // old gen. The object just reaching into this block is not iterated over.
+ // A block is an evenly sized non-overlapping part of the old gen of
+ // IterateBlockSize bytes.
+ void object_iterate_block(ObjectClosure* cl, size_t block_index);
+
// Debugging - do not use for time critical operations
virtual void print() const;
virtual void print_on(outputStream* st) const;
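
A small worked example of the block bookkeeping described above, assuming IterateBlockSize = 1M: the used part of the old generation is divided into fixed-size blocks, the block count is rounded up, and the last block is clipped at top. Illustrative arithmetic only, not the PSOldGen code.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t block_bytes = 1024 * 1024;             // IterateBlockSize
  const std::size_t used_bytes  = 5 * block_bytes + 4096;  // 5 full blocks plus a tail
  std::size_t num_blocks = (used_bytes + block_bytes - 1) / block_bytes;  // rounds up to 6
  std::printf("num_iterable_blocks = %zu\n", num_blocks);
  for (std::size_t i = 0; i < num_blocks; i++) {
    std::size_t begin = i * block_bytes;
    std::size_t end   = std::min(used_bytes, begin + block_bytes);  // clip at top
    std::printf("block %zu covers [%zu, %zu)\n", i, begin, end);
  }
  return 0;
}
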
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index 47acf22cb..bcd4da29a 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -28,6 +28,7 @@
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "memory/allocation.hpp"
+#include "memory/heapInspection.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepoint.hpp"
@@ -42,6 +43,7 @@
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.
+class AbstractGangTask;
class AdaptiveSizePolicy;
class BarrierSet;
class CollectorPolicy;
@@ -83,6 +85,11 @@ class GCHeapLog : public EventLogBase<GCMessage> {
}
};
+class ParallelObjectIterator : public CHeapObj<mtGC> {
+public:
+ virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
+};
+
//
// CollectedHeap
// GenCollectedHeap
@@ -434,6 +441,10 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// Iterate over all objects, calling "cl.do_object" on each.
virtual void object_iterate(ObjectClosure* cl) = 0;
+ virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num) {
+ return NULL;
+ }
+
// Similar to object_iterate() except iterates only
// over live objects.
virtual void safe_object_iterate(ObjectClosure* cl) = 0;
diff --git a/src/hotspot/share/gc/shared/vmGCOperations.cpp b/src/hotspot/share/gc/shared/vmGCOperations.cpp
index b02305a6e..728290a7b 100644
--- a/src/hotspot/share/gc/shared/vmGCOperations.cpp
+++ b/src/hotspot/share/gc/shared/vmGCOperations.cpp
@@ -154,7 +154,7 @@ void VM_GC_HeapInspection::doit() {
}
HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
_columns);
- inspect.heap_inspection(_out);
+ inspect.heap_inspection(_out, _parallel_thread_num);
}
diff --git a/src/hotspot/share/gc/shared/vmGCOperations.hpp b/src/hotspot/share/gc/shared/vmGCOperations.hpp
index 65876e559..ef73b45de 100644
--- a/src/hotspot/share/gc/shared/vmGCOperations.hpp
+++ b/src/hotspot/share/gc/shared/vmGCOperations.hpp
@@ -125,18 +125,21 @@ class VM_GC_HeapInspection: public VM_GC_Operation {
private:
outputStream* _out;
bool _full_gc;
+ uint _parallel_thread_num;
bool _csv_format; // "comma separated values" format for spreadsheet.
bool _print_help;
bool _print_class_stats;
const char* _columns;
public:
- VM_GC_HeapInspection(outputStream* out, bool request_full_gc) :
+ VM_GC_HeapInspection(outputStream* out, bool request_full_gc,
+ uint parallel_thread_num = 1) :
VM_GC_Operation(0 /* total collections, dummy, ignored */,
GCCause::_heap_inspection /* GC Cause */,
0 /* total full collections, dummy, ignored */,
request_full_gc) {
_out = out;
_full_gc = request_full_gc;
+ _parallel_thread_num = parallel_thread_num;
_csv_format = false;
_print_help = false;
_print_class_stats = false;
diff --git a/src/hotspot/share/gc/shared/workgroup.hpp b/src/hotspot/share/gc/shared/workgroup.hpp
index 8b46d3bc4..109649df0 100644
--- a/src/hotspot/share/gc/shared/workgroup.hpp
+++ b/src/hotspot/share/gc/shared/workgroup.hpp
@@ -228,6 +228,27 @@ protected:
virtual AbstractGangWorker* allocate_worker(uint which);
};
+// Temporarily try to set the number of active workers.
+// It's not guaranteed that it succeeds, and users need to
+// query the number of active workers.
+class WithUpdatedActiveWorkers : public StackObj {
+private:
+ AbstractWorkGang* const _gang;
+ const uint _old_active_workers;
+
+public:
+ WithUpdatedActiveWorkers(AbstractWorkGang* gang, uint requested_num_workers) :
+ _gang(gang),
+ _old_active_workers(gang->active_workers()) {
+ uint capped_num_workers = MIN2(requested_num_workers, gang->total_workers());
+ gang->update_active_workers(capped_num_workers);
+ }
+
+ ~WithUpdatedActiveWorkers() {
+ _gang->update_active_workers(_old_active_workers);
+ }
+};
+
// Several instances of this class run in parallel as workers for a gang.
class AbstractGangWorker: public WorkerThread {
public:
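
WithUpdatedActiveWorkers is a save/set/restore scope guard: it caps the requested worker count at the gang's total, applies it for the lifetime of the object, and restores the previous count in the destructor, which is why callers must re-query active_workers() rather than assume the request was honored. A minimal sketch of the same RAII pattern, with a hypothetical Gang type standing in for the work gang:

#include <algorithm>
#include <cstdio>

struct Gang {
  unsigned total  = 8;
  unsigned active = 8;
};

class ScopedActiveWorkers {
  Gang&    _gang;
  unsigned _saved;
 public:
  ScopedActiveWorkers(Gang& g, unsigned requested)
      : _gang(g), _saved(g.active) {
    _gang.active = std::min(requested, _gang.total);  // not guaranteed to equal requested
  }
  ~ScopedActiveWorkers() { _gang.active = _saved; }   // restore on scope exit
};

int main() {
  Gang gang;
  {
    ScopedActiveWorkers scope(gang, 16);  // capped to the total of 8
    std::printf("during: %u\n", gang.active);
  }
  std::printf("after: %u\n", gang.active);
  return 0;
}
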
diff --git a/src/hotspot/share/memory/heapInspection.cpp b/src/hotspot/share/memory/heapInspection.cpp
index 9c2cdc117..dbc0eb274 100644
--- a/src/hotspot/share/memory/heapInspection.cpp
+++ b/src/hotspot/share/memory/heapInspection.cpp
@@ -31,6 +31,7 @@
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/reflectionAccessorImplKlassHelper.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@@ -236,6 +237,41 @@ size_t KlassInfoTable::size_of_instances_in_words() const {
return _size_of_instances_in_words;
}
+// Return false if the entry could not be recorded on account
+// of running out of space required to create a new entry.
+bool KlassInfoTable::merge_entry(const KlassInfoEntry* cie) {
+ Klass* k = cie->klass();
+ KlassInfoEntry* elt = lookup(k);
+ // elt may be NULL if it's a new klass for which we
+ // could not allocate space for a new entry in the hashtable.
+ if (elt != NULL) {
+ elt->set_count(elt->count() + cie->count());
+ elt->set_words(elt->words() + cie->words());
+ _size_of_instances_in_words += cie->words();
+ return true;
+ }
+ return false;
+}
+
+class KlassInfoTableMergeClosure : public KlassInfoClosure {
+ private:
+ KlassInfoTable* _dest;
+ bool _success;
+ public:
+ KlassInfoTableMergeClosure(KlassInfoTable* table) : _dest(table), _success(true) {}
+ void do_cinfo(KlassInfoEntry* cie) {
+ _success &= _dest->merge_entry(cie);
+ }
+ bool success() { return _success; }
+};
+
+// Merge the contents of another KlassInfoTable into this one.
+bool KlassInfoTable::merge(KlassInfoTable* table) {
+ KlassInfoTableMergeClosure closure(this);
+ table->iterate(&closure);
+ return closure.success();
+}
+
int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
return (*e1)->compare(*e1,*e2);
}
@@ -687,7 +723,7 @@ class HistoClosure : public KlassInfoClosure {
class RecordInstanceClosure : public ObjectClosure {
private:
KlassInfoTable* _cit;
- size_t _missed_count;
+ uintx _missed_count;
BoolObjectClosure* _filter;
public:
RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
@@ -701,7 +737,7 @@ class RecordInstanceClosure : public ObjectClosure {
}
}
- size_t missed_count() { return _missed_count; }
+ uintx missed_count() { return _missed_count; }
private:
bool should_visit(oop obj) {
@@ -709,15 +745,73 @@ class RecordInstanceClosure : public ObjectClosure {
}
};
-size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
- ResourceMark rm;
+// Heap inspection work performed by each worker.
+// If a native OOM happens while allocating the worker's KlassInfoTable, set _success to false.
+void ParHeapInspectTask::work(uint worker_id) {
+ uintx missed_count = 0;
+ bool merge_success = true;
+ if (!Atomic::load(&_success)) {
+ // Another worker has already failed during parallel iteration.
+ return;
+ }
+ KlassInfoTable cit(false);
+ if (cit.allocation_failed()) {
+ // Failed to allocate memory; stop parallel mode.
+ Atomic::store(false, &_success);
+ return;
+ }
+ RecordInstanceClosure ric(&cit, _filter);
+ _poi->object_iterate(&ric, worker_id);
+ missed_count = ric.missed_count();
+ {
+ MutexLocker x(&_mutex);
+ merge_success = _shared_cit->merge(&cit);
+ }
+ if (merge_success) {
+ Atomic::add(missed_count, &_missed_count);
+ } else {
+ Atomic::store(false, &_success);
+ }
+}
+
+size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter, uint parallel_thread_num) {
+ // Try parallel first.
+ if (parallel_thread_num > 1) {
+ ResourceMark rm;
+
+ WorkGang* gang = Universe::heap()->get_safepoint_workers();
+ if (gang != NULL) {
+ // The GC provided a WorkGang to be used during a safepoint.
+
+ // Can't run with more threads than provided by the WorkGang.
+ WithUpdatedActiveWorkers update_and_restore(gang, parallel_thread_num);
+
+ ParallelObjectIterator* poi = Universe::heap()->parallel_object_iterator(gang->active_workers());
+ if (poi != NULL) {
+ // The GC supports parallel object iteration.
+
+ ParHeapInspectTask task(poi, cit, filter);
+ // Run task with the active workers.
+
+ gang->run_task(&task);
+
+ delete poi;
+ if (task.success()) {
+ return task.missed_count();
+ }
+ }
+ }
+ }
+
+ ResourceMark rm;
+ // If no parallel iteration available, run serially.
RecordInstanceClosure ric(cit, filter);
Universe::heap()->safe_object_iterate(&ric);
return ric.missed_count();
}
-void HeapInspection::heap_inspection(outputStream* st) {
+void HeapInspection::heap_inspection(outputStream* st, uint parallel_thread_num) {
ResourceMark rm;
if (_print_help) {
@@ -741,9 +835,9 @@ void HeapInspection::heap_inspection(outputStream* st) {
KlassInfoTable cit(_print_class_stats);
if (!cit.allocation_failed()) {
// populate table with object allocation info
- size_t missed_count = populate_table(&cit);
+ uintx missed_count = populate_table(&cit, NULL, parallel_thread_num);
if (missed_count != 0) {
- st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
+ st->print_cr("WARNING: Ran out of C-heap; undercounted " UINTX_FORMAT
" total instances in data below",
missed_count);
}
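
The populate_table() changes above follow a common parallel-reduction shape: each worker fills a private KlassInfoTable without synchronization, merges it into the shared table under a mutex, and any allocation or merge failure flips _success so the caller falls back to the serial path. A minimal, self-contained analogue with std::unordered_map standing in for the histogram (illustrative only, not the HotSpot classes):

#include <cstddef>
#include <cstdio>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

struct Histogram {
  std::unordered_map<std::string, std::size_t> counts;
  void record(const std::string& klass) { counts[klass]++; }
  void merge(const Histogram& other) {
    for (const auto& e : other.counts) { counts[e.first] += e.second; }
  }
};

int main() {
  Histogram shared;
  std::mutex merge_lock;
  std::vector<std::vector<std::string>> work = {
    {"java.lang.String", "int[]"}, {"java.lang.String", "java.lang.Object"}};

  std::vector<std::thread> workers;
  for (std::size_t i = 0; i < work.size(); i++) {
    workers.emplace_back([&shared, &merge_lock, &work, i] {
      Histogram local;                             // worker-private table, no locking
      for (const auto& k : work[i]) { local.record(k); }
      std::lock_guard<std::mutex> g(merge_lock);   // only the merge is serialized
      shared.merge(local);
    });
  }
  for (auto& t : workers) { t.join(); }
  for (const auto& e : shared.counts) {
    std::printf("%zu %s\n", e.second, e.first.c_str());
  }
  return 0;
}
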
diff --git a/src/hotspot/share/memory/heapInspection.hpp b/src/hotspot/share/memory/heapInspection.hpp
index d8935dc68..026293bf7 100644
--- a/src/hotspot/share/memory/heapInspection.hpp
+++ b/src/hotspot/share/memory/heapInspection.hpp
@@ -25,12 +25,15 @@
#ifndef SHARE_VM_MEMORY_HEAPINSPECTION_HPP
#define SHARE_VM_MEMORY_HEAPINSPECTION_HPP
+#include "gc/shared/workgroup.hpp"
#include "memory/allocation.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.hpp"
#include "oops/annotations.hpp"
#include "utilities/macros.hpp"
+class ParallelObjectIterator;
+
#if INCLUDE_SERVICES
@@ -261,6 +264,8 @@ class KlassInfoTable: public StackObj {
void iterate(KlassInfoClosure* cic);
bool allocation_failed() { return _buckets == NULL; }
size_t size_of_instances_in_words() const;
+ bool merge(KlassInfoTable* table);
+ bool merge_entry(const KlassInfoEntry* cie);
friend class KlassInfoHisto;
friend class KlassHierarchy;
@@ -364,11 +369,46 @@ class HeapInspection : public StackObj {
bool print_class_stats, const char *columns) :
_csv_format(csv_format), _print_help(print_help),
_print_class_stats(print_class_stats), _columns(columns) {}
- void heap_inspection(outputStream* st) NOT_SERVICES_RETURN;
- size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN_(0);
+ void heap_inspection(outputStream* st, uint parallel_thread_num = 1) NOT_SERVICES_RETURN;
+ size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL, uint parallel_thread_num = 1) NOT_SERVICES_RETURN_(0);
static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
private:
void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL);
};
+// Parallel heap inspection task. Parallel inspection can fail due to
+// a native OOM when allocating memory for a worker's thread-local KlassInfoTable.
+// In that case _success is set to false and the caller falls back to serial inspection.
+class ParHeapInspectTask : public AbstractGangTask {
+private:
+ ParallelObjectIterator *_poi;
+ KlassInfoTable *_shared_cit;
+ BoolObjectClosure *_filter;
+ uintx _missed_count;
+ bool _success;
+ Mutex _mutex;
+
+public:
+ ParHeapInspectTask(ParallelObjectIterator *poi,
+ KlassInfoTable *shared_cit,
+ BoolObjectClosure *filter) :
+ AbstractGangTask("Iterating heap"),
+ _poi(poi),
+ _shared_cit(shared_cit),
+ _filter(filter),
+ _missed_count(0),
+ _success(true),
+ _mutex(Mutex::leaf, "Parallel heap iteration data merge lock") {}
+
+ uintx missed_count() const {
+ return _missed_count;
+ }
+
+ bool success() {
+ return _success;
+ }
+
+ virtual void work(uint worker_id);
+};
+
#endif // SHARE_VM_MEMORY_HEAPINSPECTION_HPP
diff --git a/src/hotspot/share/runtime/arguments.hpp b/src/hotspot/share/runtime/arguments.hpp
index bd439aab0..9827a4c66 100644
--- a/src/hotspot/share/runtime/arguments.hpp
+++ b/src/hotspot/share/runtime/arguments.hpp
@@ -450,12 +450,6 @@ class Arguments : AllStatic {
static ArgsRange check_memory_size(julong size, julong min_size, julong max_size);
static ArgsRange parse_memory_size(const char* s, julong* long_arg,
julong min_size, julong max_size = max_uintx);
- // Parse a string for a unsigned integer. Returns true if value
- // is an unsigned integer greater than or equal to the minimum
- // parameter passed and returns the value in uintx_arg. Returns
- // false otherwise, with uintx_arg undefined.
- static bool parse_uintx(const char* value, uintx* uintx_arg,
- uintx min_size);
// methods to build strings from individual args
static void build_jvm_args(const char* arg);
@@ -493,6 +487,12 @@ class Arguments : AllStatic {
public:
// Parses the arguments, first phase
static jint parse(const JavaVMInitArgs* args);
+ // Parse a string for a unsigned integer. Returns true if value
+ // is an unsigned integer greater than or equal to the minimum
+ // parameter passed and returns the value in uintx_arg. Returns
+ // false otherwise, with uintx_arg undefined.
+ static bool parse_uintx(const char* value, uintx* uintx_arg,
+ uintx min_size);
// Apply ergonomics
static jint apply_ergo();
// Adjusts the arguments after the OS have adjusted the arguments
diff --git a/src/hotspot/share/services/attachListener.cpp b/src/hotspot/share/services/attachListener.cpp
index fc77970a0..b0f3b2e87 100644
--- a/src/hotspot/share/services/attachListener.cpp
+++ b/src/hotspot/share/services/attachListener.cpp
@@ -258,9 +258,11 @@ jint dump_heap(AttachOperation* op, outputStream* out) {
//
// Input arguments :-
// arg0: "-live" or "-all"
+// arg1: parallel thread number
static jint heap_inspection(AttachOperation* op, outputStream* out) {
bool live_objects_only = true; // default is true to retain the behavior before this change is made
const char* arg0 = op->arg(0);
+ uint parallel_thread_num = MAX2<uint>(1, (uint)os::initial_active_processor_count() * 3 / 8);
if (arg0 != NULL && (strlen(arg0) > 0)) {
if (strcmp(arg0, "-all") != 0 && strcmp(arg0, "-live") != 0) {
out->print_cr("Invalid argument to inspectheap operation: %s", arg0);
@@ -268,7 +270,18 @@ static jint heap_inspection(AttachOperation* op, outputStream* out) {
}
live_objects_only = strcmp(arg0, "-live") == 0;
}
- VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */);
+
+ const char* num_str = op->arg(1);
+ if (num_str != NULL && num_str[0] != '\0') {
+ uintx num;
+ if (!Arguments::parse_uintx(num_str, &num, 0)) {
+ out->print_cr("Invalid parallel thread number: [%s]", num_str);
+ return JNI_ERR;
+ }
+ parallel_thread_num = num == 0 ? parallel_thread_num : (uint)num;
+ }
+
+ VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */, parallel_thread_num);
VMThread::execute(&heapop);
return JNI_OK;
}
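
The attach handler above defaults the thread count to roughly 3/8 of the initially active processors (never below 1), and treats parallel=0 as "keep that default" while any other value overrides it. A small sketch of that defaulting logic, with a plain unsigned parameter standing in for os::initial_active_processor_count():

#include <algorithm>
#include <cstdio>
#include <cstdlib>

unsigned default_parallel_threads(unsigned cpus) {
  return std::max(1u, cpus * 3 / 8);  // MAX2<uint>(1, cpus * 3 / 8)
}

unsigned resolve_parallel_threads(const char* arg, unsigned cpus) {
  unsigned n = default_parallel_threads(cpus);
  if (arg != nullptr && arg[0] != '\0') {
    unsigned long v = std::strtoul(arg, nullptr, 10);
    if (v != 0) { n = static_cast<unsigned>(v); }  // 0 means "use the default"
  }
  return n;
}

int main() {
  std::printf("%u\n", resolve_parallel_threads(nullptr, 16));  // 6
  std::printf("%u\n", resolve_parallel_threads("0", 16));      // 6
  std::printf("%u\n", resolve_parallel_threads("2", 16));      // 2
  return 0;
}
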
diff --git a/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java b/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java
index f2db61ab7..9af74f362 100644
--- a/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java
+++ b/src/jdk.jcmd/share/classes/sun/tools/jmap/JMap.java
@@ -149,18 +149,28 @@ public class JMap {
throws AttachNotSupportedException, IOException,
UnsupportedEncodingException {
String liveopt = "-all";
- if (options.equals("") || options.equals("all")) {
- // pass
- }
- else if (options.equals("live")) {
- liveopt = "-live";
- }
- else {
- usage(1);
+ String parallel = null;
+ String subopts[] = options.split(",");
+
+ for (int i = 0; i < subopts.length; i++) {
+ String subopt = subopts[i];
+ if (subopt.equals("") || subopt.equals("all")) {
+ // pass
+ } else if (subopt.equals("live")) {
+ liveopt = "-live";
+ } else if (subopt.startsWith("parallel=")) {
+ parallel = subopt.substring("parallel=".length());
+ if (parallel == null) {
+ System.err.println("Fail: no number provided in option: '" + subopt + "'");
+ System.exit(1);
+ }
+ } else {
+ usage(1);
+ }
}
// inspectHeap is not the same as jcmd GC.class_histogram
- executeCommandForPid(pid, "inspectheap", liveopt);
+ executeCommandForPid(pid, "inspectheap", liveopt, parallel);
}
private static void dump(String pid, String options)
@@ -246,9 +256,8 @@ public class JMap {
System.err.println(" to connect to running process and print class loader statistics");
System.err.println(" jmap -finalizerinfo <pid>");
System.err.println(" to connect to running process and print information on objects awaiting finalization");
- System.err.println(" jmap -histo[:live] <pid>");
+ System.err.println(" jmap -histo:<histo-options> <pid>");
System.err.println(" to connect to running process and print histogram of java object heap");
- System.err.println(" if the \"live\" suboption is specified, only count live objects");
System.err.println(" jmap -dump:<dump-options> <pid>");
System.err.println(" to connect to running process and dump java heap");
System.err.println(" jmap -? -h --help");
@@ -261,6 +270,16 @@ public class JMap {
System.err.println(" file=<file> dump heap to <file>");
System.err.println("");
System.err.println(" Example: jmap -dump:live,format=b,file=heap.bin <pid>");
+ System.err.println("");
+ System.err.println(" histo-options:");
+ System.err.println(" live count only live objects");
+ System.err.println(" all count all objects in the heap (default if neither \"live\" nor \"all\" is specified)");
+ System.err.println(" parallel=<number> number of parallel threads to use for heap iteration:");
+ System.err.println(" parallel=0 default behavior, use predefined number of threads");
+ System.err.println(" parallel=1 disable parallel heap iteration");
+ System.err.println(" parallel=<N> use N threads for parallel heap iteration");
+ System.err.println("");
+ System.err.println(" Example: jmap -histo:live,parallel=2 <pid>");
System.exit(exit);
}
}
diff --git a/test/jdk/sun/tools/jmap/BasicJMapTest.java b/test/jdk/sun/tools/jmap/BasicJMapTest.java
index c0432dede..960705e24 100644
--- a/test/jdk/sun/tools/jmap/BasicJMapTest.java
+++ b/test/jdk/sun/tools/jmap/BasicJMapTest.java
@@ -45,6 +45,35 @@ import jdk.testlibrary.ProcessTools;
* @build jdk.test.lib.hprof.util.*
* @run main/timeout=240 BasicJMapTest
*/
+
+/*
+ * @test id=Parallel
+ * @summary Unit test for jmap utility (Parallel GC)
+ * @key intermittent
+ * @library /lib/testlibrary
+ * @library /test/lib
+ * @build jdk.testlibrary.*
+ * @build jdk.test.lib.hprof.*
+ * @build jdk.test.lib.hprof.model.*
+ * @build jdk.test.lib.hprof.parser.*
+ * @build jdk.test.lib.hprof.util.*
+ * @run main/othervm/timeout=240 -XX:+UseParallelGC BasicJMapTest
+ */
+
+/*
+ * @test id=G1
+ * @summary Unit test for jmap utility (G1 GC)
+ * @key intermittent
+ * @library /lib/testlibrary
+ * @library /test/lib
+ * @build jdk.testlibrary.*
+ * @build jdk.test.lib.hprof.*
+ * @build jdk.test.lib.hprof.model.*
+ * @build jdk.test.lib.hprof.parser.*
+ * @build jdk.test.lib.hprof.util.*
+ * @run main/othervm/timeout=240 -XX:+UseG1GC BasicJMapTest
+ */
+
public class BasicJMapTest {
private static ProcessBuilder processBuilder = new ProcessBuilder();
@@ -68,6 +97,32 @@ public class BasicJMapTest {
output.shouldHaveExitValue(0);
}
+ private static void testHistoParallelZero() throws Exception {
+ OutputAnalyzer output = jmap("-histo:parallel=0");
+ output.shouldHaveExitValue(0);
+ }
+
+ private static void testHistoParallel() throws Exception {
+ OutputAnalyzer output = jmap("-histo:parallel=2");
+ output.shouldHaveExitValue(0);
+ }
+
+ private static void testHistoNonParallel() throws Exception {
+ OutputAnalyzer output = jmap("-histo:parallel=1");
+ output.shouldHaveExitValue(0);
+ }
+
+ private static void testHistoMultipleParameters() throws Exception {
+ OutputAnalyzer output = jmap("-histo:parallel=2,live");
+ output.shouldHaveExitValue(0);
+ output = jmap("-histo:live,parallel=2");
+ output.shouldHaveExitValue(0);
+ output = jmap("-histo:parallel=2,all");
+ output.shouldHaveExitValue(0);
+ output = jmap("-histo:all,parallel=2");
+ output.shouldHaveExitValue(0);
+ }
+
private static void testFinalizerInfo() throws Exception {
OutputAnalyzer output = jmap("-finalizerinfo");
output.shouldHaveExitValue(0);
--
2.19.0


@@ -3,8 +3,8 @@ index 6878962..bb9721c 100644
--- a/make/hotspot/gensrc/GensrcAdlc.gmk
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk
@@ -146,6 +146,12 @@ ifeq ($(call check-jvm-feature, compiler2), true)
)))
endif
$d/os_cpu/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH).ad \
)))
+ ifeq ($(call check-jvm-feature, zgc), true)
+ AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
@@ -12,9 +12,9 @@ index 6878962..bb9721c 100644
+ )))
+ endif
+
SINGLE_AD_SRCFILE := $(ADLC_SUPPORT_DIR)/all-ad-src.ad
INSERT_FILENAME_AWK_SCRIPT := \
ifeq ($(call check-jvm-feature, shenandoahgc), true)
AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/shenandoah/shenandoah_$(HOTSPOT_TARGET_CPU).ad \
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 05b36e2..18e774f 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad


@@ -0,0 +1,263 @@
From 8da2787209da1906e3a92fff95dc46abe793b433 Mon Sep 17 00:00:00 2001
Date: Thu, 18 Mar 2021 12:36:13 +0000
Subject: [PATCH 3/4] 8217918: C2 -XX:+AggressiveUnboxing is broken
---
src/hotspot/share/opto/cfgnode.hpp | 4 +-
src/hotspot/share/opto/phaseX.cpp | 96 ++++++++++++++++++++++++------
src/hotspot/share/opto/phaseX.hpp | 10 ++++
src/hotspot/share/opto/type.cpp | 16 +++++
src/hotspot/share/opto/type.hpp | 4 ++
5 files changed, 111 insertions(+), 19 deletions(-)
diff --git a/src/hotspot/share/opto/cfgnode.hpp b/src/hotspot/share/opto/cfgnode.hpp
index 0d8c9b33b..04029ca91 100644
--- a/src/hotspot/share/opto/cfgnode.hpp
+++ b/src/hotspot/share/opto/cfgnode.hpp
@@ -118,11 +118,13 @@ class JProjNode : public ProjNode {
// can turn PhiNodes into copys in-place by NULL'ing out their RegionNode
// input in slot 0.
class PhiNode : public TypeNode {
+ friend class PhaseRenumberLive;
+
const TypePtr* const _adr_type; // non-null only for Type::MEMORY nodes.
// The following fields are only used for data PhiNodes to indicate
// that the PhiNode represents the value of a known instance field.
int _inst_mem_id; // Instance memory id (node index of the memory Phi)
- const int _inst_id; // Instance id of the memory slice.
+ int _inst_id; // Instance id of the memory slice.
const int _inst_index; // Alias index of the instance memory slice.
// Array elements references have the same alias_idx but different offset.
const int _inst_offset; // Offset of the instance memory slice.
diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp
index 9d5d4deed..f4a38cd28 100644
--- a/src/hotspot/share/opto/phaseX.cpp
+++ b/src/hotspot/share/opto/phaseX.cpp
@@ -463,55 +463,115 @@ PhaseRemoveUseless::PhaseRemoveUseless(PhaseGVN *gvn, Unique_Node_List *worklist
PhaseRenumberLive::PhaseRenumberLive(PhaseGVN* gvn,
Unique_Node_List* worklist, Unique_Node_List* new_worklist,
PhaseNumber phase_num) :
- PhaseRemoveUseless(gvn, worklist, Remove_Useless_And_Renumber_Live) {
-
+ PhaseRemoveUseless(gvn, worklist, Remove_Useless_And_Renumber_Live),
+ _new_type_array(C->comp_arena()),
+ _old2new_map(C->unique(), C->unique(), -1),
+ _delayed(Thread::current()->resource_area()),
+ _is_pass_finished(false),
+ _live_node_count(C->live_nodes())
+{
assert(RenumberLiveNodes, "RenumberLiveNodes must be set to true for node renumbering to take place");
assert(C->live_nodes() == _useful.size(), "the number of live nodes must match the number of useful nodes");
assert(gvn->nodes_size() == 0, "GVN must not contain any nodes at this point");
+ assert(_delayed.size() == 0, "should be empty");
- uint old_unique_count = C->unique();
- uint live_node_count = C->live_nodes();
uint worklist_size = worklist->size();
- // Storage for the updated type information.
- Type_Array new_type_array(C->comp_arena());
-
// Iterate over the set of live nodes.
- uint current_idx = 0; // The current new node ID. Incremented after every assignment.
- for (uint i = 0; i < _useful.size(); i++) {
- Node* n = _useful.at(i);
- // Sanity check that fails if we ever decide to execute this phase after EA
- assert(!n->is_Phi() || n->as_Phi()->inst_mem_id() == -1, "should not be linked to data Phi");
- const Type* type = gvn->type_or_null(n);
- new_type_array.map(current_idx, type);
+ for (uint current_idx = 0; current_idx < _useful.size(); current_idx++) {
+ Node* n = _useful.at(current_idx);
bool in_worklist = false;
if (worklist->member(n)) {
in_worklist = true;
}
+ const Type* type = gvn->type_or_null(n);
+ _new_type_array.map(current_idx, type);
+
+ assert(_old2new_map.at(n->_idx) == -1, "already seen");
+ _old2new_map.at_put(n->_idx, current_idx);
+
n->set_idx(current_idx); // Update node ID.
if (in_worklist) {
new_worklist->push(n);
}
- current_idx++;
+ if (update_embedded_ids(n) < 0) {
+ _delayed.push(n); // has embedded IDs; handle later
+ }
}
assert(worklist_size == new_worklist->size(), "the new worklist must have the same size as the original worklist");
- assert(live_node_count == current_idx, "all live nodes must be processed");
+ assert(_live_node_count == _useful.size(), "all live nodes must be processed");
+
+ _is_pass_finished = true; // pass finished; safe to process delayed updates
+
+ while (_delayed.size() > 0) {
+ Node* n = _delayed.pop();
+ int no_of_updates = update_embedded_ids(n);
+ assert(no_of_updates > 0, "should be updated");
+ }
// Replace the compiler's type information with the updated type information.
- gvn->replace_types(new_type_array);
+ gvn->replace_types(_new_type_array);
// Update the unique node count of the compilation to the number of currently live nodes.
- C->set_unique(live_node_count);
+ C->set_unique(_live_node_count);
// Set the dead node count to 0 and reset dead node list.
C->reset_dead_node_list();
}
+int PhaseRenumberLive::new_index(int old_idx) {
+ assert(_is_pass_finished, "not finished");
+ if (_old2new_map.at(old_idx) == -1) { // absent
+ // Allocate a placeholder to preserve uniqueness
+ _old2new_map.at_put(old_idx, _live_node_count);
+ _live_node_count++;
+ }
+ return _old2new_map.at(old_idx);
+}
+
+int PhaseRenumberLive::update_embedded_ids(Node* n) {
+ int no_of_updates = 0;
+ if (n->is_Phi()) {
+ PhiNode* phi = n->as_Phi();
+ if (phi->_inst_id != -1) {
+ if (!_is_pass_finished) {
+ return -1; // delay
+ }
+ int new_idx = new_index(phi->_inst_id);
+ assert(new_idx != -1, "");
+ phi->_inst_id = new_idx;
+ no_of_updates++;
+ }
+ if (phi->_inst_mem_id != -1) {
+ if (!_is_pass_finished) {
+ return -1; // delay
+ }
+ int new_idx = new_index(phi->_inst_mem_id);
+ assert(new_idx != -1, "");
+ phi->_inst_mem_id = new_idx;
+ no_of_updates++;
+ }
+ }
+
+ const Type* type = _new_type_array.fast_lookup(n->_idx);
+ if (type != NULL && type->isa_oopptr() && type->is_oopptr()->is_known_instance()) {
+ if (!_is_pass_finished) {
+ return -1; // delay
+ }
+ int old_idx = type->is_oopptr()->instance_id();
+ int new_idx = new_index(old_idx);
+ const Type* new_type = type->is_oopptr()->with_instance_id(new_idx);
+ _new_type_array.map(n->_idx, new_type);
+ no_of_updates++;
+ }
+
+ return no_of_updates;
+}
//=============================================================================
//------------------------------PhaseTransform---------------------------------
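
The reworked PhaseRenumberLive assigns compact new ids in a single pass over the live nodes, but a Phi's _inst_id/_inst_mem_id and the instance ids embedded in oop types refer to other nodes by their old ids, so such nodes are parked on _delayed and remapped through _old2new_map once the pass is finished (old ids with no live node get fresh placeholder indices). A simplified, self-contained sketch of the same old-to-new remapping, written as an explicit two-pass loop (illustrative only, not the C2 data structures):

#include <cstddef>
#include <cstdio>
#include <vector>

struct Item {
  int old_id;
  int ref_old_id;  // embedded reference to another item's old id, or -1
};

int main() {
  std::vector<Item> live = { {5, -1}, {9, 5}, {12, 9} };
  const int max_old_id = 13;
  std::vector<int> old2new(max_old_id, -1);

  // Pass 1: give every live item a compact new id.
  for (std::size_t i = 0; i < live.size(); i++) {
    old2new[live[i].old_id] = static_cast<int>(i);
  }

  // Pass 2: remap embedded ids now that the whole old->new map is known.
  for (const Item& it : live) {
    int new_id  = old2new[it.old_id];
    int new_ref = (it.ref_old_id == -1) ? -1 : old2new[it.ref_old_id];
    std::printf("old %d -> new %d (embedded ref %d -> %d)\n",
                it.old_id, new_id, it.ref_old_id, new_ref);
  }
  return 0;
}
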
diff --git a/src/hotspot/share/opto/phaseX.hpp b/src/hotspot/share/opto/phaseX.hpp
index 3b33a8cb2..ef5eb488e 100644
--- a/src/hotspot/share/opto/phaseX.hpp
+++ b/src/hotspot/share/opto/phaseX.hpp
@@ -157,6 +157,16 @@ public:
// Phase that first performs a PhaseRemoveUseless, then it renumbers compiler
// structures accordingly.
class PhaseRenumberLive : public PhaseRemoveUseless {
+protected:
+ Type_Array _new_type_array; // Storage for the updated type information.
+ GrowableArray<int> _old2new_map;
+ Node_List _delayed;
+ bool _is_pass_finished;
+ uint _live_node_count;
+
+ int update_embedded_ids(Node* n);
+ int new_index(int old_idx);
+
public:
PhaseRenumberLive(PhaseGVN* gvn,
Unique_Node_List* worklist, Unique_Node_List* new_worklist,
diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp
index 0078b8773..964f9d247 100644
--- a/src/hotspot/share/opto/type.cpp
+++ b/src/hotspot/share/opto/type.cpp
@@ -3456,6 +3456,12 @@ const TypePtr* TypeOopPtr::with_inline_depth(int depth) const {
return make(_ptr, _offset, _instance_id, _speculative, depth);
}
+//------------------------------with_instance_id--------------------------------
+const TypePtr* TypeOopPtr::with_instance_id(int instance_id) const {
+ assert(_instance_id != -1, "should be known");
+ return make(_ptr, _offset, instance_id, _speculative, _inline_depth);
+}
+
//------------------------------meet_instance_id--------------------------------
int TypeOopPtr::meet_instance_id( int instance_id ) const {
// Either is 'TOP' instance? Return the other instance!
@@ -4059,6 +4065,11 @@ const TypePtr *TypeInstPtr::with_inline_depth(int depth) const {
return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, _speculative, depth);
}
+const TypePtr *TypeInstPtr::with_instance_id(int instance_id) const {
+ assert(is_known_instance(), "should be known");
+ return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, instance_id, _speculative, _inline_depth);
+}
+
//=============================================================================
// Convenience common pre-built types.
const TypeAryPtr *TypeAryPtr::RANGE;
@@ -4529,6 +4540,11 @@ const TypePtr *TypeAryPtr::with_inline_depth(int depth) const {
return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, _speculative, depth);
}
+const TypePtr *TypeAryPtr::with_instance_id(int instance_id) const {
+ assert(is_known_instance(), "should be known");
+ return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, instance_id, _speculative, _inline_depth);
+}
+
//=============================================================================
//------------------------------hash-------------------------------------------
diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp
index ca92fe3ab..e9ed7ce40 100644
--- a/src/hotspot/share/opto/type.hpp
+++ b/src/hotspot/share/opto/type.hpp
@@ -1048,6 +1048,8 @@ public:
virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const;
virtual const TypePtr* with_inline_depth(int depth) const;
+ virtual const TypePtr* with_instance_id(int instance_id) const;
+
virtual const Type *xdual() const; // Compute dual right now.
// the core of the computation of the meet for TypeOopPtr and for its subclasses
virtual const Type *xmeet_helper(const Type *t) const;
@@ -1124,6 +1126,7 @@ class TypeInstPtr : public TypeOopPtr {
// Speculative type helper methods.
virtual const Type* remove_speculative() const;
virtual const TypePtr* with_inline_depth(int depth) const;
+ virtual const TypePtr* with_instance_id(int instance_id) const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
@@ -1211,6 +1214,7 @@ public:
// Speculative type helper methods.
virtual const Type* remove_speculative() const;
virtual const TypePtr* with_inline_depth(int depth) const;
+ virtual const TypePtr* with_instance_id(int instance_id) const;
// the core of the computation of the meet of 2 types
virtual const Type *xmeet_helper(const Type *t) const;
--
2.19.0


@@ -1,564 +0,0 @@
diff --git a/src/hotspot/share/opto/classes.hpp b/src/hotspot/share/opto/classes.hpp
index 5ee23f7..b847caf 100644
--- a/src/hotspot/share/opto/classes.hpp
+++ b/src/hotspot/share/opto/classes.hpp
@@ -226,6 +226,8 @@ macro(NegF)
macro(NeverBranch)
macro(OnSpinWait)
macro(Opaque1)
+macro(OpaqueLoopInit)
+macro(OpaqueLoopStride)
macro(Opaque2)
macro(Opaque3)
macro(Opaque4)
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index e5559a2..efe4cff 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -1987,7 +1987,17 @@ void Compile::remove_opaque4_nodes(PhaseIterGVN &igvn) {
for (int i = opaque4_count(); i > 0; i--) {
Node* opaq = opaque4_node(i-1);
assert(opaq->Opcode() == Op_Opaque4, "Opaque4 only");
+ // With Opaque4 nodes, the expectation is that the test of input 1
+ // is always equal to the constant value of input 2. So we can
+ // remove the Opaque4 and replace it by input 2. In debug builds,
+ // leave the non constant test in instead to sanity check that it
+ // never fails (if it does, that subgraph was constructed so, at
+ // runtime, a Halt node is executed).
+ #ifdef ASSERT
+ igvn.replace_node(opaq, opaq->in(1));
+ #else
igvn.replace_node(opaq, opaq->in(2));
+ #endif
}
assert(opaque4_count() == 0, "should be empty");
}
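
Per the comment above, an Opaque4 node carries both the real test (input 1) and the constant the compiler expects it to evaluate to (input 2); product builds fold to the constant, while debug builds keep the real test so a violated assumption reaches the Halt path. A tiny analogue of that policy, using NDEBUG in place of HotSpot's ASSERT (illustrative only):

#include <cassert>
#include <cstdio>

inline bool opaque4(bool real_test, bool assumed_constant) {
#ifdef NDEBUG
  (void)real_test;
  return assumed_constant;                // product: replace by input 2 (the constant)
#else
  assert(real_test == assumed_constant);  // debug: keep checking input 1 (the real test)
  return real_test;
#endif
}

int main() {
  int i = 3, limit = 10;
  if (opaque4(i < limit, true)) { std::printf("in range\n"); }
  return 0;
}
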
diff --git a/src/hotspot/share/opto/loopPredicate.cpp b/src/hotspot/share/opto/loopPredicate.cpp
index 6e85398..a6d2257 100644
--- a/src/hotspot/share/opto/loopPredicate.cpp
+++ b/src/hotspot/share/opto/loopPredicate.cpp
@@ -1243,8 +1243,9 @@ ProjNode* PhaseIdealLoop::insert_initial_skeleton_predicate(IfNode* iff, IdealLo
Node* init, Node* limit, jint stride,
Node* rng, bool &overflow,
Deoptimization::DeoptReason reason) {
+ // First predicate for the initial value on first loop iteration
assert(proj->_con && predicate_proj->_con, "not a range check?");
- Node* opaque_init = new Opaque1Node(C, init);
+ Node* opaque_init = new OpaqueLoopInitNode(C, init);
register_new_node(opaque_init, upper_bound_proj);
BoolNode* bol = rc_predicate(loop, upper_bound_proj, scale, offset, opaque_init, limit, stride, rng, (stride > 0) != (scale > 0), overflow);
Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); // This will go away once loop opts are over
@@ -1252,6 +1253,22 @@ ProjNode* PhaseIdealLoop::insert_initial_skeleton_predicate(IfNode* iff, IdealLo
ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
_igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
assert(opaque_init->outcnt() > 0, "should be used");
+ // Second predicate for init + (current stride - initial stride)
+ // This is identical to the previous predicate initially but as
+ // unrolling proceeds current stride is updated.
+ Node* init_stride = loop->_head->as_CountedLoop()->stride();
+ Node* opaque_stride = new OpaqueLoopStrideNode(C, init_stride);
+ register_new_node(opaque_stride, new_proj);
+ Node* max_value = new SubINode(opaque_stride, init_stride);
+ register_new_node(max_value, new_proj);
+ max_value = new AddINode(opaque_init, max_value);
+ register_new_node(max_value, new_proj);
+ bol = rc_predicate(loop, new_proj, scale, offset, max_value, limit, stride, rng, (stride > 0) != (scale > 0), overflow);
+ opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1));
+ register_new_node(opaque_bol, new_proj);
+ new_proj = create_new_if_for_predicate(predicate_proj, NULL, reason, overflow ? Op_If : iff->Opcode());
+ _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
+ assert(max_value->outcnt() > 0, "should be used");
return new_proj;
}
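
The two skeleton predicates above cover the induction variable's first value and init + (current stride - initial stride); the second starts out identical to the first but, as unrolling doubles the stride, comes to describe the last value the induction variable reaches in the first iteration of the unrolled loop. A small worked example of those two values for init = 0 and an initial stride of 1 (illustrative arithmetic only):

#include <cstdio>

int main() {
  const int init = 0;
  const int initial_stride = 1;
  for (int current_stride = 1; current_stride <= 8; current_stride *= 2) {
    int first_value  = init;                                      // predicate 1
    int second_value = init + (current_stride - initial_stride);  // predicate 2
    std::printf("stride %d: predicates check i = %d and i = %d\n",
                current_stride, first_value, second_value);
  }
  return 0;
}
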
diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp
index 5e6faaa..89628bb 100644
--- a/src/hotspot/share/opto/loopTransform.cpp
+++ b/src/hotspot/share/opto/loopTransform.cpp
@@ -1080,7 +1080,7 @@ void PhaseIdealLoop::ensure_zero_trip_guard_proj(Node* node, bool is_main_loop)
// CastII/ConvI2L nodes cause some data paths to die. For consistency,
// the control paths must die too but the range checks were removed by
// predication. The range checks that we add here guarantee that they do.
-void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* start, Node* end,
+void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* init, Node* stride,
IdealLoopTree* outer_loop, LoopNode* outer_main_head,
uint dd_main_head, const uint idx_before_pre_post,
const uint idx_after_post_before_pre, Node* zero_trip_guard_proj_main,
@@ -1098,6 +1098,10 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat
predicate = iff->in(0);
Node* current_proj = outer_main_head->in(LoopNode::EntryControl);
Node* prev_proj = current_proj;
+ Node* opaque_init = new OpaqueLoopInitNode(C, init);
+ register_new_node(opaque_init, outer_main_head->in(LoopNode::EntryControl));
+ Node* opaque_stride = new OpaqueLoopStrideNode(C, stride);
+ register_new_node(opaque_stride, outer_main_head->in(LoopNode::EntryControl));
while (predicate != NULL && predicate->is_Proj() && predicate->in(0)->is_If()) {
iff = predicate->in(0)->as_If();
uncommon_proj = iff->proj_out(1 - predicate->as_Proj()->_con);
@@ -1108,11 +1112,10 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop_helper(Node* predicat
// Clone the skeleton predicate twice and initialize one with the initial
// value of the loop induction variable. Leave the other predicate
// to be initialized when increasing the stride during loop unrolling.
- prev_proj = clone_skeleton_predicate(iff, start, predicate, uncommon_proj, current_proj, outer_loop, prev_proj);
- assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()) == (start->Opcode() == Op_Opaque1), "");
- prev_proj = clone_skeleton_predicate(iff, end, predicate, uncommon_proj, current_proj, outer_loop, prev_proj);
- assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()) == (end->Opcode() == Op_Opaque1), "");
-
+ prev_proj = clone_skeleton_predicate(iff, opaque_init, NULL, predicate, uncommon_proj, current_proj, outer_loop, prev_proj);
+ assert(skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), "");
+ prev_proj = clone_skeleton_predicate(iff, init, stride, predicate, uncommon_proj, current_proj, outer_loop, prev_proj);
+ assert(!skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), "");
// Rewire any control inputs from the cloned skeleton predicates down to the main and post loop for data nodes that are part of the
// main loop (and were cloned to the pre and post loop).
for (DUIterator i = predicate->outs(); predicate->has_out(i); i++) {
@@ -1177,14 +1180,14 @@ bool PhaseIdealLoop::skeleton_predicate_has_opaque(IfNode* iff) {
}
continue;
}
- if (op == Op_Opaque1) {
+ if (n->is_Opaque1()) {
return true;
}
}
return false;
}
-Node* PhaseIdealLoop::clone_skeleton_predicate(Node* iff, Node* value, Node* predicate, Node* uncommon_proj,
+Node* PhaseIdealLoop::clone_skeleton_predicate(Node* iff, Node* new_init, Node* new_stride, Node* predicate, Node* uncommon_proj,
Node* current_proj, IdealLoopTree* outer_loop, Node* prev_proj) {
Node_Stack to_clone(2);
to_clone.push(iff->in(1), 1);
@@ -1204,12 +1207,19 @@ Node* PhaseIdealLoop::clone_skeleton_predicate(Node* iff, Node* value, Node* pre
to_clone.push(m, 1);
continue;
}
- if (op == Op_Opaque1) {
+ if (m->is_Opaque1()) {
if (n->_idx < current) {
n = n->clone();
+ register_new_node(n, current_proj);
+ }
+ if (op == Op_OpaqueLoopInit) {
+ n->set_req(i, new_init);
+ } else {
+ assert(op == Op_OpaqueLoopStride, "unexpected opaque node");
+ if (new_stride != NULL) {
+ n->set_req(i, new_stride);
+ }
}
- n->set_req(i, value);
- register_new_node(n, current_proj);
to_clone.set_node(n);
}
for (;;) {
@@ -1259,7 +1269,7 @@ Node* PhaseIdealLoop::clone_skeleton_predicate(Node* iff, Node* value, Node* pre
return proj;
}
-void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* start, Node* end,
+void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* init, Node* stride,
IdealLoopTree* outer_loop, LoopNode* outer_main_head,
uint dd_main_head, const uint idx_before_pre_post,
const uint idx_after_post_before_pre, Node* zero_trip_guard_proj_main,
@@ -1279,10 +1289,10 @@ void PhaseIdealLoop::copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_
}
}
predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
- copy_skeleton_predicates_to_main_loop_helper(predicate, start, end, outer_loop, outer_main_head, dd_main_head,
+ copy_skeleton_predicates_to_main_loop_helper(predicate, init, stride, outer_loop, outer_main_head, dd_main_head,
idx_before_pre_post, idx_after_post_before_pre, zero_trip_guard_proj_main,
zero_trip_guard_proj_post, old_new);
- copy_skeleton_predicates_to_main_loop_helper(profile_predicate, start, end, outer_loop, outer_main_head, dd_main_head,
+ copy_skeleton_predicates_to_main_loop_helper(profile_predicate, init, stride, outer_loop, outer_main_head, dd_main_head,
idx_before_pre_post, idx_after_post_before_pre, zero_trip_guard_proj_main,
zero_trip_guard_proj_post, old_new);
}
@@ -1433,10 +1443,8 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
// CastII for the main loop:
Node* castii = cast_incr_before_loop( pre_incr, min_taken, main_head );
assert(castii != NULL, "no castII inserted");
- Node* opaque_castii = new Opaque1Node(C, castii);
- register_new_node(opaque_castii, outer_main_head->in(LoopNode::EntryControl));
assert(post_head->in(1)->is_IfProj(), "must be zero-trip guard If node projection of the post loop");
- copy_skeleton_predicates_to_main_loop(pre_head, castii, opaque_castii, outer_loop, outer_main_head, dd_main_head,
+ copy_skeleton_predicates_to_main_loop(pre_head, castii, stride, outer_loop, outer_main_head, dd_main_head,
idx_before_pre_post, idx_after_post_before_pre, min_taken, post_head->in(1), old_new);
// Step B4: Shorten the pre-loop to run only 1 iteration (for now).
@@ -1722,6 +1730,11 @@ void PhaseIdealLoop::update_main_loop_skeleton_predicates(Node* ctrl, CountedLoo
Node* prev_proj = ctrl;
LoopNode* outer_loop_head = loop_head->skip_strip_mined();
IdealLoopTree* outer_loop = get_loop(outer_loop_head);
+ // Compute the value of the loop induction variable at the end of the
+ // first iteration of the unrolled loop: init + new_stride_con - init_inc
+ int new_stride_con = stride_con * 2;
+ Node* max_value = _igvn.intcon(new_stride_con);
+ set_ctrl(max_value, C->root());
while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) {
IfNode* iff = entry->in(0)->as_If();
ProjNode* proj = iff->proj_out(1 - entry->as_Proj()->_con);
@@ -1737,18 +1750,8 @@ void PhaseIdealLoop::update_main_loop_skeleton_predicates(Node* ctrl, CountedLoo
// tell. Kill it in any case.
_igvn.replace_input_of(iff, 1, iff->in(1)->in(2));
} else {
- // Add back the predicate for the value at the beginning of the first entry
- prev_proj = clone_skeleton_predicate(iff, init, entry, proj, ctrl, outer_loop, prev_proj);
- assert(!skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), "unexpected");
- // Compute the value of the loop induction variable at the end of the
- // first iteration of the unrolled loop: init + new_stride_con - init_inc
- int init_inc = stride_con/loop_head->unrolled_count();
- assert(init_inc != 0, "invalid loop increment");
- int new_stride_con = stride_con * 2;
- Node* max_value = _igvn.intcon(new_stride_con - init_inc);
- max_value = new AddINode(init, max_value);
- register_new_node(max_value, get_ctrl(iff->in(1)));
- prev_proj = clone_skeleton_predicate(iff, max_value, entry, proj, ctrl, outer_loop, prev_proj);
+ // Add back predicates updated for the new stride.
+ prev_proj = clone_skeleton_predicate(iff, init, max_value, entry, proj, ctrl, outer_loop, prev_proj);
assert(!skeleton_predicate_has_opaque(prev_proj->in(0)->as_If()), "unexpected");
}
}
@@ -2547,22 +2550,22 @@ int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
// The underflow and overflow limits: 0 <= scale*I+offset < limit
add_constraint(stride_con, lscale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit);
Node* init = cl->init_trip();
- Node* opaque_init = new Opaque1Node(C, init);
+ Node* opaque_init = new OpaqueLoopInitNode(C, init);
register_new_node(opaque_init, predicate_proj);
+ // predicate on first value of first iteration
+ predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, int_offset, int_limit, stride_con, init);
+ assert(!skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected");
// template predicate so it can be updated on next unrolling
predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, int_offset, int_limit, stride_con, opaque_init);
assert(skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected");
- // predicate on first value of first iteration
- predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, int_offset, int_limit, stride_con, init);
- assert(!skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected");
- int init_inc = stride_con/cl->unrolled_count();
- assert(init_inc != 0, "invalid loop increment");
- Node* max_value = _igvn.intcon(stride_con - init_inc);
- max_value = new AddINode(init, max_value);
+ Node* opaque_stride = new OpaqueLoopStrideNode(C, cl->stride());
+ register_new_node(opaque_stride, predicate_proj);
+ Node* max_value = new SubINode(opaque_stride, cl->stride());
+ register_new_node(max_value, predicate_proj);
+ max_value = new AddINode(opaque_init, max_value);
register_new_node(max_value, predicate_proj);
- // predicate on last value of first iteration (in case unrolling has already happened)
predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, int_offset, int_limit, stride_con, max_value);
- assert(!skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected");
+ assert(skeleton_predicate_has_opaque(predicate_proj->in(0)->as_If()), "unexpected");
} else {
if (PrintOpto) {
tty->print_cr("missed RCE opportunity");
diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp
index 53b9692..7c54113 100644
--- a/src/hotspot/share/opto/loopnode.hpp
+++ b/src/hotspot/share/opto/loopnode.hpp
@@ -748,13 +748,13 @@ private:
#ifdef ASSERT
void ensure_zero_trip_guard_proj(Node* node, bool is_main_loop);
#endif
- void copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* start, Node* end, IdealLoopTree* outer_loop, LoopNode* outer_main_head,
+ void copy_skeleton_predicates_to_main_loop_helper(Node* predicate, Node* init, Node* stride, IdealLoopTree* outer_loop, LoopNode* outer_main_head,
uint dd_main_head, const uint idx_before_pre_post, const uint idx_after_post_before_pre,
Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post, const Node_List &old_new);
- void copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* start, Node* end, IdealLoopTree* outer_loop, LoopNode* outer_main_head,
+ void copy_skeleton_predicates_to_main_loop(CountedLoopNode* pre_head, Node* init, Node* stride, IdealLoopTree* outer_loop, LoopNode* outer_main_head,
uint dd_main_head, const uint idx_before_pre_post, const uint idx_after_post_before_pre,
Node* zero_trip_guard_proj_main, Node* zero_trip_guard_proj_post, const Node_List &old_new);
- Node* clone_skeleton_predicate(Node* iff, Node* value, Node* predicate, Node* uncommon_proj,
+ Node* clone_skeleton_predicate(Node* iff, Node* new_init, Node* new_stride, Node* predicate, Node* uncommon_proj,
Node* current_proj, IdealLoopTree* outer_loop, Node* prev_proj);
bool skeleton_predicate_has_opaque(IfNode* iff);
void update_main_loop_skeleton_predicates(Node* ctrl, CountedLoopNode* loop_head, Node* init, int stride_con);
diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp
index 6b85529..adbee17 100644
--- a/src/hotspot/share/opto/loopopts.cpp
+++ b/src/hotspot/share/opto/loopopts.cpp
@@ -890,30 +890,42 @@ void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
// Cloning these guys is unlikely to win
int n_op = n->Opcode();
- if( n_op == Op_MergeMem ) return n;
- if( n->is_Proj() ) return n;
+ if (n_op == Op_MergeMem) {
+ return n;
+ }
+ if (n->is_Proj()) {
+ return n;
+ }
// Do not clone-up CmpFXXX variations, as these are always
// followed by a CmpI
- if( n->is_Cmp() ) return n;
+ if (n->is_Cmp()) {
+ return n;
+ }
// Attempt to use a conditional move instead of a phi/branch
- if( ConditionalMoveLimit > 0 && n_op == Op_Region ) {
+ if (ConditionalMoveLimit > 0 && n_op == Op_Region) {
Node *cmov = conditional_move( n );
- if( cmov ) return cmov;
+ if (cmov) {
+ return cmov;
+ }
}
- if( n->is_CFG() || n->is_LoadStore() )
+ if (n->is_CFG() || n->is_LoadStore()) {
return n;
- if( n_op == Op_Opaque1 || // Opaque nodes cannot be mod'd
- n_op == Op_Opaque2 ) {
- if( !C->major_progress() ) // If chance of no more loop opts...
+ }
+ if (n->is_Opaque1() || // Opaque nodes cannot be mod'd
+ n_op == Op_Opaque2) {
+ if (!C->major_progress()) { // If chance of no more loop opts...
_igvn._worklist.push(n); // maybe we'll remove them
+ }
return n;
}
- if( n->is_Con() ) return n; // No cloning for Con nodes
-
+ if (n->is_Con()) {
+ return n; // No cloning for Con nodes
+ }
Node *n_ctrl = get_ctrl(n);
- if( !n_ctrl ) return n; // Dead node
-
+ if (!n_ctrl) {
+ return n; // Dead node
+ }
Node* res = try_move_store_before_loop(n, n_ctrl);
if (res != NULL) {
return n;
diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp
index cf8eab1..8331d25 100644
--- a/src/hotspot/share/opto/macro.cpp
+++ b/src/hotspot/share/opto/macro.cpp
@@ -2617,9 +2617,10 @@ void PhaseMacroExpand::eliminate_macro_nodes() {
break;
case Node::Class_OuterStripMinedLoop:
break;
+ case Node::Class_Opaque1:
+ break;
default:
assert(n->Opcode() == Op_LoopLimit ||
- n->Opcode() == Op_Opaque1 ||
n->Opcode() == Op_Opaque2 ||
n->Opcode() == Op_Opaque3 ||
BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(n),
@@ -2661,7 +2662,7 @@ bool PhaseMacroExpand::expand_macro_nodes() {
C->remove_macro_node(n);
_igvn._worklist.push(n);
success = true;
- } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
+ } else if (n->is_Opaque1() || n->Opcode() == Op_Opaque2) {
_igvn.replace_node(n, n->in(1));
success = true;
#if INCLUDE_RTM_OPT
diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp
index 2a42e99..0eac634 100644
--- a/src/hotspot/share/opto/node.hpp
+++ b/src/hotspot/share/opto/node.hpp
@@ -116,6 +116,7 @@ class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
+class Opaque1Node;
class OuterStripMinedLoopNode;
class OuterStripMinedLoopEndNode;
class Node;
@@ -611,10 +612,10 @@ public:
// This enum is used only for C2 ideal and mach nodes with is_<node>() methods
// so that it's values fits into 16 bits.
enum NodeClasses {
- Bit_Node = 0x0000,
- Class_Node = 0x0000,
- ClassMask_Node = 0xFFFF,
-
+ Bit_Node = 0x00000000,
+ Class_Node = 0x00000000,
+ ClassMask_Node = 0xFFFFFFFF,
+
DEFINE_CLASS_ID(Multi, Node, 0)
DEFINE_CLASS_ID(SafePoint, Multi, 0)
DEFINE_CLASS_ID(Call, SafePoint, 0)
@@ -720,6 +721,7 @@ public:
DEFINE_CLASS_ID(Vector, Node, 13)
DEFINE_CLASS_ID(ClearArray, Node, 14)
DEFINE_CLASS_ID(Halt, Node, 15)
+ DEFINE_CLASS_ID(Opaque1, Node, 16)
_max_classes = ClassMask_Halt
};
@@ -746,12 +748,12 @@ public:
};
private:
- jushort _class_id;
+ juint _class_id;
jushort _flags;
protected:
// These methods should be called from constructors only.
- void init_class_id(jushort c) {
+ void init_class_id(juint c) {
_class_id = c; // cast out const
}
void init_flags(jushort fl) {
@@ -764,7 +766,7 @@ protected:
}
public:
- const jushort class_id() const { return _class_id; }
+ const juint class_id() const { return _class_id; }
const jushort flags() const { return _flags; }
@@ -865,6 +867,7 @@ public:
DEFINE_CLASS_QUERY(Mul)
DEFINE_CLASS_QUERY(Multi)
DEFINE_CLASS_QUERY(MultiBranch)
+ DEFINE_CLASS_QUERY(Opaque1)
DEFINE_CLASS_QUERY(OuterStripMinedLoop)
DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd)
DEFINE_CLASS_QUERY(Parm)
diff --git a/src/hotspot/share/opto/opaquenode.hpp b/src/hotspot/share/opto/opaquenode.hpp
index f97de4a..4c00528 100644
--- a/src/hotspot/share/opto/opaquenode.hpp
+++ b/src/hotspot/share/opto/opaquenode.hpp
@@ -38,6 +38,7 @@ class Opaque1Node : public Node {
Opaque1Node(Compile* C, Node *n) : Node(NULL, n) {
// Put it on the Macro nodes list to removed during macro nodes expansion.
init_flags(Flag_is_macro);
+ init_class_id(Class_Opaque1);
C->add_macro_node(this);
}
// Special version for the pre-loop to hold the original loop limit
@@ -45,6 +46,7 @@ class Opaque1Node : public Node {
Opaque1Node(Compile* C, Node *n, Node* orig_limit) : Node(NULL, n, orig_limit) {
// Put it on the Macro nodes list to removed during macro nodes expansion.
init_flags(Flag_is_macro);
+ init_class_id(Class_Opaque1);
C->add_macro_node(this);
}
Node* original_loop_limit() { return req()==3 ? in(2) : NULL; }
@@ -52,6 +54,20 @@ class Opaque1Node : public Node {
virtual const Type *bottom_type() const { return TypeInt::INT; }
virtual Node* Identity(PhaseGVN* phase);
};
+// Opaque nodes specific to range check elimination handling
+class OpaqueLoopInitNode : public Opaque1Node {
+ public:
+ OpaqueLoopInitNode(Compile* C, Node *n) : Opaque1Node(C, n) {
+ }
+ virtual int Opcode() const;
+};
+
+class OpaqueLoopStrideNode : public Opaque1Node {
+ public:
+ OpaqueLoopStrideNode(Compile* C, Node *n) : Opaque1Node(C, n) {
+ }
+ virtual int Opcode() const;
+};
//------------------------------Opaque2Node------------------------------------
// A node to prevent unwanted optimizations. Allows constant folding. Stops
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index ad7fb42..85c4dfe 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -951,7 +951,7 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
c2_nonstatic_field(Node, _outcnt, node_idx_t) \
c2_nonstatic_field(Node, _outmax, node_idx_t) \
c2_nonstatic_field(Node, _idx, const node_idx_t) \
- c2_nonstatic_field(Node, _class_id, jushort) \
+ c2_nonstatic_field(Node, _class_id, juint) \
c2_nonstatic_field(Node, _flags, jushort) \
\
c2_nonstatic_field(Compile, _root, RootNode*) \
diff --git a/test/hotspot/jtreg/compiler/loopopts/TestRCEAfterUnrolling.java b/test/hotspot/jtreg/compiler/loopopts/TestRCEAfterUnrolling.java
new file mode 100644
index 0000000..06bca79
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/loopopts/TestRCEAfterUnrolling.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8229495
+ * @summary SIGILL in C2 generated OSR compilation.
+ *
+ * @run main/othervm -Xcomp -XX:-TieredCompilation -XX:CompileOnly=TestRCEAfterUnrolling::test TestRCEAfterUnrolling
+ *
+ */
+
+public class TestRCEAfterUnrolling {
+
+ public static int iFld = 0;
+ public static short sFld = 1;
+
+ public static void main(String[] strArr) {
+ test();
+ }
+
+ public static int test() {
+ int x = 11;
+ int y = 0;
+ int j = 0;
+ int iArr[] = new int[400];
+
+ init(iArr);
+
+ for (int i = 0; i < 2; i++) {
+ doNothing();
+ for (j = 10; j > 1; j -= 2) {
+ sFld += (short)j;
+ iArr = iArr;
+ y += (j * 3);
+ x = (iArr[j - 1]/ x);
+ x = sFld;
+ }
+ int k = 1;
+ while (++k < 8) {
+ iFld += x;
+ }
+ }
+ return Float.floatToIntBits(654) + x + j + y;
+ }
+
+ // Inlined
+ public static void doNothing() {
+ }
+
+ // Inlined
+ public static void init(int[] a) {
+ for (int j = 0; j < a.length; j++) {
+ a[j] = 0;
+ }
+ }
+}
+

@ -1,479 +0,0 @@
diff --git a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11AEADCipher.java b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11AEADCipher.java
index d1b9d06d8..82d0dc164 100644
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11AEADCipher.java
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11AEADCipher.java
@@ -1,4 +1,5 @@
-/* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+/*
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -334,25 +335,25 @@ final class P11AEADCipher extends CipherSpi {
}
private void cancelOperation() {
+ // cancel operation by finishing it; avoid killSession as some
+ // hardware vendors may require re-login
+ int bufLen = doFinalLength(0);
+ byte[] buffer = new byte[bufLen];
+ byte[] in = dataBuffer.toByteArray();
+ int inLen = in.length;
try {
- if (session.hasObjects() == false) {
- session = token.killSession(session);
- return;
+ if (encrypt) {
+ token.p11.C_Encrypt(session.id(), 0, in, 0, inLen,
+ 0, buffer, 0, bufLen);
} else {
- // cancel operation by finishing it
- int bufLen = doFinalLength(0);
- byte[] buffer = new byte[bufLen];
-
- if (encrypt) {
- token.p11.C_Encrypt(session.id(), 0, buffer, 0, bufLen,
- 0, buffer, 0, bufLen);
- } else {
- token.p11.C_Decrypt(session.id(), 0, buffer, 0, bufLen,
- 0, buffer, 0, bufLen);
- }
+ token.p11.C_Decrypt(session.id(), 0, in, 0, inLen,
+ 0, buffer, 0, bufLen);
}
} catch (PKCS11Exception e) {
- throw new ProviderException("Cancel failed", e);
+ if (encrypt) {
+ throw new ProviderException("Cancel failed", e);
+ }
+ // ignore failure for decryption
}
}
@@ -434,18 +435,21 @@ final class P11AEADCipher extends CipherSpi {
if (!initialized) {
return;
}
+ initialized = false;
+
try {
if (session == null) {
return;
}
+
if (doCancel && token.explicitCancel) {
cancelOperation();
}
} finally {
p11Key.releaseKeyID();
session = token.releaseSession(session);
+ dataBuffer.reset();
}
- initialized = false;
}
// see JCE spec
diff --git a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Cipher.java b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Cipher.java
index cc4535e7b..470a888cd 100644
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Cipher.java
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Cipher.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -409,10 +409,12 @@ final class P11Cipher extends CipherSpi {
return;
}
initialized = false;
+
try {
if (session == null) {
return;
}
+
if (doCancel && token.explicitCancel) {
cancelOperation();
}
@@ -426,22 +428,21 @@ final class P11Cipher extends CipherSpi {
private void cancelOperation() {
token.ensureValid();
- if (session.hasObjects() == false) {
- session = token.killSession(session);
- return;
- } else {
- try {
- // cancel operation by finishing it
- int bufLen = doFinalLength(0);
- byte[] buffer = new byte[bufLen];
- if (encrypt) {
- token.p11.C_EncryptFinal(session.id(), 0, buffer, 0, bufLen);
- } else {
- token.p11.C_DecryptFinal(session.id(), 0, buffer, 0, bufLen);
- }
- } catch (PKCS11Exception e) {
+ // cancel operation by finishing it; avoid killSession as some
+ // hardware vendors may require re-login
+ try {
+ int bufLen = doFinalLength(0);
+ byte[] buffer = new byte[bufLen];
+ if (encrypt) {
+ token.p11.C_EncryptFinal(session.id(), 0, buffer, 0, bufLen);
+ } else {
+ token.p11.C_DecryptFinal(session.id(), 0, buffer, 0, bufLen);
+ }
+ } catch (PKCS11Exception e) {
+ if (encrypt) {
throw new ProviderException("Cancel failed", e);
}
+ // ignore failure for decryption
}
}
diff --git a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Mac.java b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Mac.java
index 338cb215d..634e0855f 100644
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Mac.java
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Mac.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -124,10 +124,12 @@ final class P11Mac extends MacSpi {
return;
}
initialized = false;
+
try {
if (session == null) {
return;
}
+
if (doCancel && token.explicitCancel) {
cancelOperation();
}
@@ -139,15 +141,12 @@ final class P11Mac extends MacSpi {
private void cancelOperation() {
token.ensureValid();
- if (session.hasObjects() == false) {
- session = token.killSession(session);
- return;
- } else {
- try {
- token.p11.C_SignFinal(session.id(), 0);
- } catch (PKCS11Exception e) {
- throw new ProviderException("Cancel failed", e);
- }
+ // cancel operation by finishing it; avoid killSession as some
+ // hardware vendors may require re-login
+ try {
+ token.p11.C_SignFinal(session.id(), 0);
+ } catch (PKCS11Exception e) {
+ throw new ProviderException("Cancel failed", e);
}
}
@@ -209,7 +208,6 @@ final class P11Mac extends MacSpi {
ensureInitialized();
return token.p11.C_SignFinal(session.id(), 0);
} catch (PKCS11Exception e) {
- reset(true);
throw new ProviderException("doFinal() failed", e);
} finally {
reset(false);
diff --git a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11PSSSignature.java b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11PSSSignature.java
index 763fb98a8..0a470b932 100644
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11PSSSignature.java
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11PSSSignature.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -223,10 +223,12 @@ final class P11PSSSignature extends SignatureSpi {
return;
}
initialized = false;
+
try {
if (session == null) {
return;
}
+
if (doCancel && token.explicitCancel) {
cancelOperation();
}
@@ -242,14 +244,10 @@ final class P11PSSSignature extends SignatureSpi {
token.ensureValid();
if (DEBUG) System.out.print("Cancelling operation");
- if (session.hasObjects() == false) {
- if (DEBUG) System.out.println(" by killing session");
- session = token.killSession(session);
- return;
- }
- // "cancel" operation by finishing it
- if (mode == M_SIGN) {
- try {
+ // cancel operation by finishing it; avoid killSession as some
+ // hardware vendors may require re-login
+ try {
+ if (mode == M_SIGN) {
if (type == T_UPDATE) {
if (DEBUG) System.out.println(" by C_SignFinal");
token.p11.C_SignFinal(session.id(), 0);
@@ -259,11 +257,7 @@ final class P11PSSSignature extends SignatureSpi {
if (DEBUG) System.out.println(" by C_Sign");
token.p11.C_Sign(session.id(), digest);
}
- } catch (PKCS11Exception e) {
- throw new ProviderException("cancel failed", e);
- }
- } else { // M_VERIFY
- try {
+ } else { // M_VERIFY
byte[] signature =
new byte[(p11Key.length() + 7) >> 3];
if (type == T_UPDATE) {
@@ -275,10 +269,12 @@ final class P11PSSSignature extends SignatureSpi {
if (DEBUG) System.out.println(" by C_Verify");
token.p11.C_Verify(session.id(), digest, signature);
}
- } catch (PKCS11Exception e) {
- // will fail since the signature is incorrect
- // XXX check error code
}
+ } catch (PKCS11Exception e) {
+ if (mode == M_SIGN) {
+ throw new ProviderException("cancel failed", e);
+ }
+ // ignore failure for verification
}
}
diff --git a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11RSACipher.java b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11RSACipher.java
index 3f32501e0..06d65e893 100644
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11RSACipher.java
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11RSACipher.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -247,10 +247,12 @@ final class P11RSACipher extends CipherSpi {
return;
}
initialized = false;
+
try {
if (session == null) {
return;
}
+
if (doCancel && token.explicitCancel) {
cancelOperation();
}
@@ -264,36 +266,33 @@ final class P11RSACipher extends CipherSpi {
// state variables such as "initialized"
private void cancelOperation() {
token.ensureValid();
- if (session.hasObjects() == false) {
- session = token.killSession(session);
- return;
- } else {
- try {
- PKCS11 p11 = token.p11;
- int inLen = maxInputSize;
- int outLen = buffer.length;
- long sessId = session.id();
- switch (mode) {
- case MODE_ENCRYPT:
- p11.C_Encrypt(sessId, 0, buffer, 0, inLen, 0, buffer, 0, outLen);
- break;
- case MODE_DECRYPT:
- p11.C_Decrypt(sessId, 0, buffer, 0, inLen, 0, buffer, 0, outLen);
- break;
- case MODE_SIGN:
- byte[] tmpBuffer = new byte[maxInputSize];
- p11.C_Sign(sessId, tmpBuffer);
- break;
- case MODE_VERIFY:
- p11.C_VerifyRecover(sessId, buffer, 0, inLen, buffer,
- 0, outLen);
- break;
- default:
- throw new ProviderException("internal error");
- }
- } catch (PKCS11Exception e) {
- // XXX ensure this always works, ignore error
+ // cancel operation by finishing it; avoid killSession as some
+ // hardware vendors may require re-login
+ try {
+ PKCS11 p11 = token.p11;
+ int inLen = maxInputSize;
+ int outLen = buffer.length;
+ long sessId = session.id();
+ switch (mode) {
+ case MODE_ENCRYPT:
+ p11.C_Encrypt(sessId, 0, buffer, 0, inLen, 0, buffer, 0, outLen);
+ break;
+ case MODE_DECRYPT:
+ p11.C_Decrypt(sessId, 0, buffer, 0, inLen, 0, buffer, 0, outLen);
+ break;
+ case MODE_SIGN:
+ byte[] tmpBuffer = new byte[maxInputSize];
+ p11.C_Sign(sessId, tmpBuffer);
+ break;
+ case MODE_VERIFY:
+ p11.C_VerifyRecover(sessId, buffer, 0, inLen, buffer,
+ 0, outLen);
+ break;
+ default:
+ throw new ProviderException("internal error");
}
+ } catch (PKCS11Exception e) {
+ // XXX ensure this always works, ignore error
}
}
@@ -362,6 +361,7 @@ final class P11RSACipher extends CipherSpi {
private int implDoFinal(byte[] out, int outOfs, int outLen)
throws BadPaddingException, IllegalBlockSizeException {
if (bufOfs > maxInputSize) {
+ reset(true);
throw new IllegalBlockSizeException("Data must not be longer "
+ "than " + maxInputSize + " bytes");
}
diff --git a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Signature.java b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Signature.java
index 159c65f59..f41538cda 100644
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Signature.java
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Signature.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -270,10 +270,12 @@ final class P11Signature extends SignatureSpi {
return;
}
initialized = false;
+
try {
if (session == null) {
return;
}
+
if (doCancel && token.explicitCancel) {
cancelOperation();
}
@@ -284,59 +286,51 @@ final class P11Signature extends SignatureSpi {
}
private void cancelOperation() {
-
token.ensureValid();
- if (session.hasObjects() == false) {
- session = token.killSession(session);
- return;
- } else {
- // "cancel" operation by finishing it
- // XXX make sure all this always works correctly
+ // cancel operation by finishing it; avoid killSession as some
+ // hardware vendors may require re-login
+ try {
if (mode == M_SIGN) {
- try {
- if (type == T_UPDATE) {
- token.p11.C_SignFinal(session.id(), 0);
- } else {
- byte[] digest;
- if (type == T_DIGEST) {
- digest = md.digest();
- } else { // T_RAW
- digest = buffer;
- }
- token.p11.C_Sign(session.id(), digest);
+ if (type == T_UPDATE) {
+ token.p11.C_SignFinal(session.id(), 0);
+ } else {
+ byte[] digest;
+ if (type == T_DIGEST) {
+ digest = md.digest();
+ } else { // T_RAW
+ digest = buffer;
}
- } catch (PKCS11Exception e) {
- throw new ProviderException("cancel failed", e);
+ token.p11.C_Sign(session.id(), digest);
}
} else { // M_VERIFY
byte[] signature;
- try {
- if (keyAlgorithm.equals("DSA")) {
- signature = new byte[40];
- } else {
- signature = new byte[(p11Key.length() + 7) >> 3];
- }
- if (type == T_UPDATE) {
- token.p11.C_VerifyFinal(session.id(), signature);
- } else {
- byte[] digest;
- if (type == T_DIGEST) {
- digest = md.digest();
- } else { // T_RAW
- digest = buffer;
- }
- token.p11.C_Verify(session.id(), digest, signature);
- }
- } catch (PKCS11Exception e) {
- long errorCode = e.getErrorCode();
- if ((errorCode == CKR_SIGNATURE_INVALID) ||
- (errorCode == CKR_SIGNATURE_LEN_RANGE)) {
- // expected since signature is incorrect
- return;
+ if (keyAlgorithm.equals("DSA")) {
+ signature = new byte[40];
+ } else {
+ signature = new byte[(p11Key.length() + 7) >> 3];
+ }
+ if (type == T_UPDATE) {
+ token.p11.C_VerifyFinal(session.id(), signature);
+ } else {
+ byte[] digest;
+ if (type == T_DIGEST) {
+ digest = md.digest();
+ } else { // T_RAW
+ digest = buffer;
}
- throw new ProviderException("cancel failed", e);
+ token.p11.C_Verify(session.id(), digest, signature);
+ }
+ }
+ } catch (PKCS11Exception e) {
+ if (mode == M_VERIFY) {
+ long errorCode = e.getErrorCode();
+ if ((errorCode == CKR_SIGNATURE_INVALID) ||
+ (errorCode == CKR_SIGNATURE_LEN_RANGE)) {
+ // expected since signature is incorrect
+ return;
}
}
+ throw new ProviderException("cancel failed", e);
}
}
--
2.19.1
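
The hunks above change cancelOperation() across the PKCS#11 engines so that a half-finished operation is cancelled by driving it to completion (C_Encrypt/C_Decrypt, C_SignFinal, C_Verify and friends) instead of calling killSession, because some hardware tokens force a re-login once their session is killed. A minimal, hypothetical sketch of a call path that abandons an active operation and therefore reaches cancelOperation; it assumes a SunPKCS11 provider already configured for a real token, and the provider name, algorithm and key size are illustrative:

    import java.security.Provider;
    import java.security.Security;
    import javax.crypto.Cipher;
    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;

    public class P11CancelSketch {
        public static void main(String[] args) throws Exception {
            // Assumes a SunPKCS11 provider installed via a pkcs11.cfg file.
            Provider p = Security.getProvider("SunPKCS11-token");
            KeyGenerator kg = KeyGenerator.getInstance("AES", p);
            kg.init(128);
            SecretKey key = kg.generateKey();

            Cipher c = Cipher.getInstance("AES/CBC/NoPadding", p);
            c.init(Cipher.ENCRYPT_MODE, key);
            c.update(new byte[16]);        // starts an operation on the token

            // Re-initialising without doFinal() abandons the active operation.
            // With explicitCancel in effect, the provider now finishes that
            // operation internally (the patched cancelOperation) instead of
            // killing the session, so tokens that tie login state to the
            // session keep working.
            c.init(Cipher.ENCRYPT_MODE, key);
            c.doFinal(new byte[16]);
        }
    }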

8240353.patch Executable file
@ -0,0 +1,65 @@
commit 29fd7a83a66269e360af353c64d945612be62623
Date: Thu Feb 4 16:37:53 2021 +0800
8240353: AArch64: missing support for -XX:+ExtendedDTraceProbes in C1
Summary: <c1>: java -XX:+ExtendedDTraceProbes throws SIGILL
LLT: java -XX:+ExtendedDTraceProbes
Patch Type: backport
Bug url: https://bugs.openjdk.java.net/browse/JDK-8240353
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 7798aa509..a8e89cde0 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -449,12 +449,9 @@ int LIR_Assembler::emit_unwind_handler() {
}
if (compilation()->env()->dtrace_method_probes()) {
- __ call_Unimplemented();
-#if 0
- __ movptr(Address(rsp, 0), rax);
- __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
-#endif
+ __ mov(c_rarg0, rthread);
+ __ mov_metadata(c_rarg1, method()->constant_encoding());
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
}
if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
index 2344e0be3..3f5ab6641 100644
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1123,6 +1123,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
}
break;
+ case dtrace_object_alloc_id:
+ { // c_rarg0: object
+ StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
+ save_live_registers(sasm);
+
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), c_rarg0);
+
+ restore_live_registers(sasm);
+ }
+ break;
default:
{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
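
With the two changes above, -XX:+ExtendedDTraceProbes on AArch64 no longer reaches call_Unimplemented() (the SIGILL from the summary): method-exit probes go through SharedRuntime::dtrace_method_exit and the new dtrace_object_alloc_id stub handles allocation probes. A trivial smoke test along the lines of the LLT above; the class name is illustrative, and -XX:TieredStopAtLevel=1 simply keeps execution in C1:

    // java -XX:+ExtendedDTraceProbes -XX:TieredStopAtLevel=1 DTraceProbeSmoke
    public class DTraceProbeSmoke {
        public static void main(String[] args) {
            long sum = 0;
            for (int i = 0; i < 1_000_000; i++) {
                sum += new byte[16].length;   // allocations hit dtrace_object_alloc
            }
            System.out.println(sum);          // method returns hit dtrace_method_exit
        }
    }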

@ -1,20 +0,0 @@
--- a/src/hotspot/share/opto/addnode.cpp Mon Nov 02 20:20:05 2020 +0100
+++ b/src/hotspot/share/opto/addnode.cpp Wed Nov 04 10:42:35 2020 +0100
@@ -917,7 +917,7 @@
// Transform MIN2(x + c0, MIN2(x + c1, z)) into MIN2(x + MIN2(c0, c1), z)
// if x == y and the additions can't overflow.
- if (phase->eqv(x,y) &&
+ if (phase->eqv(x,y) && tx != NULL &&
!can_overflow(tx, x_off) &&
!can_overflow(tx, y_off)) {
return new MinINode(phase->transform(new AddINode(x, phase->intcon(MIN2(x_off, y_off)))), r->in(2));
@@ -925,7 +925,7 @@
} else {
// Transform MIN2(x + c0, y + c1) into x + MIN2(c0, c1)
// if x == y and the additions can't overflow.
- if (phase->eqv(x,y) &&
+ if (phase->eqv(x,y) && tx != NULL &&
!can_overflow(tx, x_off) &&
!can_overflow(tx, y_off)) {
return new AddINode(x,phase->intcon(MIN2(x_off,y_off)));
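
Both hunks add only a tx != NULL guard, so the overflow checks are skipped when no int type is available for x instead of dereferencing a null type. The shape of code that triggers this fold looks roughly like the following; this is an illustrative loop, not the original regression test:

    public class MinFoldExample {
        // C2 sees Math.min(i + 2, Math.min(i + 4, bound)) and, when the
        // additions cannot overflow, folds it to Math.min(i + 2, bound),
        // i.e. MIN2(x + MIN2(c0, c1), z) as described in the comment above.
        static int sum(int[] a, int bound) {
            int s = 0;
            for (int i = 0; i < a.length; i++) {
                s += a[Math.min(i + 2, Math.min(i + 4, bound)) % a.length];
            }
            return s;
        }

        public static void main(String[] args) {
            int[] a = new int[1024];
            int s = 0;
            for (int iter = 0; iter < 20_000; iter++) {  // warm up so C2 compiles sum()
                s += sum(a, 512);
            }
            System.out.println(s);
        }
    }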

@ -0,0 +1,218 @@
From 0f9ef0bc57aa0e7d8457b645374be74d510ea7ae Mon Sep 17 00:00:00 2001
Date: Thu, 18 Mar 2021 12:35:14 +0000
Subject: [PATCH 2/4] 8254078: DataOutputStream is very slow post-disabling of Biased Locking
---
.../classes/java/io/DataInputStream.java | 7 +-
.../classes/java/io/DataOutputStream.java | 24 ++--
.../bench/java/io/DataOutputStreamTest.java | 124 ++++++++++++++++++
3 files changed, 144 insertions(+), 11 deletions(-)
create mode 100644 test/micro/org/openjdk/bench/java/io/DataOutputStreamTest.java
diff --git a/src/java.base/share/classes/java/io/DataInputStream.java b/src/java.base/share/classes/java/io/DataInputStream.java
index f92c4f91b..114857691 100644
--- a/src/java.base/share/classes/java/io/DataInputStream.java
+++ b/src/java.base/share/classes/java/io/DataInputStream.java
@@ -31,9 +31,10 @@ package java.io;
* way. An application uses a data output stream to write data that
* can later be read by a data input stream.
* <p>
- * DataInputStream is not necessarily safe for multithreaded access.
- * Thread safety is optional and is the responsibility of users of
- * methods in this class.
+ * A DataInputStream is not safe for use by multiple concurrent
+ * threads. If a DataInputStream is to be used by more than one
+ * thread then access to the data input stream should be controlled
+ * by appropriate synchronization.
*
* @author Arthur van Hoff
* @see java.io.DataOutputStream
diff --git a/src/java.base/share/classes/java/io/DataOutputStream.java b/src/java.base/share/classes/java/io/DataOutputStream.java
index 392abba92..7c0962442 100644
--- a/src/java.base/share/classes/java/io/DataOutputStream.java
+++ b/src/java.base/share/classes/java/io/DataOutputStream.java
@@ -29,6 +29,11 @@ package java.io;
* A data output stream lets an application write primitive Java data
* types to an output stream in a portable way. An application can
* then use a data input stream to read the data back in.
+ * <p>
+ * A DataOutputStream is not safe for use by multiple concurrent
+ * threads. If a DataOutputStream is to be used by more than one
+ * thread then access to the data output stream should be controlled
+ * by appropriate synchronization.
*
* @author unascribed
* @see java.io.DataInputStream
@@ -164,8 +169,9 @@ class DataOutputStream extends FilterOutputStream implements DataOutput {
* @see java.io.FilterOutputStream#out
*/
public final void writeShort(int v) throws IOException {
- out.write((v >>> 8) & 0xFF);
- out.write((v >>> 0) & 0xFF);
+ writeBuffer[0] = (byte)(v >>> 8);
+ writeBuffer[1] = (byte)(v >>> 0);
+ out.write(writeBuffer, 0, 2);
incCount(2);
}
@@ -179,8 +185,9 @@ class DataOutputStream extends FilterOutputStream implements DataOutput {
* @see java.io.FilterOutputStream#out
*/
public final void writeChar(int v) throws IOException {
- out.write((v >>> 8) & 0xFF);
- out.write((v >>> 0) & 0xFF);
+ writeBuffer[0] = (byte)(v >>> 8);
+ writeBuffer[1] = (byte)(v >>> 0);
+ out.write(writeBuffer, 0, 2);
incCount(2);
}
@@ -194,10 +201,11 @@ class DataOutputStream extends FilterOutputStream implements DataOutput {
* @see java.io.FilterOutputStream#out
*/
public final void writeInt(int v) throws IOException {
- out.write((v >>> 24) & 0xFF);
- out.write((v >>> 16) & 0xFF);
- out.write((v >>> 8) & 0xFF);
- out.write((v >>> 0) & 0xFF);
+ writeBuffer[0] = (byte)(v >>> 24);
+ writeBuffer[1] = (byte)(v >>> 16);
+ writeBuffer[2] = (byte)(v >>> 8);
+ writeBuffer[3] = (byte)(v >>> 0);
+ out.write(writeBuffer, 0, 4);
incCount(4);
}
diff --git a/test/micro/org/openjdk/bench/java/io/DataOutputStreamTest.java b/test/micro/org/openjdk/bench/java/io/DataOutputStreamTest.java
new file mode 100644
index 000000000..2f573e6dd
--- /dev/null
+++ b/test/micro/org/openjdk/bench/java/io/DataOutputStreamTest.java
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package org.openjdk.bench.java.io;
+
+import org.openjdk.jmh.annotations.*;
+
+import java.io.*;
+import java.util.concurrent.TimeUnit;
+
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MICROSECONDS)
+@Fork(value = 1, warmups = 0)
+@Measurement(iterations = 6, time = 1)
+@Warmup(iterations=2, time = 2)
+@State(Scope.Benchmark)
+public class DataOutputStreamTest {
+
+ public enum BasicType {CHAR, SHORT, INT, STRING}
+ @Param({"CHAR", "SHORT", "INT", /* "STRING"*/}) BasicType basicType;
+
+ @Param({"4096"}) int size;
+
+ final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(size);
+ File f;
+ String outputString;
+ FileOutputStream fileOutputStream;
+ DataOutput bufferedFileStream, rawFileStream, byteArrayStream;
+
+ @Setup(Level.Trial)
+ public void setup() throws Exception {
+ f = File.createTempFile("DataOutputStreamTest","out");
+ fileOutputStream = new FileOutputStream(f);
+ byteArrayStream = new DataOutputStream(byteArrayOutputStream);
+ rawFileStream = new DataOutputStream(fileOutputStream);
+ bufferedFileStream = new DataOutputStream(new BufferedOutputStream(fileOutputStream));
+ outputString = new String(new byte[size]);
+ }
+
+ public void writeChars(DataOutput dataOutput)
+ throws Exception {
+ for (int i = 0; i < size; i += 2) {
+ dataOutput.writeChar(i);
+ }
+ }
+
+ public void writeShorts(DataOutput dataOutput)
+ throws Exception {
+ for (int i = 0; i < size; i += 2) {
+ dataOutput.writeShort(i);
+ }
+ }
+
+ public void writeInts(DataOutput dataOutput)
+ throws Exception {
+ for (int i = 0; i < size; i += 4) {
+ dataOutput.writeInt(i);
+ }
+ }
+
+ public void writeString(DataOutput dataOutput)
+ throws Exception {
+ dataOutput.writeChars(outputString);
+ }
+
+ public void write(DataOutput dataOutput)
+ throws Exception {
+ switch (basicType) {
+ case CHAR:
+ writeChars(dataOutput);
+ break;
+ case SHORT:
+ writeShorts(dataOutput);
+ break;
+ case INT:
+ writeInts(dataOutput);
+ break;
+ case STRING:
+ writeString(dataOutput);
+ break;
+ }
+ }
+
+ @Benchmark
+ public void dataOutputStreamOverByteArray() throws Exception {
+ byteArrayOutputStream.reset();
+ write(byteArrayStream);
+ byteArrayOutputStream.flush();
+ }
+
+ @Benchmark
+ public void dataOutputStreamOverRawFileStream() throws Exception {
+ fileOutputStream.getChannel().position(0);
+ write(rawFileStream);
+ fileOutputStream.flush();
+ }
+
+ @Benchmark
+ public void dataOutputStreamOverBufferedFileStream() throws Exception{
+ fileOutputStream.getChannel().position(0);
+ write(bufferedFileStream);
+ fileOutputStream.flush();
+ }
+}
--
2.19.0
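
The functional change is small: writeShort, writeChar and writeInt now assemble each value in the existing writeBuffer and issue one underlying write() per value instead of two to four, which is what the included JMH benchmark measures. Usage stays the same; a short example of both points made by the updated javadoc — single-threaded use benefits from buffering as before, and sharing a stream between threads still needs external synchronization (the file name is illustrative):

    import java.io.BufferedOutputStream;
    import java.io.DataOutputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class DataOutputStreamUsage {
        public static void main(String[] args) throws IOException {
            try (DataOutputStream out = new DataOutputStream(
                    new BufferedOutputStream(new FileOutputStream("values.bin")))) {
                for (int i = 0; i < 10_000; i++) {
                    out.writeInt(i);              // one write() call per value after the patch
                    out.writeShort(i & 0xFFFF);
                    out.writeChar('x');
                }
            }
        }

        // Per the clarified javadoc, a DataOutputStream shared between threads
        // must be guarded externally, for example by synchronizing on the stream.
        static void writeRecord(DataOutputStream shared, int id, short flags) throws IOException {
            synchronized (shared) {
                shared.writeInt(id);
                shared.writeShort(flags);
            }
        }
    }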

@ -1,16 +0,0 @@
--- a/make/autoconf/version-numbers Mon Nov 02 20:12:55 2020 +0100
+++ b/make/autoconf/version-numbers Mon Nov 02 20:20:05 2020 +0100
@@ -29,11 +29,11 @@
DEFAULT_VERSION_FEATURE=11
DEFAULT_VERSION_INTERIM=0
DEFAULT_VERSION_UPDATE=9
-DEFAULT_VERSION_PATCH=0
+DEFAULT_VERSION_PATCH=1
DEFAULT_VERSION_EXTRA1=0
DEFAULT_VERSION_EXTRA2=0
DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2020-10-20
+DEFAULT_VERSION_DATE=2020-11-04
DEFAULT_VERSION_CLASSFILE_MAJOR=55 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="10 11"

@ -0,0 +1,23 @@
From ae703c0e86c278359f1bddcf35ccba87b556d901 Mon Sep 17 00:00:00 2001
Date: Thu, 18 Mar 2021 12:37:11 +0000
Subject: [PATCH 4/4] Fix the memcpy symbol issue during JDK11 x64 build
---
make/lib/Awt2dLibraries.gmk | 1 +
1 file changed, 1 insertion(+)
diff --git a/make/lib/Awt2dLibraries.gmk b/make/lib/Awt2dLibraries.gmk
index 207a459ae..7b0441507 100644
--- a/make/lib/Awt2dLibraries.gmk
+++ b/make/lib/Awt2dLibraries.gmk
@@ -597,6 +597,7 @@ else
$(eval $(call SetupJdkLibrary, BUILD_LIBHARFBUZZ, \
NAME := harfbuzz, \
EXCLUDE_FILES := $(LIBHARFBUZZ_EXCLUDE_FILES), \
+ EXTRA_FILES := $(LIBMEMCPY_FILES), \
TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
CFLAGS := $(CFLAGS_JDKLIB) $(LIBHARFBUZZ_CFLAGS), \
CXXFLAGS := $(CXXFLAGS_JDKLIB) $(LIBHARFBUZZ_CFLAGS), \
--
2.19.0

add-G1-Full-GC-optimization.patch Executable file
@ -0,0 +1,752 @@
From 54bd3b89d00c7eba9119e3dfa3d49b7c9ec79d30 Mon Sep 17 00:00:00 2001
Date: Tue, 16 Mar 2021 07:09:02 +0000
Subject: [PATCH 3/4] add G1 Full GC optimization
---
src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 15 +++-
src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 2 +-
src/hotspot/share/gc/g1/g1FullCollector.cpp | 5 ++
src/hotspot/share/gc/g1/g1FullCollector.hpp | 3 +
.../share/gc/g1/g1FullGCCompactTask.cpp | 14 +++
.../share/gc/g1/g1FullGCCompactTask.hpp | 1 +
src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp | 2 +
.../share/gc/g1/g1FullGCMarker.inline.hpp | 5 ++
.../share/gc/g1/g1FullGCPrepareTask.cpp | 52 ++++++++---
.../share/gc/g1/g1FullGCPrepareTask.hpp | 7 +-
src/hotspot/share/gc/g1/g1MarkLiveWords.cpp | 37 ++++++++
src/hotspot/share/gc/g1/g1MarkLiveWords.hpp | 34 +++++++
src/hotspot/share/gc/g1/g1MarkRegionCache.cpp | 49 +++++++++++
src/hotspot/share/gc/g1/g1MarkRegionCache.hpp | 40 +++++++++
src/hotspot/share/gc/g1/g1_globals.hpp | 10 ++-
src/hotspot/share/gc/g1/heapRegion.cpp | 3 +-
src/hotspot/share/gc/g1/heapRegion.hpp | 9 +-
src/hotspot/share/gc/g1/heapRegionManager.hpp | 1 +
src/hotspot/share/gc/g1/heapRegionSet.cpp | 15 ----
src/hotspot/share/gc/g1/heapRegionSet.hpp | 2 -
test/hotspot/jtreg/gc/g1/TestG1NoMoving.java | 88 +++++++++++++++++++
21 files changed, 359 insertions(+), 35 deletions(-)
create mode 100644 src/hotspot/share/gc/g1/g1MarkLiveWords.cpp
create mode 100644 src/hotspot/share/gc/g1/g1MarkLiveWords.hpp
create mode 100644 src/hotspot/share/gc/g1/g1MarkRegionCache.cpp
create mode 100644 src/hotspot/share/gc/g1/g1MarkRegionCache.hpp
create mode 100644 test/hotspot/jtreg/gc/g1/TestG1NoMoving.java
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 130f8ec0a..7e9c6254c 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -2571,6 +2571,17 @@ void G1CollectedHeap::gc_epilogue(bool full) {
_numa->print_statistics();
}
+void G1CollectedHeap::verify_numa_regions(const char* desc) {
+ LogTarget(Trace, gc, heap, verify) lt;
+
+ if (lt.is_enabled()) {
+ LogStream ls(lt);
+ // Iterate all heap regions to print matching between preferred numa id and actual numa id.
+ G1NodeIndexCheckClosure cl(desc, _numa, &ls);
+ heap_region_iterate(&cl);
+ }
+}
+
HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
uint gc_count_before,
bool* succeeded,
@@ -2975,7 +2986,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_verifier->verify_before_gc(verify_type);
_verifier->check_bitmaps("GC Start");
-
+ verify_numa_regions("GC Start");
#if COMPILER2_OR_JVMCI
DerivedPointerTable::clear();
#endif
@@ -3129,7 +3140,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_verifier->verify_after_gc(verify_type);
_verifier->check_bitmaps("GC End");
-
+ verify_numa_regions("GC End");
assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
_ref_processor_stw->verify_no_references_recorded();
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index aafaf6a08..bb46cae83 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -722,7 +722,7 @@ private:
void print_taskqueue_stats() const;
void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS
-
+ void verify_numa_regions(const char* desc);
// Schedule the VM operation that will do an evacuation pause to
// satisfy an allocation request of word_size. *succeeded will
// return whether the VM operation was successful (it did do an
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp
index 4362ee87e..661a3dd9f 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp
@@ -37,6 +37,7 @@
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1StringDedup.hpp"
+#include "gc/g1/g1MarkRegionCache.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
@@ -120,9 +121,11 @@ G1FullCollector::G1FullCollector(G1CollectedHeap* heap, GCMemoryManager* memory_
_preserved_marks_set.init(_num_workers);
_markers = NEW_C_HEAP_ARRAY(G1FullGCMarker*, _num_workers, mtGC);
_compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);
+ _no_moving_region_compaction_points = NEW_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _num_workers, mtGC);
for (uint i = 0; i < _num_workers; i++) {
_markers[i] = new G1FullGCMarker(i, _preserved_marks_set.get(i), mark_bitmap());
_compaction_points[i] = new G1FullGCCompactionPoint();
+ _no_moving_region_compaction_points[i] = new G1FullGCCompactionPoint();
_oop_queue_set.register_queue(i, marker(i)->oop_stack());
_array_queue_set.register_queue(i, marker(i)->objarray_stack());
}
@@ -132,9 +135,11 @@ G1FullCollector::~G1FullCollector() {
for (uint i = 0; i < _num_workers; i++) {
delete _markers[i];
delete _compaction_points[i];
+ delete _no_moving_region_compaction_points[i];
}
FREE_C_HEAP_ARRAY(G1FullGCMarker*, _markers);
FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _compaction_points);
+ FREE_C_HEAP_ARRAY(G1FullGCCompactionPoint*, _no_moving_region_compaction_points);
}
void G1FullCollector::prepare_collection() {
diff --git a/src/hotspot/share/gc/g1/g1FullCollector.hpp b/src/hotspot/share/gc/g1/g1FullCollector.hpp
index 0b97abeea..f81fe1059 100644
--- a/src/hotspot/share/gc/g1/g1FullCollector.hpp
+++ b/src/hotspot/share/gc/g1/g1FullCollector.hpp
@@ -66,6 +66,8 @@ class G1FullCollector : StackObj {
G1IsAliveClosure _is_alive;
ReferenceProcessorIsAliveMutator _is_alive_mutator;
+ G1FullGCCompactionPoint** _no_moving_region_compaction_points;
+
static uint calc_active_workers();
G1FullGCSubjectToDiscoveryClosure _always_subject_to_discovery;
@@ -83,6 +85,7 @@ public:
uint workers() { return _num_workers; }
G1FullGCMarker* marker(uint id) { return _markers[id]; }
G1FullGCCompactionPoint* compaction_point(uint id) { return _compaction_points[id]; }
+ G1FullGCCompactionPoint* no_moving_region_compaction_point(uint id) { return _no_moving_region_compaction_points[id]; }
OopQueueSet* oop_queue_set() { return &_oop_queue_set; }
ObjArrayTaskQueueSet* array_queue_set() { return &_array_queue_set; }
PreservedMarksSet* preserved_mark_set() { return &_preserved_marks_set; }
diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
index 0c2fc088f..eab1b2121 100644
--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp
@@ -87,6 +87,11 @@ void G1FullGCCompactTask::compact_region(HeapRegion* hr) {
hr->complete_compaction();
}
+void G1FullGCCompactTask::process_no_moving_region(HeapRegion* hr) {
+ collector()->mark_bitmap()->clear_region(hr);
+ hr->reset_no_compaction_region_during_compaction();
+}
+
void G1FullGCCompactTask::work(uint worker_id) {
Ticks start = Ticks::now();
GrowableArray<HeapRegion*>* compaction_queue = collector()->compaction_point(worker_id)->regions();
@@ -96,6 +101,15 @@ void G1FullGCCompactTask::work(uint worker_id) {
compact_region(*it);
}
+ if (G1FullGCNoMoving) {
+ GrowableArray<HeapRegion*>* no_move_region_queue = collector()->no_moving_region_compaction_point(worker_id)->regions();
+ for (GrowableArrayIterator<HeapRegion*> it = no_move_region_queue->begin();
+ it != no_move_region_queue->end();
+ ++it) {
+ process_no_moving_region(*it);
+ }
+ }
+
G1ResetHumongousClosure hc(collector()->mark_bitmap());
G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&hc, &_claimer, worker_id);
log_task("Compaction task", worker_id, start);
diff --git a/src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp b/src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp
index 6c8eaf596..25221599a 100644
--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.hpp
@@ -41,6 +41,7 @@ protected:
private:
void compact_region(HeapRegion* hr);
+ void process_no_moving_region(HeapRegion* hr);
public:
G1FullGCCompactTask(G1FullCollector* collector) :
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp
index d2c4b8d60..d982ef94a 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp
@@ -29,6 +29,7 @@
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
#include "gc/g1/g1FullGCReferenceProcessorExecutor.hpp"
+#include "gc/g1/g1MarkLiveWords.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "memory/iterator.inline.hpp"
@@ -42,6 +43,7 @@ G1FullGCMarkTask::G1FullGCMarkTask(G1FullCollector* collector) :
}
void G1FullGCMarkTask::work(uint worker_id) {
+ G1MarkLiveWords g1_mark_live_words;
Ticks start = Ticks::now();
ResourceMark rm;
G1FullGCMarker* marker = collector()->marker(worker_id);
diff --git a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
index 98a2fe7f1..78555b30f 100644
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp
@@ -31,6 +31,7 @@
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/g1/g1StringDedupQueue.hpp"
+#include "gc/g1/g1MarkLiveWords.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
@@ -68,6 +69,10 @@ template <class T> inline void G1FullGCMarker::mark_and_push(T* p) {
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if (mark_object(obj)) {
+ uint hr_index = G1CollectedHeap::heap()->addr_to_region((HeapWord*)obj);
+ if (_tl_live_words_cache != NULL) {
+ _tl_live_words_cache->inc_live(hr_index, (size_t)obj->size());
+ }
_oop_stack.push(obj);
assert(_bitmap->is_marked(obj), "Must be marked now - map self");
} else {
diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
index 3f0e18fc8..2cc9c87d0 100644
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
@@ -78,7 +78,8 @@ bool G1FullGCPrepareTask::has_freed_regions() {
void G1FullGCPrepareTask::work(uint worker_id) {
Ticks start = Ticks::now();
G1FullGCCompactionPoint* compaction_point = collector()->compaction_point(worker_id);
- G1CalculatePointersClosure closure(collector()->mark_bitmap(), compaction_point);
+ G1FullGCCompactionPoint* no_moving_regions_compaction_point = collector()->no_moving_region_compaction_point(worker_id);
+ G1CalculatePointersClosure closure(collector()->mark_bitmap(), compaction_point, no_moving_regions_compaction_point);
G1CollectedHeap::heap()->heap_region_par_iterate_from_start(&closure, &_hrclaimer);
// Update humongous region sets
@@ -93,11 +94,14 @@ void G1FullGCPrepareTask::work(uint worker_id) {
}
G1FullGCPrepareTask::G1CalculatePointersClosure::G1CalculatePointersClosure(G1CMBitMap* bitmap,
- G1FullGCCompactionPoint* cp) :
+ G1FullGCCompactionPoint* cp,
+ G1FullGCCompactionPoint* no_moving_regions_cp) :
_g1h(G1CollectedHeap::heap()),
_bitmap(bitmap),
_cp(cp),
- _humongous_regions_removed(0) { }
+ _no_moving_regions_cp(no_moving_regions_cp),
+ _humongous_regions_removed(0),
+ _hr_live_bytes_threshold((size_t)HeapRegion::GrainBytes * G1NoMovingRegionLiveBytesLowerThreshold / 100) { }
void G1FullGCPrepareTask::G1CalculatePointersClosure::free_humongous_region(HeapRegion* hr) {
FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
@@ -113,7 +117,7 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::free_humongous_region(Heap
void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
hr->rem_set()->clear();
hr->clear_cardtable();
-
+ hr->set_live_words_after_mark((size_t)0);
if (_g1h->g1_hot_card_cache()->use_cache()) {
_g1h->g1_hot_card_cache()->reset_card_counts(hr);
}
@@ -151,13 +155,41 @@ void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_wor
}
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
- if (!_cp->is_initialized()) {
- hr->set_compaction_top(hr->bottom());
- _cp->initialize(hr, true);
+ size_t live_bytes_after_mark = hr->live_bytes_after_mark();
+ if(!G1FullGCNoMoving || live_bytes_after_mark < _hr_live_bytes_threshold || hr->is_humongous()) {
+ if (!_cp->is_initialized()) {
+ hr->set_compaction_top(hr->bottom());
+ _cp->initialize(hr, true);
+ }
+ // Add region to the compaction queue and prepare it.
+ _cp->add(hr);
+ prepare_for_compaction_work(_cp, hr);
+ } else {
+ prepare_no_moving_region(hr);
+ _no_moving_regions_cp->add(hr);
+ log_debug(gc, phases)("no moving region index: %u, live bytes: " SIZE_FORMAT, hr->hrm_index(), live_bytes_after_mark);
+ }
+}
+
+void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_no_moving_region(const HeapRegion* hr) {
+ const HeapRegion* current = hr;
+ assert(!current->is_humongous(), "Should be no humongous regions");
+ HeapWord* limit = current->top();
+ HeapWord* next_addr = current->bottom();
+ while (next_addr < limit) {
+ Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
+ oop obj = oop(next_addr);
+ size_t obj_size = obj->size();
+ if (_bitmap->is_marked(next_addr)) {
+ if (obj->forwardee() != NULL) {
+ obj->init_mark_raw();
+ }
+ } else {
+ // Fill dummy object to replace dead object
+ Universe::heap()->fill_with_dummy_object(next_addr, next_addr + obj_size, true);
+ }
+ next_addr += obj_size;
}
- // Add region to the compaction queue and prepare it.
- _cp->add(hr);
- prepare_for_compaction_work(_cp, hr);
}
void G1FullGCPrepareTask::prepare_serial_compaction() {
diff --git a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
index fcaf797a1..57b53c9dd 100644
--- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
+++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp
@@ -39,7 +39,6 @@ class G1FullGCPrepareTask : public G1FullGCTask {
protected:
volatile bool _freed_regions;
HeapRegionClaimer _hrclaimer;
-
void set_freed_regions();
public:
@@ -54,16 +53,20 @@ protected:
G1CollectedHeap* _g1h;
G1CMBitMap* _bitmap;
G1FullGCCompactionPoint* _cp;
+ G1FullGCCompactionPoint* _no_moving_regions_cp;
uint _humongous_regions_removed;
+ size_t _hr_live_bytes_threshold;
virtual void prepare_for_compaction(HeapRegion* hr);
void prepare_for_compaction_work(G1FullGCCompactionPoint* cp, HeapRegion* hr);
void free_humongous_region(HeapRegion* hr);
void reset_region_metadata(HeapRegion* hr);
+ void prepare_no_moving_region(const HeapRegion* hr);
public:
G1CalculatePointersClosure(G1CMBitMap* bitmap,
- G1FullGCCompactionPoint* cp);
+ G1FullGCCompactionPoint* cp,
+ G1FullGCCompactionPoint* no_moving_regions_cp);
void update_sets();
bool do_heap_region(HeapRegion* hr);
diff --git a/src/hotspot/share/gc/g1/g1MarkLiveWords.cpp b/src/hotspot/share/gc/g1/g1MarkLiveWords.cpp
new file mode 100644
index 000000000..32da3800a
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1MarkLiveWords.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021, Huawei Technologies Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "gc/g1/g1MarkLiveWords.hpp"
+
+__thread G1MarkRegionCache* _tl_live_words_cache;
+
+G1MarkLiveWords::G1MarkLiveWords() {
+ if (G1FullGCNoMoving) {
+ _tl_live_words_cache = new G1MarkRegionCache();
+ }
+}
+
+G1MarkLiveWords::~G1MarkLiveWords() {
+ if (G1FullGCNoMoving) {
+ delete _tl_live_words_cache;
+ _tl_live_words_cache = NULL;
+ }
+}
diff --git a/src/hotspot/share/gc/g1/g1MarkLiveWords.hpp b/src/hotspot/share/gc/g1/g1MarkLiveWords.hpp
new file mode 100644
index 000000000..a11a4ca52
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1MarkLiveWords.hpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021, Huawei Technologies Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef SHARE_VM_GC_G1_G1MARKLIVEWORDS_HPP
+#define SHARE_VM_GC_G1_G1MARKLIVEWORDS_HPP
+
+#include "gc/g1/g1MarkRegionCache.hpp"
+
+extern __thread G1MarkRegionCache* _tl_live_words_cache;
+class G1MarkLiveWords {
+public:
+ G1MarkLiveWords();
+ ~G1MarkLiveWords();
+};
+
+#endif
diff --git a/src/hotspot/share/gc/g1/g1MarkRegionCache.cpp b/src/hotspot/share/gc/g1/g1MarkRegionCache.cpp
new file mode 100644
index 000000000..37922e8cf
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1MarkRegionCache.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2021, Huawei Technologies Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "gc/g1/g1MarkRegionCache.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "runtime/atomic.hpp"
+
+G1MarkRegionCache::G1MarkRegionCache() {
+ _cache = NEW_C_HEAP_ARRAY(size_t, G1CollectedHeap::heap()->max_regions(), mtGC);
+ memset(_cache, 0 , sizeof(size_t)*G1CollectedHeap::heap()->max_regions());
+}
+void G1MarkRegionCache::inc_live(uint hr_index, size_t words) {
+ _cache[hr_index] += words;
+}
+
+void* G1MarkRegionCache::operator new(size_t size) {
+ return (address)AllocateHeap(size, mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
+}
+
+void G1MarkRegionCache::operator delete(void* p) {
+ FreeHeap(p);
+}
+
+G1MarkRegionCache::~G1MarkRegionCache() {
+ for (uint i = 0; i < G1CollectedHeap::heap()->max_regions(); ++i) {
+ if (_cache[i]) {
+ Atomic::add(_cache[i], G1CollectedHeap::heap()->region_at(i)->live_words_addr());
+ }
+ }
+ FREE_C_HEAP_ARRAY(size_t, _cache);
+}
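The cache exists to keep marking threads from contending on the shared per-region counter: each worker accumulates live words in a private array indexed by region, and only the destructor publishes the totals, one Atomic::add per touched region, into HeapRegion::_live_words. A self-contained sketch of the same pattern outside HotSpot (standard C++ atomics stand in for HotSpot's Atomic class, and the region count is made up):

    #include <atomic>
    #include <cstddef>

    constexpr std::size_t kMaxRegions = 1024;            // illustrative region count

    // Shared per-region counters, analogous to HeapRegion::_live_words.
    std::atomic<std::size_t> g_live_words[kMaxRegions];

    // Per-thread accumulator, analogous to G1MarkRegionCache.
    class LocalLiveWordsCache {
      std::size_t _cache[kMaxRegions] = {};              // private, uncontended counts
    public:
      void inc_live(std::size_t region, std::size_t words) { _cache[region] += words; }
      ~LocalLiveWordsCache() {                           // one atomic add per touched region
        for (std::size_t i = 0; i < kMaxRegions; ++i) {
          if (_cache[i] != 0) {
            g_live_words[i].fetch_add(_cache[i], std::memory_order_relaxed);
          }
        }
      }
    };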
diff --git a/src/hotspot/share/gc/g1/g1MarkRegionCache.hpp b/src/hotspot/share/gc/g1/g1MarkRegionCache.hpp
new file mode 100644
index 000000000..0615fcab6
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1MarkRegionCache.hpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021, Huawei Technologies Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef SHARE_VM_GC_G1_G1MARKREGIONCACHE_HPP
+#define SHARE_VM_GC_G1_G1MARKREGIONCACHE_HPP
+
+#include "memory/allocation.hpp"
+
+class G1MarkRegionCache {
+private:
+ size_t* _cache;
+public:
+ G1MarkRegionCache();
+ void inc_live(uint hr_index, size_t words);
+
+ void* operator new(size_t size);
+ void operator delete(void* p);
+
+ ~G1MarkRegionCache();
+};
+
+#endif
diff --git a/src/hotspot/share/gc/g1/g1_globals.hpp b/src/hotspot/share/gc/g1/g1_globals.hpp
index 8c7aec847..e035e0713 100644
--- a/src/hotspot/share/gc/g1/g1_globals.hpp
+++ b/src/hotspot/share/gc/g1/g1_globals.hpp
@@ -302,6 +302,14 @@
"Verify the code root lists attached to each heap region.") \
\
develop(bool, G1VerifyBitmaps, false, \
- "Verifies the consistency of the marking bitmaps")
+ "Verifies the consistency of the marking bitmaps") \
+ \
+  product(double, G1NoMovingRegionLiveBytesLowerThreshold, 98.0,         \
+          "Lower threshold of the live-bytes percentage of a heap "      \
+          "region in the G1 Mark Sweep phase")                           \
+ range(50.0, 100.0) \
+ \
+  product(bool, G1FullGCNoMoving, false,                                 \
+          "Enable the no-moving region mode in G1 full GC")
#endif // SHARE_VM_GC_G1_G1_GLOBALS_HPP
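Both new options are product flags, so they can be set on a normal java command line, for example java -XX:+UseG1GC -XX:+G1FullGCNoMoving -XX:G1NoMovingRegionLiveBytesLowerThreshold=98.0 -Xmx8g MyApp (application name and heap size are illustrative); the range() clause above constrains the threshold to values between 50.0 and 100.0.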
diff --git a/src/hotspot/share/gc/g1/heapRegion.cpp b/src/hotspot/share/gc/g1/heapRegion.cpp
index 85840bc6f..c81695eae 100644
--- a/src/hotspot/share/gc/g1/heapRegion.cpp
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp
@@ -243,7 +243,8 @@ HeapRegion::HeapRegion(uint hrm_index,
_surv_rate_group(NULL), _age_index(-1),
_prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
_recorded_rs_length(0), _predicted_elapsed_time_ms(0),
- _node_index(G1NUMA::UnknownNodeIndex)
+ _node_index(G1NUMA::UnknownNodeIndex),
+ _live_words(0)
{
_rem_set = new HeapRegionRemSet(bot, this);
diff --git a/src/hotspot/share/gc/g1/heapRegion.hpp b/src/hotspot/share/gc/g1/heapRegion.hpp
index 12a4eb8c3..023febbfc 100644
--- a/src/hotspot/share/gc/g1/heapRegion.hpp
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp
@@ -246,7 +246,7 @@ class HeapRegion: public G1ContiguousSpace {
// in each heap region.
size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
-
+ size_t _live_words;
// The calculated GC efficiency of the region.
double _gc_efficiency;
@@ -320,6 +320,10 @@ class HeapRegion: public G1ContiguousSpace {
~((1 << (size_t) LogOfHRGrainBytes) - 1);
}
+ void reset_no_compaction_region_during_compaction() {
+ zero_marked_bytes();
+ init_top_at_mark_start();
+ }
// Returns whether a field is in the same region as the obj it points to.
template <typename T>
@@ -369,6 +373,9 @@ class HeapRegion: public G1ContiguousSpace {
// The number of bytes marked live in the region in the last marking phase.
size_t marked_bytes() { return _prev_marked_bytes; }
+ size_t* live_words_addr() { return &_live_words; }
+ size_t live_bytes_after_mark() { return _live_words * HeapWordSize; }
+ void set_live_words_after_mark(size_t live_words) { _live_words = live_words; }
size_t live_bytes() {
return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
}
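live_bytes_after_mark() just rescales the counter filled in by the marking cache: with the 8-byte HeapWordSize of a 64-bit JVM, a region whose _live_words reached 100000 would report 800000 live bytes. Presumably the full-GC code compares this, as a percentage of region capacity, against G1NoMovingRegionLiveBytesLowerThreshold when deciding whether a region may stay in place; that comparison is not part of the hunks shown here.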
diff --git a/src/hotspot/share/gc/g1/heapRegionManager.hpp b/src/hotspot/share/gc/g1/heapRegionManager.hpp
index 3edc1a9fb..85e6e024e 100644
--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp
+++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp
@@ -29,6 +29,7 @@
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/heapRegionSet.hpp"
+#include "gc/g1/g1RegionsOnNodes.hpp"
#include "services/memoryUsage.hpp"
class HeapRegion;
diff --git a/src/hotspot/share/gc/g1/heapRegionSet.cpp b/src/hotspot/share/gc/g1/heapRegionSet.cpp
index eb8430ff6..322f0e32a 100644
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp
@@ -244,21 +244,6 @@ void FreeRegionList::remove_starting_at(HeapRegion* first, uint num_regions) {
verify_optional();
}
-uint FreeRegionList::num_of_regions_in_range(uint start, uint end) const {
- HeapRegion* cur = _head;
- uint num = 0;
- while (cur != NULL) {
- uint index = cur->hrm_index();
- if (index > end) {
- break;
- } else if (index >= start) {
- num++;
- }
- cur = cur->next();
- }
- return num;
-}
-
void FreeRegionList::verify() {
// See comment in HeapRegionSetBase::verify() about MT safety and
// verification.
diff --git a/src/hotspot/share/gc/g1/heapRegionSet.hpp b/src/hotspot/share/gc/g1/heapRegionSet.hpp
index 71b89668a..2ad10acf7 100644
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp
@@ -230,8 +230,6 @@ public:
virtual void verify();
- uint num_of_regions_in_range(uint start, uint end) const;
-
using HeapRegionSetBase::length;
uint length(uint node_index) const;
};
diff --git a/test/hotspot/jtreg/gc/g1/TestG1NoMoving.java b/test/hotspot/jtreg/gc/g1/TestG1NoMoving.java
new file mode 100644
index 000000000..2f892773b
--- /dev/null
+++ b/test/hotspot/jtreg/gc/g1/TestG1NoMoving.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2021, Huawei Technologies Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+ * @test TestG1NoMoving
+ * @summary Test that a full gc with -XX:+G1FullGCNoMoving reports "no moving region" in the gc+phases log
+ * @key gc
+ * @requires vm.gc.G1
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ * java.management
+ * @run main/othervm TestG1NoMoving
+ */
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import jdk.test.lib.Platform;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class TestG1NoMoving {
+ public static void runTest() throws Exception {
+ final String[] arguments = {
+ "-XX:+UseG1GC",
+ "-XX:+G1FullGCNoMoving",
+ "-Xmx8m",
+ "-Xms8M",
+ "-Xlog:gc+phases=debug",
+ "-XX:G1HeapRegionSize=1m",
+ GCTest.class.getName()
+ };
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(arguments);
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ System.out.println(output.getStdout());
+
+ String pattern = ".*no moving region.*";
+ Pattern r = Pattern.compile(pattern);
+ Matcher m = r.matcher(output.getStdout());
+
+ if (!m.find()) {
+ throw new RuntimeException("Could not find any no moving region output");
+ }
+
+ }
+
+ public static void main(String[] args) throws Exception {
+ runTest();
+ }
+
+ static class GCTest {
+ public static List<char[]> memory;
+ public static void main(String[] args) throws Exception {
+ memory = new ArrayList<>();
+ try {
+ while (true) {
+ memory.add(new char[1024]);
+ System.gc();
+ }
+ } catch (OutOfMemoryError e) {
+ memory = null;
+ System.gc();
+ }
+ }
+ }
+}
+
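The test repeatedly allocates and calls System.gc() in a child JVM with an 8 MB heap, then scans the gc+phases debug log for a "no moving region" message, so it only checks that the mode takes effect, not how many regions stayed in place. It can also be run standalone with jtreg, for example jtreg -jdk:$JDK test/hotspot/jtreg/gc/g1/TestG1NoMoving.java (invocation illustrative).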
--
2.19.0

add-LazyBox-feature.patch Executable file

File diff suppressed because it is too large.


@ -12,9 +12,9 @@ index bb9721c8e..3774dd730 100644
+ )))
+ endif
+
ifeq ($(call check-jvm-feature, shenandoahgc), true)
ifeq ($(call check-jvm-feature, zgc), true)
AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/shenandoah/shenandoah_$(HOTSPOT_TARGET_CPU).ad \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU).ad \
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 617b2b8fb..eab0101b0 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad


@ -0,0 +1,46 @@
From f9a030a58fcae2352e1b4a629901b6047c2f6610 Mon Sep 17 00:00:00 2001
Date: Thu, 18 Mar 2021 12:34:06 +0000
Subject: [PATCH 1/4] downgrade the symver of log2f posix spawn
---
src/hotspot/share/opto/parse2.cpp | 8 ++++++++
src/java.base/unix/native/libjava/ProcessImpl_md.c | 4 ++++
2 files changed, 12 insertions(+)
diff --git a/src/hotspot/share/opto/parse2.cpp b/src/hotspot/share/opto/parse2.cpp
index 4cbc57eb8..2b21881bc 100644
--- a/src/hotspot/share/opto/parse2.cpp
+++ b/src/hotspot/share/opto/parse2.cpp
@@ -45,6 +45,14 @@
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
+#ifdef AARCH64
+ __asm__(".symver log2f,log2f@GLIBC_2.17");
+#endif
+
+#ifdef AMD64
+ __asm__(".symver log2f,log2f@GLIBC_2.2.5");
+#endif
+
#ifndef PRODUCT
extern int explicit_null_checks_inserted,
explicit_null_checks_elided;
diff --git a/src/java.base/unix/native/libjava/ProcessImpl_md.c b/src/java.base/unix/native/libjava/ProcessImpl_md.c
index d0c2543ce..09d71b874 100644
--- a/src/java.base/unix/native/libjava/ProcessImpl_md.c
+++ b/src/java.base/unix/native/libjava/ProcessImpl_md.c
@@ -48,6 +48,10 @@
#include "childproc.h"
+#if defined(amd64)
+ __asm__(".symver posix_spawn,posix_spawn@GLIBC_2.2.5");
+#endif
+
/*
* There are 4 possible strategies we might use to "fork":
*
--
2.19.0
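Both hunks rely on the same GCC/binutils technique: a file-scope .symver directive rebinds this translation unit's reference to an explicitly versioned glibc symbol, so a JDK built against a newer glibc still resolves on older systems that only export the old version. A standalone illustration of the x86_64 case (the wrapper function is made up; the symbol and version tag are the ones used in the patch):

    #include <math.h>

    // Bind the log2f reference to the GLIBC_2.2.5 version instead of the
    // newest default exported by the build machine's glibc.
    __asm__(".symver log2f,log2f@GLIBC_2.2.5");

    float log2_of(float x) {
      return log2f(x);
    }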


@ -114,7 +114,7 @@
# New Version-String scheme-style defines
%global majorver 11
%global securityver 9
%global securityver 10
# buildjdkver is usually same as %%{majorver},
# but at the time of bootstrapping the next jdk, it is majorver-1,
# and thus it is better to change it here, in a single place
@ -130,16 +130,21 @@
%global origin_nice OpenJDK
%global top_level_dir_name %{origin}
%global minorver 0
%global buildver 11
%global buildver 9
%global patchver 0
%global project jdk-updates
%global repo jdk11u
%global revision jdk-11.0.9-ga
%global revision jdk-11.0.10-ga
%global full_revision %{project}-%{repo}-%{revision}
# priority must be 7 digits in total
# setting to 1, so debug ones can have 0
%global priority 00000%{minorver}1
%if %{patchver}
%global newjavaver %{majorver}.%{minorver}.%{securityver}.%{patchver}
%else
%global newjavaver %{majorver}.%{minorver}.%{securityver}
%endif
%global javaver %{majorver}
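With the values defined above (majorver 11, minorver 0, securityver 10, patchver 0), the %if branch is skipped and newjavaver expands to 11.0.10; appending %{buildver} (9) in the Version: tag below gives 11.0.10.9, which matches the jdk-11.0.10-ga revision and the 1:11.0.10.9-* changelog entries.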
@ -735,7 +740,7 @@ Provides: java-src%{?1} = %{epoch}:%{version}-%{release}
Name: java-%{javaver}-%{origin}
Version: %{newjavaver}.%{buildver}
Release: 10
Release: 6
# java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons
# and this change was brought into RHEL-4. java-1.5.0-ibm packages
# also included the epoch in their virtual provides. This created a
@ -823,7 +828,6 @@ Patch37: fix-compile-error-without-disable-precompiled-headers.patch
Patch38: fast-serializer-jdk11.patch
Patch39: fix-jck-failure-on-FastSerializer.patch
Patch40: 8223667-ASAN-build-broken.patch
Patch41: 8229495-SIGILL-in-C2-generated-OSR-compilation.patch
Patch42: 8229496-SIGFPE-division-by-zero-in-C2-OSR-compiled-method.patch
Patch43: 8243670-Unexpected-test-result-caused-by-C2-MergeMem.patch
Patch45: leaf-optimize-in-ParallelScanvageGC.patch
@ -834,9 +838,6 @@ Patch49: 8237483-AArch64-C1-OopMap-inserted-twice-fatal-error.patch
Patch50: 8248336-AArch64-C2-offset-overflow-in-BoxLockNode-em.patch
# 11.0.9
Patch51: 8255781-Bump-patch-update-version-for-OpenJDK-jdk-11.0.9.1.patch
Patch52: 8250861-Crash-in-MinINode-Ideal.patch
Patch53: 8236512-PKCS11-Connection-closed-after-Cipher-doFinal-and-NoPadding.patch
Patch54: 8207160-ClassReader-adjustMethodParams-can-potentially-return-null-if-the-args-list-is-empty.patch
Patch55: 8215047-Task-terminators-do-not-complete-termination-in-consistent-state.patch
Patch56: 8247766-aarch64-guarantee-val-1U--nbits-failed-Field-too-big-for-insn.patch
@ -844,6 +845,16 @@ Patch57: add-zgc-parameter-adaptation-feature.patch
Patch58: add-integerCache-feature.patch
Patch59: add-SVE-backend-feature.patch
#11.0.10
Patch60: 8240353.patch
Patch61: downgrade-the-symver-of-log2f-posix-spawn.patch
Patch62: 8254078-DataOutputStream-is-very-slow-post-disabling.patch
Patch63: 8217918-C2-XX-AggressiveUnboxing-is-broken.patch
Patch64: Fix-the-memcpy-symbol-issue-during-JDK11-x64-build.patch
Patch65: add-LazyBox-feature.patch
Patch66: add-G1-Full-GC-optimization.patch
Patch67: 8214535-support-Jmap-parallel.patch
BuildRequires: autoconf
BuildRequires: alsa-lib-devel
BuildRequires: binutils
@ -856,6 +867,7 @@ BuildRequires: freetype-devel
BuildRequires: giflib-devel
BuildRequires: gcc-c++
BuildRequires: gdb
BuildRequires: harfbuzz-devel
BuildRequires: lcms2-devel
BuildRequires: libjpeg-devel
BuildRequires: libpng-devel
@ -1097,7 +1109,6 @@ pushd %{top_level_dir_name}
%patch38 -p1
%patch39 -p1
%patch40 -p1
%patch41 -p1
%patch42 -p1
%patch43 -p1
%patch45 -p1
@ -1106,15 +1117,20 @@ pushd %{top_level_dir_name}
%patch48 -p1
%patch49 -p1
%patch50 -p1
%patch51 -p1
%patch52 -p1
%patch53 -p1
%patch54 -p1
%patch55 -p1
%patch56 -p1
%patch57 -p1
%patch58 -p1
%patch59 -p1
%patch60 -p1
%patch61 -p1
%patch62 -p1
%patch63 -p1
%patch64 -p1
%patch65 -p1
%patch66 -p1
%patch67 -p1
popd # openjdk
%patch1000
@ -1219,6 +1235,7 @@ bash ../configure \
--with-giflib=system \
--with-libpng=system \
--with-lcms=system \
--with-harfbuzz=system \
--with-stdc++lib=dynamic \
--with-extra-cxxflags="$EXTRA_CPP_FLAGS" \
--with-extra-cflags="$EXTRA_CFLAGS" \
@ -1408,7 +1425,7 @@ if ! echo $suffix | grep -q "debug" ; then
# Install Javadoc documentation
install -d -m 755 $RPM_BUILD_ROOT%{_javadocdir}
cp -a %{buildoutputdir -- $suffix}/images/docs $RPM_BUILD_ROOT%{_javadocdir}/%{uniquejavadocdir -- $suffix}
cp -a %{buildoutputdir -- $suffix}/bundles/jdk-%{newjavaver}.1+%{buildver}-docs.zip $RPM_BUILD_ROOT%{_javadocdir}/%{uniquejavadocdir -- $suffix}.zip
cp -a %{buildoutputdir -- $suffix}/bundles/jdk-%{newjavaver}+%{buildver}-docs.zip $RPM_BUILD_ROOT%{_javadocdir}/%{uniquejavadocdir -- $suffix}.zip
fi
# Install icons and menu entries
@ -1617,6 +1634,31 @@ require "copy_jdk_configs.lua"
%changelog
* Fri Mar 19 2021 aijm <aijiaming1@huawei.com> - 1:11.0.10.9-6
- add 8214535-support-Jmap-parallel.patch
* Fri Mar 19 2021 aijm <aijiaming1@huawei.com> - 1:11.0.10.9-5
- add add-G1-Full-GC-optimization.patch
* Fri Mar 19 2021 kuenking111 <wangkun49@huawei.com> - 1:11.0.10.9-4
- add add-LazyBox-feature.patch
* Fri Mar 19 2021 aijm <aijiaming1@huawei.com> - 1:11.0.10.9-3
- add downgrade-the-symver-of-log2f-posix-spawn.patch
- add 8254078-DataOutputStream-is-very-slow-post-disabling.patch
- add 8217918-C2-XX-AggressiveUnboxing-is-broken.patch
- add Fix-the-memcpy-symbol-issue-during-JDK11-x64-build.patch
* Sun Feb 7 2021 jdkboy <ge.guo@huawei.com> - 1:11.0.10.9-2
- remove redundant file info
* Thu Feb 5 2021 eapen <zhangyipeng7@huawei.com> - 1:11.0.10.9-1
- add 8240353.patch
* Thu Feb 5 2021 eapen <zhangyipeng7@huawei.com> - 1:11.0.10.9-0
- update to 11.0.10+9(GA)
- use system harfbuzz now that this is supported
* Thu Dec 24 2020 kuenking <wangkun49@huawei.com> - 1:11.0.9.11-10
- add add-SVE-backend-feature.patch


@ -7,7 +7,7 @@ set -e
export PROJECT_NAME="jdk-updates"
export REPO_NAME="jdk11u"
# warning, cloning without shenandoah prefix, you will clone pure jdk - thus without shenandoah GC
export VERSION="jdk-11.0.8-ga"
export VERSION="jdk-11.0.10-ga"
export COMPRESSION=xz
# unset tapsets overrides
export OPENJDK_URL=""