I2AJL2: add 8215047-Task-terminators-do-not-complete-termination-in-consistent-state.patch && add 8247766-aarch64-guarantee-val-1U--nbits-failed-Field-too-big-for-insn.patch
parent cd7a0e18c8
commit 81123ca88a
@@ -0,0 +1,399 @@
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
index 3749a99bb..ef8fb4ac0 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2202,7 +2202,10 @@ void G1CMTask::reset(G1CMBitMap* next_mark_bitmap) {
 }
 
 bool G1CMTask::should_exit_termination() {
-  regular_clock_call();
+  if (!regular_clock_call()) {
+    return true;
+  }
+
   // This is called when we are in the termination protocol. We should
   // quit if, for some reason, this task wants to abort or the global
   // stack is not empty (this means that we can get work from it).
@@ -2213,12 +2216,12 @@ void G1CMTask::reached_limit() {
   assert(_words_scanned >= _words_scanned_limit ||
          _refs_reached >= _refs_reached_limit ,
         "shouldn't have been called otherwise");
-  regular_clock_call();
+  abort_marking_if_regular_check_fail();
 }
 
-void G1CMTask::regular_clock_call() {
+bool G1CMTask::regular_clock_call() {
   if (has_aborted()) {
-    return;
+    return false;
   }
 
   // First, we need to recalculate the words scanned and refs reached
@@ -2229,21 +2232,19 @@ void G1CMTask::regular_clock_call() {
 
   // (1) If an overflow has been flagged, then we abort.
   if (_cm->has_overflown()) {
-    set_has_aborted();
-    return;
+    return false;
   }
 
   // If we are not concurrent (i.e. we're doing remark) we don't need
   // to check anything else. The other steps are only needed during
   // the concurrent marking phase.
   if (!_cm->concurrent()) {
-    return;
+    return true;
   }
 
   // (2) If marking has been aborted for Full GC, then we also abort.
   if (_cm->has_aborted()) {
-    set_has_aborted();
-    return;
+    return false;
   }
 
   double curr_time_ms = os::elapsedVTime() * 1000.0;
@@ -2252,17 +2253,15 @@ void G1CMTask::regular_clock_call() {
   if (SuspendibleThreadSet::should_yield()) {
     // We should yield. To do this we abort the task. The caller is
     // responsible for yielding.
-    set_has_aborted();
-    return;
+    return false;
   }
 
   // (5) We check whether we've reached our time quota. If we have,
   // then we abort.
   double elapsed_time_ms = curr_time_ms - _start_time_ms;
   if (elapsed_time_ms > _time_target_ms) {
-    set_has_aborted();
     _has_timed_out = true;
-    return;
+    return false;
   }
 
   // (6) Finally, we check whether there are enough completed STAB
@@ -2271,9 +2270,9 @@ void G1CMTask::regular_clock_call() {
   if (!_draining_satb_buffers && satb_mq_set.process_completed_buffers()) {
     // we do need to process SATB buffers, we'll abort and restart
     // the marking task to do so
-    set_has_aborted();
-    return;
+    return false;
   }
+  return true;
 }
 
 void G1CMTask::recalculate_limits() {
@@ -2428,7 +2427,7 @@ void G1CMTask::drain_satb_buffers() {
   // until we run out of buffers or we need to abort.
   while (!has_aborted() &&
          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
-    regular_clock_call();
+    abort_marking_if_regular_check_fail();
   }
 
   _draining_satb_buffers = false;
@@ -2671,7 +2670,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
       // If the iteration is successful, give up the region.
       if (mr.is_empty()) {
         giveup_current_region();
-        regular_clock_call();
+        abort_marking_if_regular_check_fail();
       } else if (_curr_region->is_humongous() && mr.start() == _curr_region->bottom()) {
         if (_next_mark_bitmap->is_marked(mr.start())) {
           // The object is marked - apply the closure
@@ -2680,10 +2679,10 @@ void G1CMTask::do_marking_step(double time_target_ms,
         // Even if this task aborted while scanning the humongous object
         // we can (and should) give up the current region.
         giveup_current_region();
-        regular_clock_call();
+        abort_marking_if_regular_check_fail();
       } else if (_next_mark_bitmap->iterate(&bitmap_closure, mr)) {
         giveup_current_region();
-        regular_clock_call();
+        abort_marking_if_regular_check_fail();
       } else {
         assert(has_aborted(), "currently the only way to do so");
         // The only way to abort the bitmap iteration is to return
@@ -2738,7 +2737,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
       // block of empty regions. So we need to call the regular clock
       // method once round the loop to make sure it's called
      // frequently enough.
-      regular_clock_call();
+      abort_marking_if_regular_check_fail();
    }
 
    if (!has_aborted() && _curr_region == NULL) {
@@ -2816,6 +2815,7 @@ void G1CMTask::do_marking_step(double time_target_ms,
     guarantee(_cm->mark_stack_empty(), "only way to reach here");
     guarantee(_task_queue->size() == 0, "only way to reach here");
     guarantee(!_cm->has_overflown(), "only way to reach here");
+    guarantee(!has_aborted(), "should never happen if termination has completed");
   } else {
     // Apparently there's more work to do. Let's abort this task. It
     // will restart it and we can hopefully find more things to do.
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
index b5eb26197..b760fe977 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp
@@ -730,7 +730,11 @@ private:
   // Supposed to be called regularly during a marking step as
   // it checks a bunch of conditions that might cause the marking step
   // to abort
-  void regular_clock_call();
+  // Return true if the marking step should continue. Otherwise, return false to abort
+  bool regular_clock_call();
+
+  // Set abort flag if regular_clock_call() check fail
+  inline void abort_marking_if_regular_check_fail();
 
   // Test whether obj might have already been passed over by the
   // mark bitmap scan, and so needs to be pushed onto the mark stack.
diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
index 4a969c511..383cdc563 100644
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp
@@ -210,6 +210,12 @@ inline void G1ConcurrentMark::add_to_liveness(uint worker_id, oop const obj, siz
   task(worker_id)->update_liveness(obj, size);
 }
 
+inline void G1CMTask::abort_marking_if_regular_check_fail() {
+  if (!regular_clock_call()) {
+    set_has_aborted();
+  }
+}
+
 inline bool G1CMTask::make_reference_grey(oop obj) {
   if (!_cm->mark_in_next_bitmap(_worker_id, obj)) {
     return false;
diff --git a/src/hotspot/share/gc/shared/owstTaskTerminator.cpp b/src/hotspot/share/gc/shared/owstTaskTerminator.cpp
index 3c32ab627..2856a9981 100644
--- a/src/hotspot/share/gc/shared/owstTaskTerminator.cpp
+++ b/src/hotspot/share/gc/shared/owstTaskTerminator.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
@@ -38,15 +38,17 @@ bool OWSTTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   // Single worker, done
   if (_n_threads == 1) {
     _offered_termination = 1;
+    assert(!peek_in_queue_set(), "Precondition");
     return true;
   }
 
   _blocker->lock_without_safepoint_check();
-  // All arrived, done
   _offered_termination++;
+  // All arrived, done
   if (_offered_termination == _n_threads) {
     _blocker->notify_all();
     _blocker->unlock();
+    assert(!peek_in_queue_set(), "Precondition");
     return true;
   }
 
@@ -59,21 +61,31 @@ bool OWSTTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
 
       if (do_spin_master_work(terminator)) {
         assert(_offered_termination == _n_threads, "termination condition");
+        assert(!peek_in_queue_set(), "Precondition");
        return true;
       } else {
         _blocker->lock_without_safepoint_check();
+        // There is possibility that termination is reached between dropping the lock
+        // before returning from do_spin_master_work() and acquiring lock above.
+        if (_offered_termination == _n_threads) {
+          _blocker->unlock();
+          assert(!peek_in_queue_set(), "Precondition");
+          return true;
+        }
       }
     } else {
       _blocker->wait(true, WorkStealingSleepMillis);
 
       if (_offered_termination == _n_threads) {
         _blocker->unlock();
+        assert(!peek_in_queue_set(), "Precondition");
         return true;
       }
     }
 
     size_t tasks = tasks_in_queue_set();
     if (exit_termination(tasks, terminator)) {
+      assert_lock_strong(_blocker);
       _offered_termination--;
       _blocker->unlock();
       return false;
@@ -153,19 +165,24 @@ bool OWSTTaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
       _total_peeks++;
 #endif
       size_t tasks = tasks_in_queue_set();
-      if (exit_termination(tasks, terminator)) {
+      bool exit = exit_termination(tasks, terminator);
+      {
         MonitorLockerEx locker(_blocker, Mutex::_no_safepoint_check_flag);
-        if (tasks >= _offered_termination - 1) {
-          locker.notify_all();
-        } else {
-          for (; tasks > 1; tasks--) {
-            locker.notify();
+        // Termination condition reached
+        if (_offered_termination == _n_threads) {
+          _spin_master = NULL;
+          return true;
+        } else if (exit) {
+          if (tasks >= _offered_termination - 1) {
+            locker.notify_all();
+          } else {
+            for (; tasks > 1; tasks--) {
+              locker.notify();
+            }
           }
+          _spin_master = NULL;
+          return false;
         }
-        _spin_master = NULL;
-        return false;
-      } else if (_offered_termination == _n_threads) {
-        return true;
       }
     }
   }
diff --git a/src/hotspot/share/gc/shared/owstTaskTerminator.hpp b/src/hotspot/share/gc/shared/owstTaskTerminator.hpp
index 9e6fe135a..190033eb7 100644
--- a/src/hotspot/share/gc/shared/owstTaskTerminator.hpp
+++ b/src/hotspot/share/gc/shared/owstTaskTerminator.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
@@ -55,6 +55,7 @@ public:
   }
 
   virtual ~OWSTTaskTerminator() {
+    assert(_spin_master == NULL, "Should have been reset");
     assert(_blocker != NULL, "Can not be NULL");
     delete _blocker;
   }
diff --git a/src/hotspot/share/gc/shared/taskqueue.cpp b/src/hotspot/share/gc/shared/taskqueue.cpp
index 47639bdf9..697c13645 100644
--- a/src/hotspot/share/gc/shared/taskqueue.cpp
+++ b/src/hotspot/share/gc/shared/taskqueue.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,9 @@
 #include "precompiled.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "gc/shared/owstTaskTerminator.hpp"
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#endif
 #include "oops/oop.inline.hpp"
 #include "logging/log.hpp"
 #include "runtime/atomic.hpp"
@@ -118,6 +121,14 @@ ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
   _queue_set(queue_set),
   _offered_termination(0) {}
 
+ParallelTaskTerminator::~ParallelTaskTerminator() {
+  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
+#if INCLUDE_SHENANDOAHGC
+  if (UseShenandoahGC && ShenandoahHeap::heap()->cancelled_gc()) return;
+#endif
+  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" );
+}
+
 bool ParallelTaskTerminator::peek_in_queue_set() {
   return _queue_set->peek();
 }
@@ -162,6 +173,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   assert(_offered_termination <= _n_threads, "Invariant");
   // Are all threads offering termination?
   if (_offered_termination == _n_threads) {
+    assert(!peek_in_queue_set(), "Precondition");
     return true;
   } else {
     // Look for more work.
@@ -211,9 +223,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
 #endif
       if (peek_in_queue_set() ||
           (terminator != NULL && terminator->should_exit_termination())) {
-        Atomic::dec(&_offered_termination);
-        assert(_offered_termination < _n_threads, "Invariant");
-        return false;
+        return complete_or_exit_termination();
       }
     }
   }
@@ -229,6 +239,23 @@ void ParallelTaskTerminator::print_termination_counts() {
 }
 #endif
 
+bool ParallelTaskTerminator::complete_or_exit_termination() {
+  // If termination is ever reached, terminator should stay in such state,
+  // so that all threads see the same state
+  uint current_offered = _offered_termination;
+  uint expected_value;
+  do {
+    if (current_offered == _n_threads) {
+      assert(!peek_in_queue_set(), "Precondition");
+      return true;
+    }
+    expected_value = current_offered;
+  } while ((current_offered = Atomic::cmpxchg(current_offered - 1, &_offered_termination, current_offered)) != expected_value);
+
+  assert(_offered_termination < _n_threads, "Invariant");
+  return false;
+}
+
 void ParallelTaskTerminator::reset_for_reuse() {
   if (_offered_termination != 0) {
     assert(_offered_termination == _n_threads,
diff --git a/src/hotspot/share/gc/shared/taskqueue.hpp b/src/hotspot/share/gc/shared/taskqueue.hpp
index 1b60a62c2..110757684 100644
--- a/src/hotspot/share/gc/shared/taskqueue.hpp
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp
@@ -491,11 +491,18 @@ protected:
   virtual void yield();
   void sleep(uint millis);
 
+  // Called when exiting termination is requested.
+  // When the request is made, terminator may have already terminated
+  // (e.g. all threads are arrived and offered termination). In this case,
+  // it should ignore the request and complete the termination.
+  // Return true if termination is completed. Otherwise, return false.
+  bool complete_or_exit_termination();
 public:
 
   // "n_threads" is the number of threads to be terminated. "queue_set" is a
   // queue sets of work queues of other threads.
   ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
+  virtual ~ParallelTaskTerminator();
 
   // The current thread has no work, and is ready to terminate if everyone
   // else is. If returns "true", all threads are terminated. If returns
--
2.19.1
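
The core of the 8215047 change above is that a thread leaving the termination protocol to look for more work may no longer blindly Atomic::dec the counter: once _offered_termination has reached _n_threads, the terminator must stay terminated so every worker observes the same final state. What follows is a minimal standalone sketch of that CAS loop using std::atomic in place of HotSpot's Atomic::cmpxchg; ToyTerminator and its members are illustrative names, not JDK code.

// Standalone sketch of the "never decrement past full termination" rule
// from complete_or_exit_termination(). Illustrative only.
#include <atomic>
#include <cassert>

struct ToyTerminator {
  unsigned n_threads;
  std::atomic<unsigned> offered_termination{0};

  // Returns true if termination completed; false if this thread
  // successfully backed out to go look for more work.
  bool complete_or_exit_termination() {
    unsigned current = offered_termination.load();
    for (;;) {
      // Once all threads have offered termination, the count must not
      // move backwards: a late exit request is ignored and this thread
      // completes termination instead.
      if (current == n_threads) {
        return true;
      }
      // Otherwise try to back out by decrementing; on failure another
      // thread raced us and compare_exchange_weak reloads `current`.
      if (offered_termination.compare_exchange_weak(current, current - 1)) {
        assert(current - 1 < n_threads);
        return false;
      }
    }
  }
};

The loop mirrors the guarantee the patch adds to do_marking_step(): after a completed termination, no task can still be flagged as aborted, because no thread can slip out once the count is full.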

@@ -0,0 +1,257 @@
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
index db582f25f..80ddb9b31 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -553,14 +553,7 @@ class Address {
 
   void lea(MacroAssembler *, Register) const;
 
-  static bool offset_ok_for_immed(long offset, int shift = 0) {
-    unsigned mask = (1 << shift) - 1;
-    if (offset < 0 || offset & mask) {
-      return (uabs(offset) < (1 << (20 - 12))); // Unscaled offset
-    } else {
-      return ((offset >> shift) < (1 << (21 - 10 + 1))); // Scaled, unsigned offset
-    }
-  }
+  static bool offset_ok_for_immed(long offset, uint shift);
 };
 
 // Convience classes
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.inline.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.inline.hpp
index 86eb8c2f8..a475575bf 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.inline.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.inline.hpp
@@ -30,4 +30,15 @@
 #include "asm/codeBuffer.hpp"
 #include "code/codeCache.hpp"
 
+inline bool Address::offset_ok_for_immed(long offset, uint shift = 0) {
+  uint mask = (1 << shift) - 1;
+  if (offset < 0 || (offset & mask) != 0) {
+    // Unscaled signed offset, encoded in a signed imm9 field.
+    return Assembler::is_simm9(offset);
+  } else {
+    // Scaled unsigned offset, encoded in an unsigned imm12:_ field.
+    return Assembler::is_uimm12(offset >> shift);
+  }
+}
+
 #endif // CPU_AARCH64_VM_ASSEMBLER_AARCH64_INLINE_HPP
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index f6a77dc78..7798aa509 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -226,6 +226,19 @@ Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
   // FIXME: This needs to be much more clever. See x86.
 }
 
+// Ensure a valid Address (base + offset) to a stack-slot. If stack access is
+// not encodable as a base + (immediate) offset, generate an explicit address
+// calculation to hold the address in a temporary register.
+Address LIR_Assembler::stack_slot_address(int index, uint size, Register tmp, int adjust) {
+  precond(size == 4 || size == 8);
+  Address addr = frame_map()->address_for_slot(index, adjust);
+  precond(addr.getMode() == Address::base_plus_offset);
+  precond(addr.base() == sp);
+  precond(addr.offset() > 0);
+  uint mask = size - 1;
+  assert((addr.offset() & mask) == 0, "scaled offsets only");
+  return __ legitimize_address(addr, size, tmp);
+}
 
 void LIR_Assembler::osr_entry() {
   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
@@ -745,32 +758,38 @@ void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 }
 
 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
+  precond(src->is_register() && dest->is_stack());
+
+  uint const c_sz32 = sizeof(uint32_t);
+  uint const c_sz64 = sizeof(uint64_t);
+
   if (src->is_single_cpu()) {
+    int index = dest->single_stack_ix();
     if (type == T_ARRAY || type == T_OBJECT) {
-      __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
+      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
       __ verify_oop(src->as_register());
     } else if (type == T_METADATA || type == T_DOUBLE || type == T_ADDRESS) {
-      __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
+      __ str(src->as_register(), stack_slot_address(index, c_sz64, rscratch1));
     } else {
-      __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
+      __ strw(src->as_register(), stack_slot_address(index, c_sz32, rscratch1));
     }
 
   } else if (src->is_double_cpu()) {
-    Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
+    int index = dest->double_stack_ix();
+    Address dest_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
     __ str(src->as_register_lo(), dest_addr_LO);
 
   } else if (src->is_single_fpu()) {
-    Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
-    __ strs(src->as_float_reg(), dest_addr);
+    int index = dest->single_stack_ix();
+    __ strs(src->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));
 
   } else if (src->is_double_fpu()) {
-    Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
-    __ strd(src->as_double_reg(), dest_addr);
+    int index = dest->double_stack_ix();
+    __ strd(src->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));
 
   } else {
     ShouldNotReachHere();
   }
-
 }
 
 
@@ -855,32 +874,34 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
 
 
 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
-  assert(src->is_stack(), "should not call otherwise");
-  assert(dest->is_register(), "should not call otherwise");
+  precond(src->is_stack() && dest->is_register());
+
+  uint const c_sz32 = sizeof(uint32_t);
+  uint const c_sz64 = sizeof(uint64_t);
 
   if (dest->is_single_cpu()) {
+    int index = src->single_stack_ix();
     if (type == T_ARRAY || type == T_OBJECT) {
-      __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
+      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
       __ verify_oop(dest->as_register());
     } else if (type == T_METADATA || type == T_ADDRESS) {
-      __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
+      __ ldr(dest->as_register(), stack_slot_address(index, c_sz64, rscratch1));
     } else {
-      Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
-      Address data_addr = __ form_address(rscratch1, sp, src_addr.offset(), 2);
-      __ ldrw(dest->as_register(), data_addr);
+      __ ldrw(dest->as_register(), stack_slot_address(index, c_sz32, rscratch1));
     }
 
   } else if (dest->is_double_cpu()) {
-    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
+    int index = src->double_stack_ix();
+    Address src_addr_LO = stack_slot_address(index, c_sz64, rscratch1, lo_word_offset_in_bytes);
     __ ldr(dest->as_register_lo(), src_addr_LO);
 
   } else if (dest->is_single_fpu()) {
-    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
-    __ ldrs(dest->as_float_reg(), src_addr);
+    int index = src->single_stack_ix();
+    __ ldrs(dest->as_float_reg(), stack_slot_address(index, c_sz32, rscratch1));
 
   } else if (dest->is_double_fpu()) {
-    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
-    __ ldrd(dest->as_double_reg(), src_addr);
+    int index = src->double_stack_ix();
+    __ ldrd(dest->as_double_reg(), stack_slot_address(index, c_sz64, rscratch1));
 
   } else {
     ShouldNotReachHere();
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
index 6374a33e6..9db81fed9 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -45,10 +45,12 @@ friend class ArrayCopyStub;
 
   bool is_literal_address(LIR_Address* addr);
 
-  // When we need to use something other than rscratch1 use this
-  // method.
+  // When we need to use something other than rscratch1 use this method.
   Address as_Address(LIR_Address* addr, Register tmp);
 
+  // Ensure we have a valid Address (base+offset) to a stack-slot.
+  Address stack_slot_address(int index, uint shift, Register tmp, int adjust = 0);
+
   // Record the type of the receiver in ReceiverTypeData
   void type_profile_helper(Register mdo,
                            ciMethodData *md, ciProfileData *data,
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index 44497ea7c..014a4d3c6 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -26,7 +26,7 @@
 #ifndef CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP
 #define CPU_AARCH64_VM_MACROASSEMBLER_AARCH64_HPP
 
-#include "asm/assembler.hpp"
+#include "asm/assembler.inline.hpp"
 
 // MacroAssembler extends Assembler by frequently used macros.
 //
@@ -132,6 +132,20 @@ class MacroAssembler: public Assembler {
     a.lea(this, r);
   }
 
+  /* Sometimes we get misaligned loads and stores, usually from Unsafe
+     accesses, and these can exceed the offset range. */
+  Address legitimize_address(const Address &a, int size, Register scratch) {
+    if (a.getMode() == Address::base_plus_offset) {
+      if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) {
+        block_comment("legitimize_address {");
+        lea(scratch, a);
+        block_comment("} legitimize_address");
+        return Address(scratch);
+      }
+    }
+    return a;
+  }
+
   void addmw(Address a, Register incr, Register scratch) {
     ldrw(scratch, a);
     addw(scratch, scratch, incr);
diff --git a/src/hotspot/share/asm/assembler.hpp b/src/hotspot/share/asm/assembler.hpp
index da181b90b..56c3068e4 100644
--- a/src/hotspot/share/asm/assembler.hpp
+++ b/src/hotspot/share/asm/assembler.hpp
@@ -302,6 +302,7 @@ class AbstractAssembler : public ResourceObj {
   // Define some:
   static bool is_simm5( intptr_t x) { return is_simm(x, 5 ); }
   static bool is_simm8( intptr_t x) { return is_simm(x, 8 ); }
+  static bool is_simm9( intptr_t x) { return is_simm(x, 9 ); }
   static bool is_simm10(intptr_t x) { return is_simm(x, 10); }
   static bool is_simm11(intptr_t x) { return is_simm(x, 11); }
   static bool is_simm12(intptr_t x) { return is_simm(x, 12); }
@@ -310,6 +311,15 @@ class AbstractAssembler : public ResourceObj {
   static bool is_simm26(intptr_t x) { return is_simm(x, 26); }
   static bool is_simm32(intptr_t x) { return is_simm(x, 32); }
 
+  // Test if x is within unsigned immediate range for width.
+  static bool is_uimm(intptr_t x, uint w) {
+    precond(0 < w && w < 64);
+    intptr_t limes = intptr_t(1) << w;
+    return x < limes;
+  }
+
+  static bool is_uimm12(intptr_t x) { return is_uimm(x, 12); }
+
   // Accessors
   CodeSection* code_section() const { return _code_section; }
   CodeBuffer*  code()         const { return code_section()->outer(); }
diff --git a/src/hotspot/share/utilities/debug.hpp b/src/hotspot/share/utilities/debug.hpp
index aa594754a..c66c710f2 100644
--- a/src/hotspot/share/utilities/debug.hpp
+++ b/src/hotspot/share/utilities/debug.hpp
@@ -66,6 +66,9 @@ do { \
 // For backward compatibility.
 #define assert(p, ...) vmassert(p, __VA_ARGS__)
 
+#define precond(p)  assert(p, "precond")
+#define postcond(p) assert(p, "postcond")
+
 #ifndef ASSERT
 #define vmassert_status(p, status, msg)
 #else
--
2.19.1
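
The 8247766 patch above replaces open-coded magic numbers in Address::offset_ok_for_immed with named predicates: an offset that is negative or not aligned to the access size must fit the signed 9-bit "unscaled" immediate (the LDUR/STUR form), while an aligned, non-negative offset is scaled down by the access size and must fit the unsigned 12-bit LDR/STR immediate. Here is a small self-contained sketch of that rule, with hypothetical helper names rather than the HotSpot Assembler API.

// Standalone sketch of the AArch64 load/store offset-encodability rule.
// Illustrative only; fits_simm9/fits_uimm12 are hypothetical helpers.
#include <cstdint>

// Signed 9-bit range of the "unscaled" LDUR/STUR immediate: [-256, 255].
static bool fits_simm9(int64_t x)  { return -256 <= x && x <= 255; }
// Unsigned 12-bit range of the scaled LDR/STR immediate: [0, 4095].
static bool fits_uimm12(int64_t x) { return 0 <= x && x <= 4095; }

// shift = log2 of the access size in bytes (0 = byte, 2 = 32-bit, 3 = 64-bit).
static bool offset_ok_for_immed(int64_t offset, unsigned shift) {
  uint64_t mask = (uint64_t(1) << shift) - 1;
  if (offset < 0 || (offset & mask) != 0) {
    return fits_simm9(offset);          // unscaled signed offset
  }
  return fits_uimm12(offset >> shift);  // scaled unsigned offset
}

For a 64-bit stack slot at offset 32760, for instance, 32760 >> 3 = 4095 still encodes, while offset 32768 scales to 4096 and does not; that is the case where the patch's legitimize_address() falls back to materializing the address with lea into a scratch register.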

@@ -735,7 +735,7 @@ Provides: java-src%{?1} = %{epoch}:%{version}-%{release}
 
 Name: java-%{javaver}-%{origin}
 Version: %{newjavaver}.%{buildver}
-Release: 6
+Release: 7
 # java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons
 # and this change was brought into RHEL-4. java-1.5.0-ibm packages
 # also included the epoch in their virtual provides. This created a
@@ -838,6 +838,8 @@ Patch51: 8255781-Bump-patch-update-version-for-OpenJDK-jdk-11.0.9.1.patch
 Patch52: 8250861-Crash-in-MinINode-Ideal.patch
 Patch53: 8236512-PKCS11-Connection-closed-after-Cipher-doFinal-and-NoPadding.patch
 Patch54: 8207160-ClassReader-adjustMethodParams-can-potentially-return-null-if-the-args-list-is-empty.patch
+Patch55: 8215047-Task-terminators-do-not-complete-termination-in-consistent-state.patch
+Patch56: 8247766-aarch64-guarantee-val-1U--nbits-failed-Field-too-big-for-insn.patch
 
 BuildRequires: autoconf
 BuildRequires: alsa-lib-devel
@@ -1105,6 +1107,8 @@ pushd %{top_level_dir_name}
 %patch52 -p1
 %patch53 -p1
 %patch54 -p1
+%patch55 -p1
+%patch56 -p1
 popd # openjdk
 
 %patch1000
@@ -1607,6 +1611,10 @@ require "copy_jdk_configs.lua"
 
 
 %changelog
+* Wed Dec 23 2020 alapha <sunjianye@huawei.com> - 1:11.0.9.11-7
+- add 8215047-Task-terminators-do-not-complete-termination-in-consistent-state.patch
+- add 8247766-aarch64-guarantee-val-1U--nbits-failed-Field-too-big-for-insn.patch
+
 * Wed Dec 23 2020 eapen <zhangyipeng7@huawei.com> - 1:11.0.9.11-6
 - add 8207160-ClassReader-adjustMethodParams-can-potentially-return-null-if-the-args-list-is-empty.patch
 