Update to 11.0.7+10 (GA)

jdkboy 2020-05-21 15:31:32 +08:00
parent f9c7db8ddf
commit 1018bbba9e
8 changed files with 593 additions and 203 deletions

@@ -0,0 +1,63 @@
From 06c663befa33d4a71faed3f58ae47faab753b658 Mon Sep 17 00:00:00 2001
Date: Sat, 28 Mar 2020 12:00:06 +0000
Subject: [PATCH] 8210303 VM_HandshakeAllThreads fails assert with "failed:
blocked and not walkable"
Summary: <gc>: <vmthread can process handshakes in more conditions>
LLT: jdk11u/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/sampling/SP07/sp07t002/TestDescription.java
Bug url: https://bugs.openjdk.java.net/browse/JDK-8210303
---
src/hotspot/share/runtime/handshake.cpp | 25 +++++++++++++++++++++++--
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp
index 7aac489..1891623 100644
--- a/src/hotspot/share/runtime/handshake.cpp
+++ b/src/hotspot/share/runtime/handshake.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -343,6 +343,27 @@ bool HandshakeState::vmthread_can_process_handshake(JavaThread* target) {
return SafepointSynchronize::safepoint_safe(target, target->thread_state());
}
+static bool possibly_vmthread_can_process_handshake(JavaThread* target) {
+ // An externally suspended thread cannot be resumed while the
+ // Threads_lock is held so it is safe.
+ // Note that this method is allowed to produce false positives.
+ assert(Threads_lock->owned_by_self(), "Not holding Threads_lock.");
+ if (target->is_ext_suspended()) {
+ return true;
+ }
+ switch (target->thread_state()) {
+ case _thread_in_native:
+ // native threads are safe if they have no java stack or have walkable stack
+ return !target->has_last_Java_frame() || target->frame_anchor()->walkable();
+
+ case _thread_blocked:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
bool HandshakeState::claim_handshake_for_vmthread() {
if (!_semaphore.trywait()) {
return false;
@@ -362,7 +383,7 @@ void HandshakeState::process_by_vmthread(JavaThread* target) {
return;
}
- if (!vmthread_can_process_handshake(target)) {
+ if (!possibly_vmthread_can_process_handshake(target)) {
// JT is observed in an unsafe state, it must notice the handshake itself
return;
}
--
1.8.3.1
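
The added helper is deliberately a cheap, conservative filter: it may answer "yes" for a thread that turns out not to be safe, because the authoritative vmthread_can_process_handshake() check is repeated after the semaphore is claimed. A standalone sketch of that two-stage idea (a toy model, not HotSpot code; the enum and field names only mirror the patch):

    #include <cstdio>

    // Toy model -- not HotSpot code. Names only mirror the states tested
    // by possibly_vmthread_can_process_handshake() in the patch above.
    enum ThreadState { _thread_in_native, _thread_blocked, _thread_in_Java };

    struct Target {
        ThreadState state;
        bool ext_suspended;    // is_ext_suspended(): JVMTI-suspended
        bool has_java_frame;   // has_last_Java_frame()
        bool walkable;         // frame_anchor()->walkable()
    };

    // Cheap first-stage filter: false positives are fine, because the VM
    // thread re-checks the authoritative predicate after claiming the
    // operation; a false negative only leaves the handshake to the target.
    bool possibly_can_process(const Target& t) {
        if (t.ext_suspended) {
            return true;   // cannot be resumed while Threads_lock is held
        }
        switch (t.state) {
        case _thread_in_native:
            return !t.has_java_frame || t.walkable;
        case _thread_blocked:
            return true;
        default:
            return false;  // e.g. running Java code: must poll by itself
        }
    }

    int main() {
        Target blocked   = { _thread_blocked,   false, true, false };
        Target in_native = { _thread_in_native, false, true, false };
        std::printf("blocked: %d\n", possibly_can_process(blocked));     // 1
        std::printf("native, non-walkable: %d\n",
                    possibly_can_process(in_native));                    // 0
    }

A false positive merely costs one failed re-check under the semaphore; a false negative just means the target thread processes the handshake itself.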

@@ -0,0 +1,359 @@
From ae26670a6db1950136b2838c2f57864cfd9964d0 Mon Sep 17 00:00:00 2001
Date: Wed, 1 Apr 2020 21:22:56 +0000
Subject: [PATCH] 8212933: Thread-SMR: requesting a VM operation whilst holding
a ThreadsListHandle can cause deadlocks
Summary: <gc>: <vmthread can process the handshake of an exiting java thread>
LLT: test/hotspot/jtreg/runtime/handshake/HandshakeWalkSuspendExitTest.java
Bug url: https://bugs.openjdk.java.net/browse/JDK-8212933
---
src/hotspot/share/runtime/handshake.cpp | 69 ++++++----------
src/hotspot/share/runtime/handshake.hpp | 9 +--
src/hotspot/share/runtime/thread.cpp | 3 -
src/hotspot/share/runtime/thread.hpp | 4 -
src/hotspot/share/runtime/threadSMR.cpp | 5 --
.../handshake/HandshakeWalkSuspendExitTest.java | 93 ++++++++++++++++++++++
6 files changed, 117 insertions(+), 66 deletions(-)
create mode 100644 test/hotspot/jtreg/runtime/handshake/HandshakeWalkSuspendExitTest.java
diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp
index b84bf22..025861f 100644
--- a/src/hotspot/share/runtime/handshake.cpp
+++ b/src/hotspot/share/runtime/handshake.cpp
@@ -41,7 +41,6 @@
class HandshakeOperation: public StackObj {
public:
virtual void do_handshake(JavaThread* thread) = 0;
- virtual void cancel_handshake(JavaThread* thread) = 0;
};
class HandshakeThreadsOperation: public HandshakeOperation {
@@ -51,8 +50,6 @@ class HandshakeThreadsOperation: public HandshakeOperation {
public:
HandshakeThreadsOperation(ThreadClosure* cl) : _thread_cl(cl) {}
void do_handshake(JavaThread* thread);
- void cancel_handshake(JavaThread* thread) { _done.signal(); };
-
bool thread_has_completed() { return _done.trywait(); }
#ifdef ASSERT
@@ -122,15 +119,11 @@ class VM_HandshakeOneThread: public VM_Handshake {
DEBUG_ONLY(_op->check_state();)
TraceTime timer("Performing single-target operation (vmoperation doit)", TRACETIME_LOG(Info, handshake));
- {
- ThreadsListHandle tlh;
- if (tlh.includes(_target)) {
- set_handshake(_target);
- _thread_alive = true;
- }
- }
-
- if (!_thread_alive) {
+ ThreadsListHandle tlh;
+ if (tlh.includes(_target)) {
+ set_handshake(_target);
+ _thread_alive = true;
+ } else {
return;
}
@@ -148,20 +141,9 @@ class VM_HandshakeOneThread: public VM_Handshake {
// We need to re-think this with SMR ThreadsList.
// There is an assumption in the code that the Threads_lock should be
// locked during certain phases.
- MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
- ThreadsListHandle tlh;
- if (tlh.includes(_target)) {
- // Warning _target's address might be re-used.
- // handshake_process_by_vmthread will check the semaphore for us again.
- // Since we can't have more then one handshake in flight a reuse of
- // _target's address should be okay since the new thread will not have
- // an operation.
+ {
+ MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
_target->handshake_process_by_vmthread();
- } else {
- // We can't warn here since the thread does cancel_handshake after
- // it has been removed from the ThreadsList. So we should just keep
- // looping here until while below returns false. If we have a bug,
- // then we hang here, which is good for debugging.
}
} while (!poll_for_completed_thread());
DEBUG_ONLY(_op->check_state();)
@@ -180,8 +162,9 @@ class VM_HandshakeAllThreads: public VM_Handshake {
DEBUG_ONLY(_op->check_state();)
TraceTime timer("Performing operation (vmoperation doit)", TRACETIME_LOG(Info, handshake));
+ JavaThreadIteratorWithHandle jtiwh;
int number_of_threads_issued = 0;
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
+ for (JavaThread *thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
set_handshake(thr);
number_of_threads_issued++;
}
@@ -211,8 +194,9 @@ class VM_HandshakeAllThreads: public VM_Handshake {
// We need to re-think this with SMR ThreadsList.
// There is an assumption in the code that the Threads_lock should
// be locked during certain phases.
+ jtiwh.rewind();
MutexLockerEx ml(Threads_lock, Mutex::_no_safepoint_check_flag);
- for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
+ for (JavaThread *thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
// A new thread on the ThreadsList will not have an operation,
// hence it is skipped in handshake_process_by_vmthread.
thr->handshake_process_by_vmthread();
@@ -263,7 +247,11 @@ void HandshakeThreadsOperation::do_handshake(JavaThread* thread) {
FormatBufferResource message("Operation for thread " PTR_FORMAT ", is_vm_thread: %s",
p2i(thread), BOOL_TO_STR(Thread::current()->is_VM_thread()));
TraceTime timer(message, TRACETIME_LOG(Debug, handshake, task));
- _thread_cl->do_thread(thread);
+
+ // Only actually execute the operation for non terminated threads.
+ if (!thread->is_terminated()) {
+ _thread_cl->do_thread(thread);
+ }
// Use the semaphore to inform the VM thread that we have completed the operation
_done.signal();
@@ -307,12 +295,7 @@ void HandshakeState::clear_handshake(JavaThread* target) {
void HandshakeState::process_self_inner(JavaThread* thread) {
assert(Thread::current() == thread, "should call from thread");
-
- if (thread->is_terminated()) {
- // If thread is not on threads list but armed, cancel.
- thread->cancel_handshake();
- return;
- }
+ assert(!thread->is_terminated(), "should not be a terminated thread");
ThreadInVMForHandshake tivm(thread);
if (!_semaphore.trywait()) {
@@ -329,18 +312,8 @@ void HandshakeState::process_self_inner(JavaThread* thread) {
_semaphore.signal();
}
-void HandshakeState::cancel_inner(JavaThread* thread) {
- assert(Thread::current() == thread, "should call from thread");
- assert(thread->thread_state() == _thread_in_vm, "must be in vm state");
- HandshakeOperation* op = _operation;
- clear_handshake(thread);
- if (op != NULL) {
- op->cancel_handshake(thread);
- }
-}
-
bool HandshakeState::vmthread_can_process_handshake(JavaThread* target) {
- return SafepointSynchronize::safepoint_safe(target, target->thread_state());
+ return SafepointSynchronize::safepoint_safe(target, target->thread_state()) || target->is_terminated();
}
static bool possibly_vmthread_can_process_handshake(JavaThread* target) {
@@ -357,6 +330,9 @@ static bool possibly_vmthread_can_process_handshake(JavaThread* target) {
if (target->is_ext_suspended()) {
return true;
}
+ if (target->is_terminated()) {
+ return true;
+ }
switch (target->thread_state()) {
case _thread_in_native:
// native threads are safe if they have no java stack or have walkable stack
@@ -383,6 +359,8 @@ bool HandshakeState::claim_handshake_for_vmthread() {
void HandshakeState::process_by_vmthread(JavaThread* target) {
assert(Thread::current()->is_VM_thread(), "should call from vm thread");
+ // Threads_lock must be held here, but that is assert()ed in
+ // possibly_vmthread_can_process_handshake().
if (!has_operation()) {
// JT has already cleared its handshake
@@ -404,7 +382,6 @@ void HandshakeState::process_by_vmthread(JavaThread* target) {
// getting caught by the semaphore.
if (vmthread_can_process_handshake(target)) {
guarantee(!_semaphore.trywait(), "we should already own the semaphore");
-
_operation->do_handshake(target);
// Disarm after VM thread have executed the operation.
clear_handshake(target);
diff --git a/src/hotspot/share/runtime/handshake.hpp b/src/hotspot/share/runtime/handshake.hpp
index 88dcd7e..a735d1e 100644
--- a/src/hotspot/share/runtime/handshake.hpp
+++ b/src/hotspot/share/runtime/handshake.hpp
@@ -60,7 +60,6 @@ class HandshakeState {
bool vmthread_can_process_handshake(JavaThread* target);
void clear_handshake(JavaThread* thread);
- void cancel_inner(JavaThread* thread);
void process_self_inner(JavaThread* thread);
public:
@@ -72,19 +71,13 @@ public:
return _operation != NULL;
}
- void cancel(JavaThread* thread) {
- if (!_thread_in_process_handshake) {
- FlagSetting fs(_thread_in_process_handshake, true);
- cancel_inner(thread);
- }
- }
-
void process_by_self(JavaThread* thread) {
if (!_thread_in_process_handshake) {
FlagSetting fs(_thread_in_process_handshake, true);
process_self_inner(thread);
}
}
+
void process_by_vmthread(JavaThread* target);
};
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index d794ae8..7846525 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -4258,9 +4258,6 @@ bool Threads::destroy_vm() {
before_exit(thread);
thread->exit(true);
- // thread will never call smr_delete, instead of implicit cancel
- // in wait_for_vm_thread_exit we do it explicit.
- thread->cancel_handshake();
// Stop VM thread.
{
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index 90cb7cb..fc469c8 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -1228,10 +1228,6 @@ class JavaThread: public Thread {
return _handshake.has_operation();
}
- void cancel_handshake() {
- _handshake.cancel(this);
- }
-
void handshake_process_by_self() {
_handshake.process_by_self(this);
}
diff --git a/src/hotspot/share/runtime/threadSMR.cpp b/src/hotspot/share/runtime/threadSMR.cpp
index 8b232fb..ba18aa4 100644
--- a/src/hotspot/share/runtime/threadSMR.cpp
+++ b/src/hotspot/share/runtime/threadSMR.cpp
@@ -989,11 +989,6 @@ void ThreadsSMRSupport::smr_delete(JavaThread *thread) {
// Retry the whole scenario.
}
- if (ThreadLocalHandshakes) {
- // The thread is about to be deleted so cancel any handshake.
- thread->cancel_handshake();
- }
-
delete thread;
if (EnableThreadSMRStatistics) {
timer.stop();
diff --git a/test/hotspot/jtreg/runtime/handshake/HandshakeWalkSuspendExitTest.java b/test/hotspot/jtreg/runtime/handshake/HandshakeWalkSuspendExitTest.java
new file mode 100644
index 0000000..26b6f63
--- /dev/null
+++ b/test/hotspot/jtreg/runtime/handshake/HandshakeWalkSuspendExitTest.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test HandshakeWalkSuspendExitTest
+ * @summary This test tries to stress the handshakes with new and exiting threads while suspending them.
+ * @library /testlibrary /test/lib
+ * @build HandshakeWalkSuspendExitTest
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI HandshakeWalkSuspendExitTest
+ */
+
+import jdk.test.lib.Asserts;
+import sun.hotspot.WhiteBox;
+
+public class HandshakeWalkSuspendExitTest implements Runnable {
+
+ static final int _test_threads = 8;
+ static final int _test_exit_threads = 128;
+ static Thread[] _threads = new Thread[_test_threads];
+ static volatile boolean exit_now = false;
+ static java.util.concurrent.Semaphore _sem = new java.util.concurrent.Semaphore(0);
+
+ @Override
+ public void run() {
+ WhiteBox wb = WhiteBox.getWhiteBox();
+ while (!exit_now) {
+ _sem.release();
+ // We only suspend threads at even indexes, and never ourselves.
+ // Otherwise we could accidentally suspend all threads.
+ for (int i = 0; i < _threads.length; i += 2) {
+ wb.handshakeWalkStack(null /* ignored */, true /* stackwalk all threads */);
+ if (Thread.currentThread() != _threads[i]) {
+ _threads[i].suspend();
+ _threads[i].resume();
+ }
+ }
+ for (int i = 0; i < _threads.length; i += 2) {
+ wb.handshakeWalkStack(_threads[i] /* thread to stackwalk */, false /* stackwalk one thread */);
+ if (Thread.currentThread() != _threads[i]) {
+ _threads[i].suspend();
+ _threads[i].resume();
+ }
+ }
+ }
+ }
+
+ public static void main(String... args) throws Exception {
+ HandshakeWalkSuspendExitTest test = new HandshakeWalkSuspendExitTest();
+
+ for (int i = 0; i < _threads.length; i++) {
+ _threads[i] = new Thread(test);
+ _threads[i].start();
+ }
+ for (int i = 0; i < _test_threads; i++) {
+ _sem.acquire();
+ }
+ Thread[] exit_threads = new Thread[_test_exit_threads];
+ for (int i = 0; i < _test_exit_threads; i++) {
+ exit_threads[i] = new Thread(new Runnable() { public void run() {} });
+ exit_threads[i].start();
+ }
+ exit_now = true;
+ for (int i = 0; i < _threads.length; i++) {
+ _threads[i].join();
+ }
+ for (int i = 0; i < exit_threads.length; i++) {
+ exit_threads[i].join();
+ }
+ }
+}
--
1.8.3.1
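
With cancel_handshake() gone, correctness rests on the semaphore protocol alone: whoever wins trywait() claims the operation, and a terminated target is simply a no-op inside do_handshake(). A rough C++20 model of just the claim race (illustrative only; std::binary_semaphore stands in for HotSpot's Semaphore, and the has_operation() check is omitted):

    #include <atomic>
    #include <cstdio>
    #include <semaphore>
    #include <thread>

    std::binary_semaphore claim(1);   // stands in for HandshakeState::_semaphore
    std::atomic<int> executed{0};

    // Both the target JavaThread and the VM thread call this; only the
    // winner of try_acquire() (the patch's _semaphore.trywait()) runs the
    // operation, so the loser needs no cancel step.
    void try_process(const char* who) {
        if (!claim.try_acquire()) {
            std::printf("%s: operation already claimed\n", who);
            return;
        }
        executed.fetch_add(1);        // do_handshake() would run here
        std::printf("%s: ran the handshake operation\n", who);
        claim.release();              // _semaphore.signal()
    }

    int main() {
        std::thread vm(try_process, "VM thread");
        std::thread jt(try_process, "JavaThread");
        vm.join();
        jt.join();
        std::printf("executions: %d (sequential, never concurrent)\n",
                    executed.load());
    }

In the real code the second claimant also re-checks has_operation(), which is why the operation runs exactly once rather than at most once per claimant.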

@@ -1,5 +1,5 @@
From c759704261967595054d7ad928cb2cb13dc53356 Mon Sep 17 00:00:00 2001
Date: Wed, 25 Dec 2019 14:21:26 +0800
From 7995d2341ea83431d68cf36d22577074e111e32d Mon Sep 17 00:00:00 2001
Date: Mon, 13 Apr 2020 10:40:13 +0800
Subject: [PATCH] 8214527: AArch64: ZGC for Aarch64
Summary: <gc>: <Implement ZGC for AArch64>
@@ -9,9 +9,6 @@ Bug url: https://bugs.openjdk.java.net/browse/JDK-8214527
make/autoconf/hotspot.m4 | 3 +-
src/hotspot/cpu/aarch64/aarch64.ad | 486 +++++++++++++++++++++
.../cpu/aarch64/c1_LIRAssembler_aarch64.cpp | 12 +-
.../gc/shared/barrierSetAssembler_aarch64.hpp | 2 +-
.../shared/modRefBarrierSetAssembler_aarch64.cpp | 4 +-
.../shared/modRefBarrierSetAssembler_aarch64.hpp | 2 +-
.../aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp | 408 +++++++++++++++++
.../aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp | 92 ++++
src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp | 19 +-
@@ -29,7 +26,7 @@ Bug url: https://bugs.openjdk.java.net/browse/JDK-8214527
.../gc/z/zPhysicalMemoryBacking_linux_aarch64.hpp | 65 +++
.../gc/z/zVirtualMemory_linux_aarch64.cpp | 41 ++
src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp | 20 +-
23 files changed, 2368 insertions(+), 27 deletions(-)
20 files changed, 2361 insertions(+), 20 deletions(-)
create mode 100644 src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
create mode 100644 src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
create mode 100644 src/hotspot/os_cpu/linux_aarch64/gc/z/zAddress_linux_aarch64.inline.hpp
@@ -60,7 +57,7 @@ index 1d4c710be..6bfda33da 100644
else
DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index bdef2a6d7..d0f3fc33d 100644
index f2a9207b8..57b20775e 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -957,6 +957,146 @@ reg_class v3_reg(
@@ -578,7 +575,7 @@ index bdef2a6d7..d0f3fc33d 100644
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index ac91e87d2..e6601434b 100644
index 73202f669..cf3ce0a8f 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -1012,7 +1012,11 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
@ -594,7 +591,7 @@ index ac91e87d2..e6601434b 100644
} else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
if (UseCompressedClassPointers) {
__ decode_klass_not_null(dest->as_register());
@@ -2811,7 +2815,11 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
@@ -2818,7 +2822,11 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
@@ -607,49 +604,6 @@ index ac91e87d2..e6601434b 100644
__ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
}
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp
index 6bd6c6b89..68e287517 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp
@@ -37,7 +37,7 @@ private:
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
- Register addr, Register count, RegSet saved_regs) {}
+ Register src, Register dst, Register count, RegSet saved_regs) {}
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register start, Register end, Register tmp, RegSet saved_regs) {}
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
diff --git a/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp
index a28c50169..badd46d05 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp
@@ -29,10 +29,10 @@
#define __ masm->
void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
- Register addr, Register count, RegSet saved_regs) {
+ Register src, Register dst, Register count, RegSet saved_regs) {
if (is_oop) {
- gen_write_ref_array_pre_barrier(masm, decorators, addr, count, saved_regs);
+ gen_write_ref_array_pre_barrier(masm, decorators, dst, count, saved_regs);
}
}
diff --git a/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp
index e145b5d74..00e36b919 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp
@@ -44,7 +44,7 @@ protected:
public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
- Register addr, Register count, RegSet saved_regs);
+ Register src, Register dst, Register count, RegSet saved_regs);
virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register start, Register count, Register tmp, RegSet saved_regs);
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
new file mode 100644
index 000000000..90b2b4ca7
@@ -1163,7 +1117,7 @@ index 000000000..7e8be01cc
+
+#endif // CPU_AARCH64_GC_Z_ZBARRIERSETASSEMBLER_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
index eff23b6ba..ac7eb8480 100644
index 89a433013..ac7eb8480 100644
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp
@@ -45,6 +45,9 @@
@ -1193,35 +1147,8 @@ index eff23b6ba..ac7eb8480 100644
// Check if the oop is in the right area of memory
__ mov(c_rarg3, (intptr_t) Universe::verify_oop_mask());
__ andr(c_rarg2, r0, c_rarg3);
@@ -1333,7 +1346,7 @@ class StubGenerator: public StubCodeGenerator {
}
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_reg);
+ bs->arraycopy_prologue(_masm, decorators, is_oop, s, d, count, saved_reg);
if (is_oop) {
// save regs before copy_memory
@@ -1399,7 +1412,7 @@ class StubGenerator: public StubCodeGenerator {
}
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_regs);
+ bs->arraycopy_prologue(_masm, decorators, is_oop, s, d, count, saved_regs);
if (is_oop) {
// save regs before copy_memory
@@ -1753,7 +1766,7 @@ class StubGenerator: public StubCodeGenerator {
}
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
- bs->arraycopy_prologue(_masm, decorators, is_oop, to, count, wb_pre_saved_regs);
+ bs->arraycopy_prologue(_masm, decorators, is_oop, from, to, count, wb_pre_saved_regs);
// save the original count
__ mov(count_save, count);
diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
index e8b4b7414..3aa3f8579 100644
index d2290a670..381211ecc 100644
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
@@ -273,13 +273,13 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
@ -1234,11 +1161,11 @@ index e8b4b7414..3aa3f8579 100644
- } else {
+ if (stub->tmp()->is_valid()) {
// Load address into tmp register
ce->leal(stub->ref_addr(), stub->tmp(), stub->patch_code(), stub->patch_info());
ce->leal(stub->ref_addr(), stub->tmp());
ref_addr = stub->tmp()->as_pointer_register();
+ } else {
+ // Address already in register
+ ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
+ ref_addr = stub->ref_addr()->as_address_ptr()->base()->as_pointer_register();
}
assert_different_registers(ref, ref_addr, noreg);
@@ -2654,11 +2581,11 @@ index 000000000..68df40191
+ return true;
+}
diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
index a1b43005c..f6f48268d 100644
index 9f8ce7424..0abd3980f 100644
--- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
@@ -39,21 +39,15 @@ ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address r
_patch_info(access.patch_emit_info()),
@@ -38,21 +38,15 @@ ZLoadBarrierStubC1::ZLoadBarrierStubC1(LIRAccess& access, LIR_Opr ref, address r
_tmp(LIR_OprFact::illegalOpr),
_runtime_stub(runtime_stub) {
+ assert(_ref->is_register(), "Must be a register");
@@ -2687,5 +2614,5 @@ index a1b43005c..f6f48268d 100644
DecoratorSet ZLoadBarrierStubC1::decorators() const {
--
2.12.3
2.19.1
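
One detail worth calling out from the hunks above: arraycopy_prologue now receives the source register as well as the destination, presumably because ZGC's barrier needs the source address: every element read from the source array must pass the load barrier before it lands in the destination. A conceptual C++ sketch of that read-side barrier (toy code with the barrier body elided; not the stub logic):

    #include <cstdint>
    #include <cstdio>

    // Toy model of why ZGC's arraycopy stub needs the *source* register:
    // each element read from src goes through the load barrier first.
    static int barrier_calls = 0;

    static uintptr_t load_barrier(uintptr_t* slot) {
        barrier_calls++;      // a real barrier would test and heal *slot here
        return *slot;
    }

    static void oop_arraycopy(uintptr_t* src, uintptr_t* dst, int len) {
        for (int i = 0; i < len; i++) {
            dst[i] = load_barrier(&src[i]);   // barrier on the read side
        }
    }

    int main() {
        uintptr_t src[3] = { 0x1000, 0x2000, 0x3000 };
        uintptr_t dst[3] = { 0, 0, 0 };
        oop_arraycopy(src, dst, 3);
        std::printf("barrier calls: %d\n", barrier_calls);   // 3
    }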

@@ -1,5 +1,5 @@
From e26d6291e9ee09b805f9ee73b3a9a87335aea4c8 Mon Sep 17 00:00:00 2001
Date: Mon, 6 Jan 2020 10:49:35 +0800
From f9885f88585a8299760f905b420af1adb5bcca44 Mon Sep 17 00:00:00 2001
Date: Mon, 13 Apr 2020 10:46:11 +0800
Subject: [PATCH] 8224675: Late GC barrier insertion for ZGC
Summary: <gc>: <Insert ZGC load barriers after loop optimizations, before macro expansion.>
@@ -12,7 +12,7 @@ Bug url: https://bugs.openjdk.java.net/browse/JDK-8224675
src/hotspot/share/adlc/formssel.cpp | 9 +-
src/hotspot/share/compiler/compilerDirectives.hpp | 2 +-
src/hotspot/share/gc/shared/c2/barrierSetC2.hpp | 4 +-
src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp | 1768 ++++++++++----------
src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp | 1768 ++++++++---------
src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp | 116 +-
src/hotspot/share/gc/z/zHeap.cpp | 26 -
src/hotspot/share/gc/z/z_globals.hpp | 3 -
@@ -39,7 +39,7 @@ Bug url: https://bugs.openjdk.java.net/browse/JDK-8224675
30 files changed, 1259 insertions(+), 1108 deletions(-)
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index d0f3fc33d..3b898f548 100644
index 57b20775e..a6d2e6609 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -17517,7 +17517,7 @@ instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr,
@@ -139,19 +139,19 @@ diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad
index 9d9eda84b..97c396875 100644
--- a/src/hotspot/cpu/x86/x86_64.ad
+++ b/src/hotspot/cpu/x86/x86_64.ad
@@ -539,6 +539,12 @@ reg_class int_rdi_reg(RDI);
@@ -538,6 +538,12 @@ reg_class int_rdi_reg(RDI);
%}
source_hpp %{
+source_hpp %{
+
+#include "gc/z/c2/zBarrierSetC2.hpp"
+
+%}
+
+source_hpp %{
source_hpp %{
#if INCLUDE_ZGC
#include "gc/z/zBarrierSetAssembler.hpp"
#endif
@@ -12630,7 +12636,7 @@ instruct RethrowException()
instruct loadBarrierSlowRegNoVec(rRegP dst, memory mem, rFlagsReg cr) %{
@@ -303,7 +303,7 @@ index 466880d19..2845cf6d8 100644
int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
if( strcmp(_opType,"PrefetchAllocation")==0 )
diff --git a/src/hotspot/share/compiler/compilerDirectives.hpp b/src/hotspot/share/compiler/compilerDirectives.hpp
index c06d6b899..5c9fc98e1 100644
index b4de7faed..8eba28f94 100644
--- a/src/hotspot/share/compiler/compilerDirectives.hpp
+++ b/src/hotspot/share/compiler/compilerDirectives.hpp
@@ -67,7 +67,7 @@ NOT_PRODUCT(cflags(TraceOptoOutput, bool, TraceOptoOutput, TraceOptoOutput))
@ -339,7 +339,7 @@ index 8baf4d9de..eea74674f 100644
#endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP
diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
index f7458e6ce..1c77a4f63 100644
index 221ab4c2f..bf0bd43af 100644
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
@@ -22,14 +22,21 @@
@@ -388,7 +388,7 @@ index f7458e6ce..1c77a4f63 100644
- ZBarrierSetC2State* s = bs->state();
- if (s->load_barrier_count() >= 2) {
- Compile::TracePhase tp("idealLoop", &C->timers[Phase::_t_idealLoop]);
- PhaseIdealLoop ideal_loop(igvn, true, false, true);
- PhaseIdealLoop ideal_loop(igvn, LoopOptsLastRound);
- if (C->major_progress()) C->print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
- }
-}
@ -451,7 +451,7 @@ index f7458e6ce..1c77a4f63 100644
init_req(Control, c);
init_req(Memory, mem);
init_req(Oop, val);
@@ -216,8 +188,8 @@ const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
@@ -215,8 +187,8 @@ const Type *LoadBarrierNode::Value(PhaseGVN *phase) const {
const Type** floadbarrier = (const Type **)(phase->C->type_arena()->Amalloc_4((Number_of_Outputs)*sizeof(Type*)));
const Type* val_t = phase->type(in(Oop));
floadbarrier[Control] = Type::CONTROL;
@ -462,7 +462,7 @@ index f7458e6ce..1c77a4f63 100644
return TypeTuple::make(Number_of_Outputs, floadbarrier);
}
@@ -237,6 +209,11 @@ bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node
@@ -236,6 +208,11 @@ bool LoadBarrierNode::is_dominator(PhaseIdealLoop* phase, bool linear_only, Node
}
LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase, bool linear_only, bool look_for_similar) {
@ -474,7 +474,7 @@ index f7458e6ce..1c77a4f63 100644
Node* val = in(LoadBarrierNode::Oop);
if (in(Similar)->is_Proj() && in(Similar)->in(0)->is_LoadBarrier()) {
LoadBarrierNode* lb = in(Similar)->in(0)->as_LoadBarrier();
@@ -265,7 +242,7 @@ LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase,
@@ -264,7 +241,7 @@ LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase,
}
}
@ -483,7 +483,7 @@ index f7458e6ce..1c77a4f63 100644
return NULL;
}
@@ -315,7 +292,7 @@ LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase,
@@ -314,7 +291,7 @@ LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase,
}
if (ok) {
assert(dom_found, "");
@ -492,7 +492,7 @@ index f7458e6ce..1c77a4f63 100644
}
break;
}
@@ -327,6 +304,7 @@ LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase,
@@ -326,6 +303,7 @@ LoadBarrierNode* LoadBarrierNode::has_dominating_barrier(PhaseIdealLoop* phase,
void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
// Change to that barrier may affect a dominated barrier so re-push those
@ -500,7 +500,7 @@ index f7458e6ce..1c77a4f63 100644
Node* val = in(LoadBarrierNode::Oop);
for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
@@ -354,13 +332,9 @@ void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
@@ -353,13 +331,9 @@ void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
}
Node *LoadBarrierNode::Identity(PhaseGVN *phase) {
@ -515,7 +515,7 @@ index f7458e6ce..1c77a4f63 100644
assert(dominating_barrier->in(Oop) == in(Oop), "");
return dominating_barrier;
}
@@ -373,33 +347,31 @@ Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
@@ -372,33 +346,31 @@ Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return this;
}
@ -563,7 +563,7 @@ index f7458e6ce..1c77a4f63 100644
if (eliminate) {
if (can_reshape) {
PhaseIterGVN* igvn = phase->is_IterGVN();
@@ -414,13 +386,13 @@ Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
@@ -413,13 +385,13 @@ Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
fix_similar_in_uses(igvn);
if (out_res != NULL) {
if (dominating_barrier != NULL) {
@ -578,7 +578,7 @@ index f7458e6ce..1c77a4f63 100644
return new ConINode(TypeInt::ZERO);
}
@@ -431,7 +403,7 @@ Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
@@ -430,7 +402,7 @@ Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return this;
}
@ -587,7 +587,7 @@ index f7458e6ce..1c77a4f63 100644
// If this barrier is linked through the Similar edge by a
// dominated barrier and both barriers have the same Oop field,
// the dominated barrier can go away, so push it for reprocessing.
@@ -445,6 +417,7 @@ Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
@@ -444,6 +416,7 @@ Node *LoadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* u = out_res->fast_out(i);
if (u->is_LoadBarrier() && u->in(Similar) == out_res &&
(u->in(Oop) == val || !u->in(Similar)->is_top())) {
@ -595,7 +595,7 @@ index f7458e6ce..1c77a4f63 100644
igvn->_worklist.push(u);
}
}
@@ -478,213 +451,17 @@ void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
@@ -477,213 +450,17 @@ void LoadBarrierNode::fix_similar_in_uses(PhaseIterGVN* igvn) {
bool LoadBarrierNode::has_true_uses() const {
Node* out_res = proj_out_or_null(Oop);
@@ -815,7 +815,7 @@ index f7458e6ce..1c77a4f63 100644
static bool barrier_needed(C2Access access) {
return ZBarrierSet::barrier_needed(access.decorators(), access.type());
}
@@ -696,157 +473,48 @@ Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) co
@@ -695,157 +472,48 @@ Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) co
}
bool weak = (access.decorators() & ON_WEAK_OOP_REF) != 0;
@ -990,7 +990,7 @@ index f7458e6ce..1c77a4f63 100644
Node* in_ctrl = barrier->in(LoadBarrierNode::Control);
Node* in_mem = barrier->in(LoadBarrierNode::Memory);
@@ -856,102 +524,8 @@ void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrie
@@ -855,102 +523,8 @@ void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrie
Node* out_ctrl = barrier->proj_out(LoadBarrierNode::Control);
Node* out_res = barrier->proj_out(LoadBarrierNode::Oop);
@ -1093,7 +1093,7 @@ index f7458e6ce..1c77a4f63 100644
Node* jthread = igvn.transform(new ThreadLocalNode());
Node* adr = phase->basic_plus_adr(jthread, in_bytes(ZThreadLocalData::address_bad_mask_offset()));
Node* bad_mask = igvn.transform(LoadNode::make(igvn, in_ctrl, in_mem, adr,
@@ -965,17 +539,9 @@ void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBa
@@ -964,17 +538,9 @@ void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBa
Node* then = igvn.transform(new IfTrueNode(iff));
Node* elsen = igvn.transform(new IfFalseNode(iff));
@ -1113,7 +1113,7 @@ index f7458e6ce..1c77a4f63 100644
// Create the final region/phi pair to converge cntl/data paths to downstream code
Node* result_region = igvn.transform(new RegionNode(3));
result_region->set_req(1, then);
@@ -985,29 +551,17 @@ void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBa
@@ -984,29 +550,17 @@ void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBa
result_phi->set_req(1, new_loadp);
result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
@ -1148,7 +1148,7 @@ index f7458e6ce..1c77a4f63 100644
return;
}
@@ -1053,373 +607,6 @@ bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
@@ -1052,373 +606,6 @@ bool ZBarrierSetC2::expand_macro_nodes(PhaseMacroExpand* macro) const {
return false;
}
@@ -1522,7 +1522,7 @@ index f7458e6ce..1c77a4f63 100644
Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
Node* node = c;
@@ -1437,7 +624,7 @@ Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
@@ -1436,7 +623,7 @@ Node* ZBarrierSetC2::step_over_gc_barrier(Node* c) const {
if (node->is_Phi()) {
PhiNode* phi = node->as_Phi();
Node* n = phi->in(1);
@ -1531,7 +1531,7 @@ index f7458e6ce..1c77a4f63 100644
assert(c == node, "projections from step 1 should only be seen before macro expansion");
return phi->in(2);
}
@@ -1553,3 +740,798 @@ void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
@@ -1552,3 +739,798 @@ void ZBarrierSetC2::verify_gc_barriers(bool post_parse) const {
}
#endif
@@ -2595,10 +2595,10 @@ index 4314edd96..f6b588685 100644
macro(Loop)
macro(LoopLimit)
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp
index 40ced1bb7..ba61967bc 100644
index dc92af402..ef4a6ab13 100644
--- a/src/hotspot/share/opto/compile.cpp
+++ b/src/hotspot/share/opto/compile.cpp
@@ -2167,8 +2167,8 @@ void Compile::Optimize() {
@@ -2207,8 +2207,8 @@ void Compile::Optimize() {
#endif
@ -2608,8 +2608,8 @@ index 40ced1bb7..ba61967bc 100644
bs->verify_gc_barriers(true);
#endif
@@ -2344,12 +2344,6 @@ void Compile::Optimize() {
}
@@ -2380,12 +2380,6 @@ void Compile::Optimize() {
return;
}
-#if INCLUDE_ZGC
@ -2621,7 +2621,7 @@ index 40ced1bb7..ba61967bc 100644
if (failing()) return;
// Ensure that major progress is now clear
@@ -2369,7 +2363,13 @@ void Compile::Optimize() {
@@ -2405,7 +2399,13 @@ void Compile::Optimize() {
}
#ifdef ASSERT
@ -2636,7 +2636,7 @@ index 40ced1bb7..ba61967bc 100644
bs->verify_gc_barriers(false);
#endif
@@ -2917,10 +2917,6 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
@@ -2953,10 +2953,6 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
case Op_LoadL_unaligned:
case Op_LoadPLocked:
case Op_LoadP:
@ -2647,7 +2647,7 @@ index 40ced1bb7..ba61967bc 100644
case Op_LoadN:
case Op_LoadRange:
case Op_LoadS: {
@@ -2937,6 +2933,29 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
@@ -2973,6 +2969,29 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
break;
}
@ -2678,7 +2678,7 @@ index 40ced1bb7..ba61967bc 100644
Node *addp = n->in(AddPNode::Address);
assert( !addp->is_AddP() ||
diff --git a/src/hotspot/share/opto/compile.hpp b/src/hotspot/share/opto/compile.hpp
index 446324ec4..d58758db7 100644
index a56439bbf..bfd85d032 100644
--- a/src/hotspot/share/opto/compile.hpp
+++ b/src/hotspot/share/opto/compile.hpp
@@ -52,6 +52,7 @@ class C2Compiler;
@ -2689,7 +2689,7 @@ index 446324ec4..d58758db7 100644
class InlineTree;
class Int_Array;
class LoadBarrierNode;
@@ -644,6 +645,7 @@ class Compile : public Phase {
@@ -655,6 +656,7 @@ class Compile : public Phase {
void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
int inlining_incrementally() const { return _inlining_incrementally; }
void set_major_progress() { _major_progress++; }
@ -2697,7 +2697,7 @@ index 446324ec4..d58758db7 100644
void clear_major_progress() { _major_progress = 0; }
int num_loop_opts() const { return _num_loop_opts; }
void set_num_loop_opts(int n) { _num_loop_opts = n; }
@@ -733,7 +735,15 @@ class Compile : public Phase {
@@ -744,7 +746,15 @@ class Compile : public Phase {
C->_latest_stage_start_counter.stamp();
}
@ -2714,7 +2714,7 @@ index 446324ec4..d58758db7 100644
EventCompilerPhase event;
if (event.should_commit()) {
event.set_starttime(C->_latest_stage_start_counter);
@@ -743,10 +753,15 @@ class Compile : public Phase {
@@ -754,10 +764,15 @@ class Compile : public Phase {
event.commit();
}
@ -2734,7 +2734,7 @@ index 446324ec4..d58758db7 100644
#endif
C->_latest_stage_start_counter.stamp();
diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp
index 602f1261d..47c83766e 100644
index e0637ee47..235b31c4c 100644
--- a/src/hotspot/share/opto/escape.cpp
+++ b/src/hotspot/share/opto/escape.cpp
@@ -453,10 +453,6 @@ void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *de
@@ -2858,10 +2858,10 @@ index 50c0c2dd3..05ec9fa9f 100644
case Op_LoadS:
case Op_LoadKlass:
diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
index dd8ae8557..d99e2f81b 100644
index 4103fa4bd..b0b7fe185 100644
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -982,7 +982,7 @@ void LoopNode::verify_strip_mined(int expect_skeleton) const {
@@ -986,7 +986,7 @@ void LoopNode::verify_strip_mined(int expect_skeleton) const {
wq.push(u);
bool found_sfpt = false;
for (uint next = 0; next < wq.size() && !found_sfpt; next++) {
@ -2870,7 +2870,7 @@ index dd8ae8557..d99e2f81b 100644
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !found_sfpt; i++) {
Node* u = n->fast_out(i);
if (u == sfpt) {
@@ -996,6 +996,19 @@ void LoopNode::verify_strip_mined(int expect_skeleton) const {
@@ -1000,6 +1000,19 @@ void LoopNode::verify_strip_mined(int expect_skeleton) const {
assert(found_sfpt, "no node in loop that's not input to safepoint");
}
}
@ -2890,16 +2890,16 @@ index dd8ae8557..d99e2f81b 100644
CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
assert(cle == inner->loopexit_or_null(), "mismatch");
bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
@@ -2713,7 +2726,7 @@ bool PhaseIdealLoop::process_expensive_nodes() {
@@ -2717,7 +2730,7 @@ bool PhaseIdealLoop::process_expensive_nodes() {
//----------------------------build_and_optimize-------------------------------
// Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to
// its corresponding LoopNode. If 'optimize' is true, do some loop cleanups.
-void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts, bool last_round) {
+void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts, bool last_round, bool z_barrier_insertion) {
ResourceMark rm;
-void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
+void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode, bool z_barrier_insertion) {
bool do_split_ifs = (mode == LoopOptsDefault || mode == LoopOptsLastRound);
bool skip_loop_opts = (mode == LoopOptsNone);
int old_progress = C->major_progress();
@@ -2777,7 +2790,8 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts,
@@ -2784,7 +2797,8 @@ void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
}
// Nothing to do, so get out
@ -2909,7 +2909,7 @@ index dd8ae8557..d99e2f81b 100644
bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn);
if (stop_early && !do_expensive_nodes) {
_igvn.optimize(); // Cleanup NeverBranches
@@ -2869,9 +2883,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts,
@@ -2876,9 +2890,7 @@ void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
build_loop_late( visited, worklist, nstack );
if (_verify_only) {
@ -2920,7 +2920,7 @@ index dd8ae8557..d99e2f81b 100644
assert(C->unique() == unique, "verification mode made Nodes? ? ?");
assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything");
return;
@@ -2915,10 +2927,7 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts,
@@ -2922,10 +2934,7 @@ void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
if (skip_loop_opts) {
// restore major progress flag
@ -2932,7 +2932,7 @@ index dd8ae8557..d99e2f81b 100644
// Cleanup any modified bits
_igvn.optimize();
@@ -2928,6 +2937,16 @@ void PhaseIdealLoop::build_and_optimize(bool do_split_ifs, bool skip_loop_opts,
@@ -2935,6 +2944,16 @@ void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) {
return;
}
@ -2949,7 +2949,7 @@ index dd8ae8557..d99e2f81b 100644
if (ReassociateInvariants) {
// Reassociate invariants and prep for split_thru_phi
for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
@@ -3094,8 +3113,7 @@ void PhaseIdealLoop::verify() const {
@@ -3101,8 +3120,7 @@ void PhaseIdealLoop::verify() const {
_ltree_root->verify_tree(loop_verify._ltree_root, NULL);
// Reset major-progress. It was cleared by creating a verify version of
// PhaseIdealLoop.
@ -2959,7 +2959,7 @@ index dd8ae8557..d99e2f81b 100644
}
//------------------------------verify_compare---------------------------------
@@ -4213,7 +4231,6 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
@@ -4225,7 +4243,6 @@ void PhaseIdealLoop::build_loop_late_post( Node *n ) {
case Op_LoadS:
case Op_LoadP:
case Op_LoadBarrierSlowReg:
@ -2968,15 +2968,15 @@ index dd8ae8557..d99e2f81b 100644
case Op_LoadRange:
case Op_LoadD_unaligned:
diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp
index 5dad7a9bd..cc679d158 100644
index 5efff246d..d2c50df70 100644
--- a/src/hotspot/share/opto/loopnode.hpp
+++ b/src/hotspot/share/opto/loopnode.hpp
@@ -929,7 +929,7 @@ public:
}
// build the loop tree and perform any requested optimizations
- void build_and_optimize(bool do_split_if, bool skip_loop_opts, bool last_round = false);
+ void build_and_optimize(bool do_split_if, bool skip_loop_opts, bool last_round = false, bool z_barrier_insertion = false);
- void build_and_optimize(LoopOptsMode mode);
+ void build_and_optimize(LoopOptsMode mode, bool z_barrier_insertion = false);
// Dominators for the sea of nodes
void Dominators();
@@ -2984,23 +2984,23 @@ index 5dad7a9bd..cc679d158 100644
Node *dom_lca_internal( Node *n1, Node *n2 ) const;
// Compute the Ideal Node to Loop mapping
- PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool skip_loop_opts = false, bool last_round = false) :
+ PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs, bool skip_loop_opts = false, bool last_round = false, bool z_barrier_insertion = false) :
- PhaseIdealLoop(PhaseIterGVN &igvn, LoopOptsMode mode) :
+ PhaseIdealLoop(PhaseIterGVN &igvn, LoopOptsMode mode, bool z_barrier_insertion = false) :
PhaseTransform(Ideal_Loop),
_igvn(igvn),
_dom_lca_tags(arena()), // Thread::resource_area
_verify_me(NULL),
_verify_only(false) {
- build_and_optimize(do_split_ifs, skip_loop_opts, last_round);
+ build_and_optimize(do_split_ifs, skip_loop_opts, last_round, z_barrier_insertion);
- build_and_optimize(mode);
+ build_and_optimize(mode, z_barrier_insertion);
}
// Verify that verify_me made the same decisions as a fresh run.
diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp
index 42018e8d9..411cf0841 100644
index 567069193..5793f51f2 100644
--- a/src/hotspot/share/opto/loopopts.cpp
+++ b/src/hotspot/share/opto/loopopts.cpp
@@ -1419,12 +1419,6 @@ void PhaseIdealLoop::split_if_with_blocks_post(Node *n, bool last_round) {
@@ -1420,12 +1420,6 @@ void PhaseIdealLoop::split_if_with_blocks_post(Node *n, bool last_round) {
get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
_igvn.replace_node( n, n->in(1) );
}
@@ -3052,7 +3052,7 @@ index 1824c89cd..bdcf9c424 100644
case Op_CMoveF:
case Op_CMoveI:
diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp
index 1bbc6fcd0..6fa851af2 100644
index ea33eef95..cdaf7669c 100644
--- a/src/hotspot/share/opto/memnode.cpp
+++ b/src/hotspot/share/opto/memnode.cpp
@@ -927,14 +927,6 @@ static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp,
@ -3070,7 +3070,7 @@ index 1bbc6fcd0..6fa851af2 100644
Node* ld_adr = in(MemNode::Address);
intptr_t ld_off = 0;
AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off);
@@ -2819,7 +2811,8 @@ const Type* SCMemProjNode::Value(PhaseGVN* phase) const
@@ -2825,7 +2817,8 @@ const Type* SCMemProjNode::Value(PhaseGVN* phase) const
LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required )
: Node(required),
_type(rt),
@ -3080,7 +3080,7 @@ index 1bbc6fcd0..6fa851af2 100644
{
init_req(MemNode::Control, c );
init_req(MemNode::Memory , mem);
@@ -3113,16 +3106,6 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
@@ -3119,16 +3112,6 @@ Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
return NULL;
}
@ -3098,7 +3098,7 @@ index 1bbc6fcd0..6fa851af2 100644
// Eliminate volatile MemBars for scalar replaced objects.
if (can_reshape && req() == (Precedent+1)) {
diff --git a/src/hotspot/share/opto/memnode.hpp b/src/hotspot/share/opto/memnode.hpp
index e0ffedc06..084a91c49 100644
index 529075761..e2bd6a6d8 100644
--- a/src/hotspot/share/opto/memnode.hpp
+++ b/src/hotspot/share/opto/memnode.hpp
@@ -182,6 +182,8 @@ private:
@@ -3148,7 +3148,7 @@ index e0ffedc06..084a91c49 100644
class LoadStoreConditionalNode : public LoadStoreNode {
diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp
index 434363631..2ff9bb2cd 100644
index a7d26152e..9bfc3a3b9 100644
--- a/src/hotspot/share/opto/node.cpp
+++ b/src/hotspot/share/opto/node.cpp
@@ -546,6 +546,9 @@ Node *Node::clone() const {
@ -3161,7 +3161,7 @@ index 434363631..2ff9bb2cd 100644
return n; // Return the clone
}
@@ -1462,10 +1465,14 @@ bool Node::rematerialize() const {
@@ -1465,10 +1468,14 @@ bool Node::rematerialize() const {
//------------------------------needs_anti_dependence_check---------------------
// Nodes which use memory without consuming it, hence need antidependences.
bool Node::needs_anti_dependence_check() const {
@@ -3215,10 +3215,10 @@ index 608aa0c8f..49f415a31 100644
DEFINE_CLASS_QUERY(Loop)
DEFINE_CLASS_QUERY(Mach)
diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp
index a94233cdc..880efe991 100644
index 686e709c3..d1177a56e 100644
--- a/src/hotspot/share/opto/phaseX.cpp
+++ b/src/hotspot/share/opto/phaseX.cpp
@@ -958,9 +958,6 @@ PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
@@ -947,9 +947,6 @@ PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
n->is_Mem() )
add_users_to_worklist(n);
}
@@ -3284,7 +3284,7 @@ index e7a122ce1..3573555c8 100644
return new StackValue(h);
}
diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp
index eb59003a7..ba8304ab7 100644
index f37d94a05..6c84180a6 100644
--- a/src/hotspot/share/utilities/growableArray.hpp
+++ b/src/hotspot/share/utilities/growableArray.hpp
@@ -152,6 +152,12 @@ class GenericGrowableArray : public ResourceObj {
@@ -3300,7 +3300,7 @@ index eb59003a7..ba8304ab7 100644
template<class E> class GrowableArray : public GenericGrowableArray {
friend class VMStructs;
@@ -443,6 +449,37 @@ template<class E> class GrowableArray : public GenericGrowableArray {
@@ -447,6 +453,37 @@ template<class E> class GrowableArray : public GenericGrowableArray {
}
return min;
}
@@ -3339,5 +3339,5 @@ index eb59003a7..ba8304ab7 100644
// Global GrowableArray methods (one instance in the library per each 'E' type).
--
2.12.3
2.19.1
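
The expand_loadbarrier_optimized code touched above builds the classic ZGC shape: load the reference, test it against the thread-local bad mask, and branch to a runtime call only when bad color bits are set. In plain C++ the generated fast/slow split corresponds to something like this (a conceptual model with an assumed mask bit, not the emitted IR):

    #include <cstdint>
    #include <cstdio>

    static const uint64_t bad_mask = 1ull << 42;   // assumed bit; stands in
                                                   // for ZAddressBadMask

    // Slow path: in the real VM this is a runtime call that remaps the
    // reference and self-heals the field it was loaded from.
    static uint64_t slow_path(uint64_t ref, uint64_t* slot) {
        uint64_t healed = ref & ~bad_mask;
        *slot = healed;                   // self-heal the field
        return healed;
    }

    // Fast path: one load, one test against the bad mask, and a rarely
    // taken branch (the IfTrue/IfFalse pair in the expanded IR).
    static uint64_t load_ref(uint64_t* slot) {
        uint64_t ref = *slot;
        if (ref & bad_mask) {
            ref = slow_path(ref, slot);
        }
        return ref;
    }

    int main() {
        uint64_t field = 0x1000 | bad_mask;    // stale, "bad"-colored reference
        std::printf("loaded: %#llx\n", (unsigned long long) load_ref(&field));
        std::printf("field:  %#llx\n", (unsigned long long) field);   // healed
    }

Inserting these barriers only after loop optimizations means the optimizer works on barrier-free loads, which is the point of the whole patch.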

@@ -0,0 +1,28 @@
From e25b331a945301e24429c120bef1ed0daf04d49c Mon Sep 17 00:00:00 2001
Date: Fri, 3 Apr 2020 17:12:16 +0800
Subject: [PATCH] ZGC: aarch64: Fix MR 32, fix system call number of
memfd_create
Summary: <gc>: <memfd_create on aarch64 always fails because the system call number is wrong>
LLT: N/A
Bug url: N/A
---
src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp
index 47894b5..f956b53 100644
--- a/src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp
@@ -51,7 +51,7 @@
// Support for building on older Linux systems
#ifndef __NR_memfd_create
-#define __NR_memfd_create 319
+#define __NR_memfd_create 279
#endif
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
--
1.8.3.1
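
The bug is easy to reproduce outside the JVM: syscall numbers are per-architecture, and __NR_memfd_create is 319 on x86_64 but 279 in the asm-generic table that aarch64 uses, so the old fallback define made every call fail on AArch64. A small Linux-only C++ probe in the same fallback-define style (illustrative; mirrors the pattern in zBackingFile):

    #include <sys/syscall.h>
    #include <unistd.h>
    #include <cerrno>
    #include <cstdio>
    #include <cstring>

    // Fallback defines for old headers -- the number differs per
    // architecture, which is exactly what the patch fixes.
    #ifndef __NR_memfd_create
    # if defined(__x86_64__)
    #  define __NR_memfd_create 319   // x86_64 syscall table
    # else
    #  define __NR_memfd_create 279   // asm-generic table used by aarch64
    # endif
    #endif

    int main() {
        long fd = syscall(__NR_memfd_create, "probe", 0 /* no flags */);
        if (fd < 0) {
            // With the wrong number the call fails (typically ENOSYS), so
            // ZGC's heap backing file could never be created on aarch64.
            std::printf("memfd_create failed: %s\n", std::strerror(errno));
            return 1;
        }
        std::printf("memfd_create ok, fd=%ld\n", fd);
        close((int) fd);
        return 0;
    }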

@@ -5,15 +5,16 @@ Subject: [PATCH] ZGC: aarch64: not using zr register avoid sigill in
Summary: <gc>: <the ldp instruction doesn't support two identical registers>
LLT: jtreg
Bug url: NA
Bug url:
---
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp | 46 +++++++++++++---------
1 file changed, 24 insertions(+), 22 deletions(-)
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp | 48 +++++++++++++---------
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 611f13b0e..6db979b57 100644
index 611f13b0e..a65a605d0 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -2100,57 +2100,59 @@ int MacroAssembler::pop(unsigned int bitset, Register stack) {
@@ -2100,58 +2100,66 @@ int MacroAssembler::pop(unsigned int bitset, Register stack) {
// Push lots of registers in the bit set supplied. Don't push sp.
// Return the number of words pushed
int MacroAssembler::push_fp(unsigned int bitset, Register stack) {
@@ -33,19 +34,19 @@ index 611f13b0e..6db979b57 100644
- // Always pushing full 128 bit registers.
- if (count) {
- stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -count * wordSize * 2)));
- words_pushed += 2;
+ if (!count) {
+ return 0;
}
- for (int i = 2; i < count; i += 2) {
+
+ add(stack, stack, -count * wordSize * 2);
+ }
+
+ if (count & 1) {
+ strq(as_FloatRegister(regs[0]), Address(stack));
+ strq(as_FloatRegister(regs[0]), Address(pre(stack, -count * wordSize * 2)));
+ i += 1;
+ }
+ } else {
stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -count * wordSize * 2)));
- words_pushed += 2;
+ i += 2;
}
- for (int i = 2; i < count; i += 2) {
+
+ for (; i < count; i += 2) {
stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
@@ -72,26 +73,34 @@ index 611f13b0e..6db979b57 100644
- count &= ~1;
- for (int i = 2; i < count; i += 2) {
- ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
- words_pushed += 2;
+ if (!count) {
+ return 0;
}
- if (count) {
- ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, count * wordSize * 2)));
- words_pushed += 2;
+ }
+
+ if (count & 1) {
+ ldrq(as_FloatRegister(regs[0]), Address(stack));
+ i += 1;
+ } else {
+ i += 2;
+ }
+
+ for (; i < count; i += 2) {
ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
- words_pushed += 2;
}
- if (count) {
+
+ if ((count & 1) == 0) {
ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, count * wordSize * 2)));
- words_pushed += 2;
+ } else {
+ ldrq(as_FloatRegister(regs[0]), Address(post(stack, count * wordSize * 2)));
}
- assert(words_pushed == count, "oops, pushed != count");
+ for (; i < count; i += 2) {
+ ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
+ }
+
+ add(stack, stack, count * wordSize * 2);
-
return count;
}
--
2.12.3
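
As the patch header notes, ldp cannot load the same register twice, so padding an odd register count with a duplicate (or zr) pair provoked SIGILL. The rewritten loops instead peel one strq/ldrq off an odd count and pair the rest. A sketch of the push-side pairing decision (plain C++ that prints the planned instruction sequence instead of emitting code; offsets assume 128-bit registers, i.e. wordSize * 2 = 16 bytes):

    #include <cstdio>
    #include <vector>

    // Plan a store sequence that never pairs a register with itself:
    // an odd count gets one strq, everything else is stpq pairs.
    static void push_fp_plan(const std::vector<int>& regs) {
        int count = (int) regs.size(), i = 0;
        if (count == 0) return;
        if (count & 1) {                   // peel the unpaired register
            std::printf("strq v%d, [sp, #-%d]!\n", regs[0], count * 16);
            i = 1;
        } else {
            std::printf("stpq v%d, v%d, [sp, #-%d]!\n",
                        regs[0], regs[1], count * 16);
            i = 2;
        }
        for (; i < count; i += 2) {        // remaining registers pair cleanly
            std::printf("stpq v%d, v%d, [sp, #%d]\n",
                        regs[i], regs[i + 1], i * 16);
        }
    }

    int main() {
        std::printf("even count:\n"); push_fp_plan({0, 1, 2, 3});
        std::printf("odd count:\n");  push_fp_plan({0, 1, 2});  // no v0,v0 pair
    }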

@@ -6,10 +6,10 @@
# Examples:
#
# Produce release *and* slowdebug builds on x86_64 (default):
# $ rpmbuild -ba java-1.8.0-openjdk.spec
# $ rpmbuild -ba java-11-openjdk.spec
#
# Produce only release builds (no slowdebug builds) on x86_64:
# $ rpmbuild -ba java-1.8.0-openjdk.spec --without slowdebug
# $ rpmbuild -ba java-11-openjdk.spec --without slowdebug
#
# Only produce a release build on x86_64:
# $ fedpkg mockbuild --without slowdebug
@@ -172,7 +172,7 @@
# Standard JPackage naming and versioning defines
%global origin openjdk
%global origin_nice OpenJDK
%global updatever 6
%global updatever 7
%global minorver 0
%global buildver 10
%global top_level_dir_name %{origin}
@@ -801,7 +801,7 @@ Provides: java-%{javaver}-%{origin}-src%{?1} = %{epoch}:%{version}-%{release}
Name: java-%{javaver}-%{origin}
Version: %{fulljavaver}
Release: 2
Release: 3
# java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons
# and this change was brought into RHEL-4. java-1.5.0-ibm packages
# also included the epoch in their virtual provides. This created a
@@ -850,12 +850,9 @@ Source0: openjdk-%{fulljavaver}-ga.tar.xz
Patch1: change-vendor-to-openEuler_Community.patch
Patch2: 8225648-TESTBUG-java-lang-annotation-loaderLeak-Main.patch
Patch3: 8231584-Deadlock-with-ClassLoader.findLibrary-and-Sy.patch
Patch4: 8214345-infinite-recursion-while-checking-super-clas.patch
Patch5: Add-ability-to-configure-third-port-for-remote-JMX.patch
Patch6: 8214527-AArch64-ZGC-for-Aarch64.patch
Patch7: 8224675-Late-GC-barrier-insertion-for-ZGC.patch
Patch8: freetype-seeks-to-index-at-the-end-of-the-fo.patch
Patch9: ZGC-Redesign-C2-load-barrier-to-expand-on-th.patch
Patch10: ZGC-aarch64-not-using-zr-register-avoid-sigill-in-Ma.patch
Patch11: 8217856-ZGC-Break-out-C2-matching-rules-into-separat.patch
@@ -863,11 +860,13 @@ Patch12: 8233073-Make-BitMap-accessors-more-memory-ordering-f.patch
Patch13: 8233061-ZGC-Enforce-memory-ordering-in-segmented-bit.patch
Patch14: Add-loadload-membar-to-avoid-loading-a-incorrect-offset.patch
Patch15: 8226536-Catch-OOM-from-deopt-that-fails-rematerializ.patch
Patch16: prohibition-of-irreducible-loop-in-mergers.patch
Patch18: 8209375-ZGC-Use-dynamic-base-address-for-mark-stack-.patch
Patch20: 8209894-ZGC-Cap-number-of-GC-workers-based-on-heap-s.patch
Patch22: 8233506-ZGC-the-load-for-Reference.get-can-be-conver.patch
Patch23: add-missing-inline.patch
Patch24: 8210303-VM_HandshakeAllThreads-fails-assert-with-fai.patch
Patch25: 8212933-Thread-SMR-requesting-a-VM-operation-whilst-.patch
Patch26: ZGC-aarch64-fix-system-call-number-of-memfd_create.patch
BuildRequires: autoconf
BuildRequires: automake
@@ -1123,12 +1122,9 @@ pushd %{top_level_dir_name}
# OpenJDK patches
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%patch5 -p1
%patch6 -p1
%patch7 -p1
%patch8 -p1
%patch9 -p1
%patch10 -p1
%patch11 -p1
@@ -1136,11 +1132,13 @@ pushd %{top_level_dir_name}
%patch13 -p1
%patch14 -p1
%patch15 -p1
%patch16 -p1
%patch18 -p1
%patch20 -p1
%patch22 -p1
%patch23 -p1
%patch24 -p1
%patch25 -p1
%patch26 -p1
%build
# How many CPU's do we have?
@@ -1161,8 +1159,8 @@ export CFLAGS="$CFLAGS -mieee"
# We use ourcppflags because the OpenJDK build seems to
# pass EXTRA_CFLAGS to the HotSpot C++ compiler...
# Explicitly set the C++ standard as the default has changed on GCC >= 6
EXTRA_CFLAGS="%ourcppflags -std=gnu++98 -Wno-error -fno-delete-null-pointer-checks -fno-lifetime-dse"
EXTRA_CPP_FLAGS="%ourcppflags -std=gnu++98 -fno-delete-null-pointer-checks -fno-lifetime-dse"
EXTRA_CFLAGS="%ourcppflags -Wno-error -fno-delete-null-pointer-checks -fno-lifetime-dse"
EXTRA_CPP_FLAGS="%ourcppflags -std=gnu++98 -Wno-error -fno-delete-null-pointer-checks -fno-lifetime-dse"
%ifarch %{power64} ppc
# fix rpmlint warnings
@@ -1610,5 +1608,11 @@ require "copy_jdk_configs.lua"
%changelog
* Fri April 3 2020 jvmboy <hedongbo@huawei.com> - 1:11.0.ea.28-1
* Thu May 21 2020 jdkboy <guoge1@huawei.com> - 1:11.0.7.10-3
- Update to 11.0.7+10 (GA)
* Tue Apr 28 2020 jdkboy <guoge1@huawei.com> - 1:11.0.6.10-2
- Adjust some patches
* Sun Apr 26 2020 Noah <hedongbo@huawei.com> - 1:11.0.6.10-1
- Initial build from OpenJDK 11.0.6