From 4aea44c0438e3b635b6bea94869fd92ce96713a9 Mon Sep 17 00:00:00 2001
Date: Wed, 29 Jun 2022 10:00:02 +0800
Subject: Apply TBI to ZGC of JDK17

---
 src/hotspot/cpu/aarch64/aarch64.ad            |  13 ++-
 .../cpu/aarch64/c1_LIRAssembler_aarch64.cpp   |   5 +
 .../cpu/aarch64/c1_LIRAssembler_aarch64.hpp   |   9 +-
 .../cpu/aarch64/compiledIC_aarch64.cpp        |   9 +-
 .../gc/shared/barrierSetNMethod_aarch64.cpp   |  80 ++++++++++----
 .../gc/z/zBarrierSetAssembler_aarch64.cpp     |  20 ++++
 .../gc/z/zBarrierSetAssembler_aarch64.hpp     |   2 +
 .../cpu/aarch64/gc/z/zGlobals_aarch64.cpp     | 102 ++++++++++++++++++
 .../cpu/aarch64/gc/z/zGlobals_aarch64.hpp     |   5 +
 src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad     |   8 +-
 .../cpu/aarch64/globalDefinitions_aarch64.hpp |   3 +
 src/hotspot/cpu/aarch64/globals_aarch64.hpp   |   5 +-
 .../aarch64/jvmciCodeInstaller_aarch64.cpp    |  16 ++-
 .../cpu/aarch64/macroAssembler_aarch64.cpp    |  40 +++++--
 .../cpu/aarch64/nativeInst_aarch64.cpp        |  31 +++++-
 .../cpu/aarch64/nativeInst_aarch64.hpp        |  16 ++-
 .../cpu/aarch64/vm_version_aarch64.cpp        |  28 +++++
 src/hotspot/os/linux/gc/z/zNUMA_linux.cpp     |   4 +
 .../gc/z/zPhysicalMemoryBacking_linux.cpp     |  61 ++++++++++-
 src/hotspot/share/asm/codeBuffer.cpp          |   3 +-
 src/hotspot/share/code/relocInfo.hpp          |  29 ++++-
 src/hotspot/share/gc/z/vmStructs_z.cpp        |   1 +
 src/hotspot/share/gc/z/vmStructs_z.hpp        |   3 +
 src/hotspot/share/gc/z/zAddress.cpp           |   6 ++
 src/hotspot/share/gc/z/zAddress.hpp           |   3 +
 src/hotspot/share/gc/z/zAddress.inline.hpp    |  30 ++++--
 src/hotspot/share/gc/z/zBarrierSetNMethod.cpp |   7 ++
 src/hotspot/share/gc/z/zGlobals.cpp           |   1 +
 src/hotspot/share/gc/z/zGlobals.hpp           |   1 +
 src/hotspot/share/gc/z/zMark.cpp              |   7 ++
 src/hotspot/share/gc/z/zMarkStackEntry.hpp    |   3 +-
 src/hotspot/share/gc/z/zNMethod.cpp           |  65 ++++++++++-
 src/hotspot/share/gc/z/zNMethod.hpp           |   6 +-
 src/hotspot/share/gc/z/zNMethodData.cpp       |  12 +++
 src/hotspot/share/gc/z/zNMethodData.hpp       |   9 ++
 src/hotspot/share/gc/z/zPhysicalMemory.cpp    |  54 ++++++++++
 src/hotspot/share/gc/z/zVirtualMemory.cpp     |  15 +++
 .../writers/jfrStreamWriterHost.inline.hpp    |   3 +
 src/hotspot/share/prims/jni.cpp               |   6 ++
 .../sun/jvm/hotspot/gc/z/ZAddress.java        |  15 ++-
 .../sun/jvm/hotspot/gc/z/ZCollectedHeap.java  |   3 +
 .../sun/jvm/hotspot/gc/z/ZGlobals.java        |   4 +
 .../hotspot/gc/z/ZGlobalsForVMStructs.java    |   6 ++
 .../classes/sun/jvm/hotspot/gc/z/ZUtils.java  |   5 +
 44 files changed, 685 insertions(+), 69 deletions(-)

diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 91ea50c00..b3d89863e 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1750,7 +1750,11 @@ int MachCallStaticJavaNode::ret_addr_offset()
 
 int MachCallDynamicJavaNode::ret_addr_offset()
 {
-  return 16; // movz, movk, movk, bl
+  if (!UseTBI) {
+    return 16; // movz, movk, movk, bl
+  } else {
+    return 20; // movz, movk, movk, movk, bl
+  }
 }
 
 int MachCallRuntimeNode::ret_addr_offset() {
@@ -1768,7 +1772,12 @@ int MachCallRuntimeNode::ret_addr_offset() {
   if (cb) {
     return 1 * NativeInstruction::instruction_size;
   } else {
-    return 6 * NativeInstruction::instruction_size;
+    // lea emits 3 instructions, or 4 when the extra movk is needed.
+    if (!UseTBI) {
+      return 6 * NativeInstruction::instruction_size;
+    } else {
+      return 7 * NativeInstruction::instruction_size;
+    }
   }
 }

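The return values above are plain instruction counts times the fixed 4-byte AArch64 instruction width. A minimal sketch of that arithmetic (the helper below is hypothetical, not part of the patch):

    // Hypothetical recomputation of MachCallDynamicJavaNode::ret_addr_offset().
    constexpr int ret_addr_offset_bytes(bool use_tbi) {
      const int insn = 4;                // every AArch64 instruction is 4 bytes
      const int movs = use_tbi ? 4 : 3;  // movz plus two or three movk
      return (movs + 1) * insn;          // plus the bl itself
    }
    static_assert(ret_addr_offset_bytes(false) == 16, "movz, movk, movk, bl");
    static_assert(ret_addr_offset_bytes(true)  == 20, "movz, movk, movk, movk, bl");
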
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 5ce3ecf9e..c69f8aaf1 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -94,6 +94,11 @@ static void select_different_registers(Register preserve,
   assert_different_registers(preserve, tmp1, tmp2, tmp3);
 }
 
+int LIR_Assembler::_call_stub_size = 13 * NativeInstruction::instruction_size;
+
+void LIR_Assembler::init_for_tbi() {
+  _call_stub_size = 15 * NativeInstruction::instruction_size;
+}
 
 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { Unimplemented(); return false; }

diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
index c31ad9b21..d39f57087 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -70,16 +70,19 @@ friend class ArrayCopyStub;
 
   void deoptimize_trap(CodeEmitInfo *info);
 
+  // call stub: CompiledStaticCall::to_interp_stub_size() +
+  //            CompiledStaticCall::to_trampoline_stub_size()
+  static int _call_stub_size;
+
   enum {
-    // call stub: CompiledStaticCall::to_interp_stub_size() +
-    //            CompiledStaticCall::to_trampoline_stub_size()
-    _call_stub_size = 13 * NativeInstruction::instruction_size,
     _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
     _deopt_handler_size = 7 * NativeInstruction::instruction_size
   };
 
 public:
 
+  static void init_for_tbi();
+
   void store_parameter(Register r, int offset_from_esp_in_words);
   void store_parameter(jint c, int offset_from_esp_in_words);
   void store_parameter(jobject c, int offset_from_esp_in_words);
diff --git a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
index e922fc1cd..c6a9d8387 100644
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
@@ -71,8 +71,13 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
 #undef __
 
 int CompiledStaticCall::to_interp_stub_size() {
-  // isb; movk; movz; movz; movk; movz; movz; br
-  return 8 * NativeInstruction::instruction_size;
+  if (!UseTBI) {
+    // isb; movk; movz; movz; movk; movz; movz; br
+    return 8 * NativeInstruction::instruction_size;
+  } else {
+    // emit_to_interp_stub will emit 2 extra movk instructions.
+    return 10 * NativeInstruction::instruction_size;
+  }
 }
 
 int CompiledStaticCall::to_trampoline_stub_size() {
diff --git a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
index 8598fb7e7..4942dca93 100644
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
@@ -34,25 +34,6 @@
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 
-class NativeNMethodBarrier: public NativeInstruction {
-  address instruction_address() const { return addr_at(0); }
-
-  int *guard_addr() {
-    return reinterpret_cast<int*>(instruction_address() + 10 * 4);
-  }
-
-public:
-  int get_value() {
-    return Atomic::load_acquire(guard_addr());
-  }
-
-  void set_value(int value) {
-    Atomic::release_store(guard_addr(), value);
-  }
-
-  void verify() const;
-};
-
 // Store the instruction bitmask, bits and name for checking the barrier.
 struct CheckInsn {
   uint32_t mask;
@@ -60,7 +41,20 @@ struct CheckInsn {
   const char *name;
 };
 
-static const struct CheckInsn barrierInsn[] = {
+static const struct CheckInsn barrierInsn48[] = {
+  { 0xff000000, 0x18000000, "ldr (literal)" },
+  { 0xfffff0ff, 0xd50330bf, "dmb" },
+  { 0xffc00000, 0xb9400000, "ldr"},
+  { 0x7f20001f, 0x6b00001f, "cmp"},
+  { 0xff00001f, 0x54000000, "b.eq"},
+  { 0xff800000, 0xd2800000, "mov"},
+  { 0xff800000, 0xf2800000, "movk"},
+  { 0xff800000, 0xf2800000, "movk"},
+  { 0xfffffc1f, 0xd63f0000, "blr"},
+  { 0xfc000000, 0x14000000, "b"}
+};
+
+static const struct CheckInsn barrierInsn64[] = {
   { 0xff000000, 0x18000000, "ldr (literal)" },
   { 0xfffff0ff, 0xd50330bf, "dmb" },
   { 0xffc00000, 0xb9400000, "ldr"},
@@ -69,16 +63,52 @@ static const struct CheckInsn barrierInsn[] = {
   { 0xff800000, 0xd2800000, "mov"},
   { 0xff800000, 0xf2800000, "movk"},
   { 0xff800000, 0xf2800000, "movk"},
+  { 0xff800000, 0xf2800000, "movk"},
   { 0xfffffc1f, 0xd63f0000, "blr"},
   { 0xfc000000, 0x14000000, "b"}
 };
 
+static const unsigned int barrier_inst_len48 = sizeof(barrierInsn48) / sizeof(struct CheckInsn);
+static const unsigned int barrier_inst_len64 = sizeof(barrierInsn64) / sizeof(struct CheckInsn);
+
+static int get_entry_barrier_size() {
+  return UseTBI ? (4 * (int)barrier_inst_len64) : (4 * (int)barrier_inst_len48);
+}
+
+class NativeNMethodBarrier: public NativeInstruction {
+  address instruction_address() const { return addr_at(0); }
+
+  int *guard_addr() {
+    return reinterpret_cast<int*>(instruction_address() + get_entry_barrier_size());
+  }
+
+public:
+  int get_value() {
+    return Atomic::load_acquire(guard_addr());
+  }
+
+  void set_value(int value) {
+    Atomic::release_store(guard_addr(), value);
+  }
+
+  void verify() const;
+};
+
 // The encodings must match the instructions emitted by
 // BarrierSetAssembler::nmethod_entry_barrier. The matching ignores the specific
 // register numbers and immediate values in the encoding.
 void NativeNMethodBarrier::verify() const {
+  const CheckInsn *barrierInsn;
+  unsigned int barrier_inst_len;
+  if (UseTBI) {
+    barrierInsn = barrierInsn64;
+    barrier_inst_len = barrier_inst_len64;
+  } else {
+    barrierInsn = barrierInsn48;
+    barrier_inst_len = barrier_inst_len48;
+  }
   intptr_t addr = (intptr_t) instruction_address();
-  for(unsigned int i = 0; i < sizeof(barrierInsn)/sizeof(struct CheckInsn); i++ ) {
+  for(unsigned int i = 0; i < barrier_inst_len; i++) {
     uint32_t inst = *((uint32_t*) addr);
     if ((inst & barrierInsn[i].mask) != barrierInsn[i].bits) {
       tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", addr, inst);
@@ -132,10 +162,14 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
 // not find the expected native instruction at this offset, which needs updating.
 // Note that this offset is invariant of PreserveFramePointer.
 
-static const int entry_barrier_offset = -4 * 11;
+// offset = entry barrier insns (10 or 11, depending on UseTBI) + int32 (guard value)
+// @see BarrierSetAssembler::nmethod_entry_barrier
+static int entry_barrier_offset() {
+  return -(get_entry_barrier_size() + 4);
+}
 
 static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
-  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
+  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset();
   NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
   debug_only(barrier->verify());
   return barrier;
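entry_barrier_offset() replaces the old hard-coded -4 * 11: the int32 guard word sits directly after the barrier instructions, so the distance back from frame_complete is the barrier size plus 4 bytes. The arithmetic, spelled out (hypothetical helper, not patch code):

    constexpr int entry_barrier_offset_bytes(bool use_tbi) {
      const int insns = use_tbi ? 11 : 10;  // barrierInsn64 vs barrierInsn48 rows
      return -(insns * 4 + 4);              // instructions + the guard int32
    }
    static_assert(entry_barrier_offset_bytes(false) == -44, "matches the old -4 * 11");
    static_assert(entry_barrier_offset_bytes(true)  == -48, "one extra movk");
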
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
index 6b42982ed..cafd4e58f 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp
@@ -203,6 +203,26 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
   BLOCK_COMMENT("} ZBarrierSetAssembler::try_resolve_jobject_in_native");
 }
 
+static void change_immediate(uint32_t& instr, uint32_t imm, uint32_t start, uint32_t end) {
+  uint32_t imm_mask = ((1u << start) - 1u) ^ ((1u << (end + 1)) - 1u);
+  instr &= ~imm_mask;
+  instr |= imm << start;
+}
+
+void ZBarrierSetAssembler::patch_barrier_relocation(address addr) {
+  uint32_t* const patch_addr = (uint32_t*)addr;
+
+  // The next 3 insns should be movz, andr, cbnz.
+  assert(nativeInstruction_at(addr)->is_movz() &&
+         Instruction_aarch64::extract(*(patch_addr + 1), 30, 24) == 0b0001010 &&
+         Instruction_aarch64::extract(*(patch_addr + 2), 31, 24) == 0b10110101,
+         "wrong insns in barrier patch");
+
+  change_immediate(*patch_addr, (uint16_t) (ZAddressBadMask >> 48), 5, 20);
+  OrderAccess::fence();
+  ICache::invalidate_word((address)patch_addr);
+}
+
 #ifdef COMPILER1
 
 #undef __
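change_immediate() builds a mask for the inclusive bit range [start, end] by XOR-ing two all-ones prefixes, clears that field, then ORs in the new immediate. A standalone illustration (the movz encoding below is hand-assembled for this note, not taken from the patch):

    #include <cassert>
    #include <cstdint>

    static void change_immediate(uint32_t& instr, uint32_t imm, uint32_t start, uint32_t end) {
      uint32_t imm_mask = ((1u << start) - 1u) ^ ((1u << (end + 1)) - 1u);  // bits start..end
      instr &= ~imm_mask;
      instr |= imm << start;
    }

    int main() {
      uint32_t insn = 0xd2800008;      // movz x8, #0 - the imm16 field is bits 20:5
      change_immediate(insn, 0xabcd, 5, 20);
      assert(insn == 0xd29579a8);      // now movz x8, #0xabcd
      return 0;
    }

This is exactly how patch_barrier_relocation() rewrites the high 16 bits of ZAddressBadMask into the movz emitted by z_load_barrier().
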
diff --git a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
index cca873825..1594ffe64 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.hpp
@@ -77,6 +77,8 @@ public:
                                       Register tmp,
                                       Label& slowpath);
 
+  void patch_barrier_relocation(address addr);
+
 #ifdef COMPILER1
   void generate_c1_load_barrier_test(LIR_Assembler* ce,
                                      LIR_Opr ref) const;
diff --git a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp
index 6aa6d41fe..031014511 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp
@@ -34,6 +34,7 @@
 #include <sys/mman.h>
 #endif // LINUX
 
+// Address layout when using multi-mapping
 //
 // The heap can have three different layouts, depending on the max heap size.
 //
@@ -142,6 +143,97 @@
 //  * 63-48 Fixed (16-bits, always zero)
 //
 
+// Address layout when using TBI (Top Byte Ignore)
+//
+// The heap can have three different layouts, depending on the max heap size.
+//
+// Address Space & Pointer Layout 1
+// --------------------------------
+//
+//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+//  .                                .
+//  .                                .
+//  .                                .
+//  +--------------------------------+ 0x0000080000000000 (8TB)
+//  |              Heap              |
+//  +--------------------------------+ 0x0000040000000000 (4TB)
+//  .                                .
+//  +--------------------------------+ 0x0000000000000000
+//
+//    6  5 5                 4 4
+//    2  9 8                 2 1                                             0
+//  ++----+-------------------+-----------------------------------------------+
+//  0|1111|000 00000000 000000|11 11111111 11111111 11111111 11111111 11111111|
+//  ++----+-------------------+-----------------------------------------------+
+//    |    |                   |
+//    |    |                   * 41-0 Object Offset (42-bits, 4TB address space)
+//    |    |
+//    |    * 58-42 Fixed (17-bits, always zero)
+//    |
+//    * 62-59 Metadata Bits (4-bits)  0001 = Marked0
+//                                    0010 = Marked1
+//                                    0100 = Remapped
+//                                    1000 = Finalizable
+//
+//
+// Address Space & Pointer Layout 2
+// --------------------------------
+//
+//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+//  .                                .
+//  .                                .
+//  .                                .
+//  +--------------------------------+ 0x0000100000000000 (16TB)
+//  |              Heap              |
+//  +--------------------------------+ 0x0000080000000000 (8TB)
+//  .                                .
+//  +--------------------------------+ 0x0000000000000000
+//
+//    6  5 5                4 4
+//    2  9 8                3 2                                              0
+//  ++----+------------------+------------------------------------------------+
+//  0|1111|000 00000000 00000|111 11111111 11111111 11111111 11111111 11111111|
+//  ++----+------------------+------------------------------------------------+
+//    |    |                  |
+//    |    |                  * 42-0 Object Offset (43-bits, 8TB address space)
+//    |    |
+//    |    * 58-43 Fixed (16-bits, always zero)
+//    |
+//    * 62-59 Metadata Bits (4-bits)  0001 = Marked0
+//                                    0010 = Marked1
+//                                    0100 = Remapped
+//                                    1000 = Finalizable
+//
+//
+// Address Space & Pointer Layout 3
+// --------------------------------
+//
+//  +--------------------------------+ 0x00007FFFFFFFFFFF (127TB)
+//  .                                .
+//  .                                .
+//  .                                .
+//  +--------------------------------+ 0x0000200000000000 (32TB)
+//  |              Heap              |
+//  +--------------------------------+ 0x0000100000000000 (16TB)
+//  .                                .
+//  +--------------------------------+ 0x0000000000000000
+//
+//    6  5 5               4 4
+//    2  9 8               4 3                                               0
+//  ++----+-----------------+-------------------------------------------------+
+//  0|1111|000 00000000 0000|1111 11111111 11111111 11111111 11111111 11111111|
+//  ++----+-----------------+-------------------------------------------------+
+//    |    |                 |
+//    |    |                 * 43-0 Object Offset (44-bits, 16TB address space)
+//    |    |
+//    |    * 58-44 Fixed (15-bits, always zero)
+//    |
+//    * 62-59 Metadata Bits (4-bits)  0001 = Marked0
+//                                    0010 = Marked1
+//                                    0100 = Remapped
+//                                    1000 = Finalizable
+//
+
 // Default value if probing is not implemented for a certain platform: 128TB
 static const size_t DEFAULT_MAX_ADDRESS_BIT = 47;
 // Minimum value returned, if probing fails: 64GB
@@ -196,6 +288,13 @@ static size_t probe_valid_max_address_bit() {
 #endif // LINUX
 }
 
+uintptr_t ZPlatformAddressBase() {
+  if (UseTBI) {
+    return (uintptr_t)1 << ZPlatformAddressOffsetBits();
+  }
+  return 0;
+}
+
 size_t ZPlatformAddressOffsetBits() {
   const static size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
   const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
@@ -206,5 +305,8 @@ size_t ZPlatformAddressOffsetBits() {
 }
 
 size_t ZPlatformAddressMetadataShift() {
+  if (UseTBI) {
+    return ZPlatformAddressMetadataShiftForTbi;
+  }
   return ZPlatformAddressOffsetBits();
 }
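Under TBI the heap is no longer multi-mapped: it occupies a single range starting at ZPlatformAddressBase() = 1 << ZPlatformAddressOffsetBits(), and the color bits move up into the TBI byte. Assuming 43 offset bits (Layout 2 above; the value is illustrative), the constants work out as:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t    offset_bits = 43;                           // assumed probe result
    constexpr uintptr_t base        = uintptr_t(1) << offset_bits;  // ZPlatformAddressBase()
    constexpr size_t    shift_mm    = offset_bits;                  // metadata shift, multi-mapping
    constexpr size_t    shift_tbi   = 59;                           // ZPlatformAddressMetadataShiftForTbi
    static_assert(base == 0x0000080000000000, "heap occupies [8TB, 16TB)");
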
diff --git a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp
index 3187808b6..264b51ec1 100644
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp
@@ -28,6 +28,11 @@ const size_t ZPlatformGranuleSizeShift = 21; // 2MB
 const size_t ZPlatformHeapViews       = 3;
 const size_t ZPlatformCacheLineSize   = 64;
 
+// The highest bit (bit 63) of the address is occupied by StackWatermarkState (see StackWatermarkState::create).
+// So here we use bits 59-62 as the metadata bits of ZGC.
+const size_t ZPlatformAddressMetadataShiftForTbi = 59;
+
+uintptr_t ZPlatformAddressBase();
 size_t ZPlatformAddressOffsetBits();
 size_t ZPlatformAddressMetadataShift();

diff --git a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
index bd1c2cc9f..426a1cc2a 100644
--- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
@@ -26,6 +26,7 @@ source_hpp %{
 #include "gc/shared/gc_globals.hpp"
 #include "gc/z/c2/zBarrierSetC2.hpp"
 #include "gc/z/zThreadLocalData.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
 
 %}
 
@@ -36,7 +37,12 @@ static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
     return;
   }
   ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, barrier_data);
-  __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+  if (UseTBI) {
+    __ relocate(barrier_Relocation::spec());
+    __ movz(tmp, barrier_Relocation::unpatched, 48);
+  } else {
+    __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+  }
   __ andr(tmp, tmp, ref);
   __ cbnz(tmp, *stub->entry());
   __ bind(*stub->continuation());
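The UseTBI branch changes only where the bad mask comes from: instead of an ldr from thread-local data, the mask's top 16 bits are baked into a movz immediate (initially barrier_Relocation::unpatched, i.e. zero) and re-patched on each GC phase change. In C-like form the fast path computes the following (a sketch, not VM code; the names are illustrative):

    #include <cstdint>

    uintptr_t load_barrier_fast_path(uintptr_t ref, uint16_t bad_mask_top16,
                                     uintptr_t (*slow_path)(uintptr_t)) {
      const uintptr_t tmp = uintptr_t(bad_mask_top16) << 48;  // movz tmp, #imm, lsl #48
      if ((tmp & ref) != 0) {                                 // andr; cbnz
        return slow_path(ref);                                // ZLoadBarrierStubC2 stub
      }
      return ref;                                             // ref is already good
    }
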
diff --git a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
index 3c779bb11..4a2af011f 100644
--- a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
@@ -67,4 +67,7 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
 #define NOT_R18_RESERVED(code) code
 #endif
 
+#define CLEAR_TOP_BYTE(addr) ((addr) & ((1UL << 56) - 1))
+#define CLEAR_COLOR_BITS(addr) (UseTBI ? CLEAR_TOP_BYTE(addr) : (addr))
+
 #endif // CPU_AARCH64_GLOBALDEFINITIONS_AARCH64_HPP
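CLEAR_TOP_BYTE works because hardware TBI makes bits 63:56 of a pointer invisible to address translation; the macro only has to strip them before the address reaches code (mmap, madvise, get_mempolicy) that expects a canonical value. A minimal demonstration of the aliasing this relies on (assumes Linux/AArch64, where TBI is enabled for user space):

    #include <cassert>
    #include <cstdint>
    #include <sys/mman.h>

    int main() {
      char* p = static_cast<char*>(mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
      *p = 42;
      // Set a ZGC-style color bit (bit 59) inside the ignored top byte.
      char* colored = reinterpret_cast<char*>(
          reinterpret_cast<uintptr_t>(p) | (uintptr_t(1) << 59));
      assert(*colored == 42);  // the load ignores bits 63:56
      return 0;
    }
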
diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
index fefc2e5c3..cae9fb60c 100644
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -119,7 +119,10 @@ define_pd_global(intx, InlineSmallCode, 1000);
   product(uint, OnSpinWaitInstCount, 1, DIAGNOSTIC,               \
           "The number of OnSpinWaitInst instructions to generate." \
           "It cannot be used with OnSpinWaitInst=none.")           \
-          range(1, 99)
+          range(1, 99)                                              \
+  product(bool, UseTBI, false, EXPERIMENTAL,                        \
+          "Use the \"Top Byte Ignore\" feature for ZGC, "           \
+          "and use 64-bit literal addresses instead of 48-bit.")
 
 // end of ARCH_FLAGS

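Being EXPERIMENTAL, and valid only together with ZGC (see VM_Version::initialize() further down), the flag would presumably be enabled with an invocation along these lines:

    java -XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:+UseTBI ...
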
diff --git a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp
index 17b978012..b52b3f2b3 100644
--- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp
@@ -57,9 +57,19 @@ void CodeInstaller::pd_patch_OopConstant(int pc_offset, JVMCIObject constant, JV
     assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
            nativeInstruction_at(pc+4)->is_movk(), "wrong insn in patch");
   } else {
-    // Move wide constant: movz n, movk, movk.
-    assert(nativeInstruction_at(pc+4)->is_movk()
-           && nativeInstruction_at(pc+8)->is_movk(), "wrong insn in patch");
+    if (!UseTBI) {
+      // Move wide constant: movz n, movk, movk.
+      assert(nativeInstruction_at(pc+4)->is_movk()
+             && nativeInstruction_at(pc+8)->is_movk(), "wrong insn in patch");
+    } else {
+      // Move wide constant: movz n, movk, movk, movk.
+      // JVMCI would require the code generated for it to use 64-bit literals,
+      // so UseTBI is disabled when JVMCI is enabled, until such support exists.
+      // @see src/hotspot/cpu/aarch64/vm_version_aarch64.cpp - VM_Version::initialize()
+      assert(nativeInstruction_at(pc+4)->is_movk()
+             && nativeInstruction_at(pc+8)->is_movk()
+             && nativeInstruction_at(pc+12)->is_movk(), "wrong insn in patch");
+    }
   }
 }
 #endif // ASSERT
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 676a548d0..d89d655af 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -176,9 +176,10 @@ int MacroAssembler::patch_oop(address insn_addr, address o) {
   unsigned insn = *(unsigned*)insn_addr;
   assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
 
-  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
+  // OOPs are either narrow (32 bits) or wide (48 or 64 bits). We encode
   // narrow OOPs by setting the upper 16 bits in the first
   // instruction.
+  // 64 bit addresses are only enabled with UseTBI set.
   if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
     // Move narrow OOP
     uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
@@ -193,6 +194,12 @@ int MacroAssembler::patch_oop(address insn_addr, address o) {
     Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
     Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
     instructions = 3;
+
+    if (UseTBI) {
+      assert(nativeInstruction_at(insn_addr+12)->is_movk(), "wrong insns in patch");
+      Instruction_aarch64::patch(insn_addr+12, 20, 5, (dest >>= 16) & 0xffff);
+      instructions = 4;
+    }
   }
   return instructions * NativeInstruction::instruction_size;
 }
@@ -279,12 +286,18 @@ address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
     }
   } else if (Instruction_aarch64::extract(insn, 31, 23) == 0b110100101) {
     uint32_t *insns = (uint32_t *)insn_addr;
-    // Move wide constant: movz, movk, movk. See movptr().
-    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
-    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
-    return address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
+    // Move wide constant: movz, movk, movk [, movk]. See movptr().
+    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch - 2nd movk missing");
+    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch - 3rd movk missing");
+    uint64_t addr = uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
                    + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
-                   + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
+                   + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32);
+    // With UseTBI, a fourth movk carries bits 63:48 of the target address.
+    if (UseTBI) {
+      assert(nativeInstruction_at(insns+3)->is_movk(), "wrong insns in patch - 4th movk missing.");
+      addr += uint64_t(Instruction_aarch64::extract(insns[3], 20, 5)) << 48;
+    }
+    return address(addr);
   } else if (Instruction_aarch64::extract(insn, 31, 22) == 0b1011100101 &&
              Instruction_aarch64::extract(insn, 4, 0) == 0b11111) {
     return 0;
@@ -1505,10 +1518,9 @@ void MacroAssembler::mov(Register r, Address dest) {
   movptr(r, imm64);
 }
 
-// Move a constant pointer into r. In AArch64 mode the virtual
-// address space is 48 bits in size, so we only need three
-// instructions to create a patchable instruction sequence that can
-// reach anywhere.
+// Move a constant pointer into r. In AArch64 mode the virtual address space
+// is 48 or 52 bits in size. We need three or four instructions to create
+// a patchable instruction sequence that can reach anywhere.
 void MacroAssembler::movptr(Register r, uintptr_t imm64) {
 #ifndef PRODUCT
   {
@@ -1517,12 +1529,18 @@ void MacroAssembler::movptr(Register r, uintptr_t imm64) {
     block_comment(buffer);
   }
 #endif
-  assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
+  if (!UseTBI) {
+    assert(imm64 < (1ull << 48), "48-bit overflow in address constant");
+  }
   movz(r, imm64 & 0xffff);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 16);
  imm64 >>= 16;
  movk(r, imm64 & 0xffff, 32);
+  if (UseTBI) {
+    imm64 >>= 16;
+    movk(r, imm64 & 0xffff, 48);
+  }
 }
 
 // Macro to mov replicated immediate to vector register.
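target_addr_for_insn() is the decode side of movptr(): it reassembles the constant from the imm16 fields (bits 20:5) of the movz/movk chain, with the fourth movk contributing bits 63:48 only under UseTBI. A standalone sketch (the instruction words are hand-assembled for this note):

    #include <cassert>
    #include <cstdint>

    static uint64_t decode_movptr(const uint32_t insns[4], bool use_tbi) {
      auto imm16 = [](uint32_t insn) { return uint64_t((insn >> 5) & 0xffff); };
      uint64_t addr = imm16(insns[0])
                    + (imm16(insns[1]) << 16)
                    + (imm16(insns[2]) << 32);
      if (use_tbi) {
        addr += imm16(insns[3]) << 48;  // fourth movk: bits 63:48
      }
      return addr;
    }

    int main() {
      const uint32_t insns[4] = {
        0xd2824680,  // movz x0, #0x1234
        0xf2aacf00,  // movk x0, #0x5678, lsl #16
        0xf2d35780,  // movk x0, #0x9abc, lsl #32
        0xf2e01bc0,  // movk x0, #0x00de, lsl #48
      };
      assert(decode_movptr(insns, true) == 0x00de9abc56781234ull);
      return 0;
    }
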
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
index d808e4b5b..117686f8c 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp
@@ -234,6 +234,14 @@ void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }
 
 //-------------------------------------------------------------------
 
+// movz, movk, movk.
+int NativeMovConstReg::instruction_size = 3 * NativeInstruction::instruction_size;
+
+void NativeMovConstReg::init_for_tbi() {
+  // movz, movk, movk, movk.
+  instruction_size = 4 * NativeInstruction::instruction_size;
+}
+
 void NativeMovConstReg::verify() {
   if (! (nativeInstruction_at(instruction_address())->is_movz() ||
          is_adrp_at(instruction_address()) ||
@@ -439,8 +447,17 @@ bool NativeInstruction::is_general_jump() {
     NativeInstruction* inst2 = nativeInstruction_at(addr_at(instruction_size * 2));
     if (inst2->is_movk()) {
       NativeInstruction* inst3 = nativeInstruction_at(addr_at(instruction_size * 3));
-      if (inst3->is_blr()) {
-        return true;
+      if (!UseTBI) {
+        if (inst3->is_blr()) {
+          return true;
+        }
+      } else {
+        if (inst3->is_movk()) {
+          NativeInstruction* inst4 = nativeInstruction_at(addr_at(instruction_size * 4));
+          if (inst4->is_blr()) {
+            return true;
+          }
+        }
       }
     }
   }
@@ -496,6 +513,16 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
   ICache::invalidate_range(verified_entry, instruction_size);
 }
 
+// movz, movk, movk, br.
+int NativeGeneralJump::instruction_size = 4 * NativeInstruction::instruction_size;
+int NativeGeneralJump::next_instruction_offset = 4 * NativeInstruction::instruction_size;
+
+void NativeGeneralJump::init_for_tbi() {
+  // movz, movk, movk, movk, br.
+  instruction_size = 5 * NativeInstruction::instruction_size;
+  next_instruction_offset = 5 * NativeInstruction::instruction_size;
+}
+
 void NativeGeneralJump::verify() { }
 
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
diff --git a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
index 75f2797c3..2460c02a1 100644
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp
@@ -271,16 +271,19 @@ inline NativeCall* nativeCall_before(address return_address) {
 class NativeMovConstReg: public NativeInstruction {
 public:
   enum Aarch64_specific_constants {
-    instruction_size = 3 * 4, // movz, movk, movk. See movptr().
     instruction_offset = 0,
     displacement_offset = 0,
   };
 
+  static int instruction_size;
+
+  static void init_for_tbi();
+
   address instruction_address() const { return addr_at(instruction_offset); }
 
   address next_instruction_address() const {
     if (nativeInstruction_at(instruction_address())->is_movz())
-      // Assume movz, movk, movk
+      // Assume movz, movk, movk [, movk].
       return addr_at(instruction_size);
     else if (is_adrp_at(instruction_address()))
       return addr_at(2*4);
@@ -469,12 +472,15 @@ inline NativeJump* nativeJump_at(address address) {
 class NativeGeneralJump: public NativeJump {
 public:
   enum AArch64_specific_constants {
-    instruction_size = 4 * 4,
     instruction_offset = 0,
-    data_offset = 0,
-    next_instruction_offset = 4 * 4
+    data_offset = 0
   };
 
+  static int instruction_size;
+  static int next_instruction_offset;
+
+  static void init_for_tbi();
+
   address jump_destination() const;
   void set_jump_destination(address dest);

diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index 31dfb7727..bcd064cfe 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -31,6 +31,10 @@
 #include "runtime/vm_version.hpp"
 #include "utilities/formatBuffer.hpp"
 #include "utilities/macros.hpp"
+#include "code/nativeInst.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#endif // COMPILER1
 
 #include OS_HEADER_INLINE(os)
 
@@ -66,6 +70,16 @@ static SpinWait get_spin_wait_desc() {
   return SpinWait{};
 }
 
+// Configure instruction sizes for nativeInst_aarch64 and c1_LIRAssembler_aarch64
+// based on flag UseTBI.
+static void init_instruction_sizes_for_tbi() {
+  NativeMovConstReg::init_for_tbi();
+  NativeGeneralJump::init_for_tbi();
+#ifdef COMPILER1
+  LIR_Assembler::init_for_tbi();
+#endif // COMPILER1
+}
+
 void VM_Version::initialize() {
   _supports_cx8 = true;
   _supports_atomic_getset4 = true;
@@ -415,6 +429,20 @@ void VM_Version::initialize() {
     UsePopCountInstruction = true;
   }
 
+#if INCLUDE_JVMCI && defined(AARCH64)
+  if (UseTBI && EnableJVMCI) {
+    warning("64-bit Literal Addresses disabled due to EnableJVMCI.");
+    UseTBI = false;
+  }
+#endif
+  if (UseTBI && !UseZGC) {
+    warning("UseTBI only works when UseZGC is on.");
+    UseTBI = false;
+  }
+  if (UseTBI) {
+    init_instruction_sizes_for_tbi();
+  }
+
 #ifdef COMPILER2
   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
     UseMultiplyToLenIntrinsic = true;
diff --git a/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp b/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp
index cfe25549f..cefe5d03d 100644
--- a/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp
@@ -59,6 +59,10 @@ uint32_t ZNUMA::memory_id(uintptr_t addr) {
 
   uint32_t id = (uint32_t)-1;
 
+#ifdef AARCH64
+  addr = CLEAR_COLOR_BITS(addr);
+#endif
+
   if (ZSyscall::get_mempolicy((int*)&id, NULL, 0, (void*)addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
     ZErrno err;
     fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
index 7a1130554..d0ffe93ce 100644
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
@@ -120,6 +120,14 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) :
     _available(0),
     _initialized(false) {
 
+#ifdef AARCH64
+  if (UseTBI) {
+    // Successfully initialized
+    _initialized = true;
+    return;
+  }
+#endif // AARCH64
+
   // Create backing file
   _fd = create_fd(ZFILENAME_HEAP);
   if (_fd == -1) {
@@ -350,7 +358,12 @@ void ZPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
   // However, ZGC tends to create the most mappings and dominate the total count.
   // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
   // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
-  const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2;
+#ifdef AARCH64
+  const size_t required_max_map_multiple = UseTBI ? 1 : 3;
+#else
+  const size_t required_max_map_multiple = 3;
+#endif // AARCH64
+  const size_t required_max_map_count = (max_capacity / ZGranuleSize) * required_max_map_multiple * 1.2;
   if (actual_max_map_count < required_max_map_count) {
     log_warning_p(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
     log_warning_p(gc)("The system limit on number of memory mappings per process might be too low for the given");
@@ -363,6 +376,13 @@ void ZPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const {
 }
 
 void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
+#ifdef AARCH64
+  if (UseTBI) {
+    // Warn if max map count is too low
+    warn_max_map_count(max_capacity);
+    return;
+  }
+#endif // AARCH64
   // Warn if available space is too low
   warn_available_space(max_capacity);
 
@@ -681,6 +701,13 @@ size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) cons
 }
 
 size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
+#ifdef AARCH64
+  if (UseTBI) {
+    // do nothing
+    return length;
+  }
+#endif // AARCH64
+
   if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) {
     // To get granule-level NUMA interleaving when using non-large pages,
     // we must explicitly interleave the memory at commit/fallocate time.
@@ -693,6 +720,12 @@ size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const {
 size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
   log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
                       offset / M, (offset + length) / M, length / M);
+#ifdef AARCH64
+  if (UseTBI) {
+    // Not yet supported.
+    return length;
+  }
+#endif // AARCH64
 
   const ZErrno err = fallocate(true /* punch_hole */, offset, length);
   if (err) {
@@ -704,6 +737,29 @@ size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const {
 }
 
 void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
+#ifdef AARCH64
+  if (UseTBI) {
+    int flags = MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE;
+    if (ZLargePages::is_explicit()) {
+      flags |= MAP_HUGETLB;
+    }
+    const void* const res = mmap((void*) CLEAR_TOP_BYTE(addr), size, PROT_READ | PROT_WRITE, flags, 0, 0);
+    if (res == MAP_FAILED) {
+      ZErrno err;
+      fatal("Failed to map memory (%s)", err.to_string());
+    }
+
+    // Advise on use of transparent huge pages before touching it
+    if (ZLargePages::is_transparent()) {
+      if (madvise((void*) CLEAR_TOP_BYTE(addr), size, MADV_HUGEPAGE) == -1) {
+        ZErrno err;
+        log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
+      }
+    }
+    return;
+  }
+#endif // AARCH64
+
   const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset);
   if (res == MAP_FAILED) {
     ZErrno err;
@@ -712,6 +768,9 @@ void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size)
 }
 
 void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
+#ifdef AARCH64
+  addr = CLEAR_COLOR_BITS(addr);
+#endif // AARCH64
   // Note that we must keep the address space reservation intact and just detach
   // the backing memory. For this reason we map a new anonymous, non-accessible
   // and non-reserved page over the mapping instead of actually unmapping.
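With UseTBI each granule is mapped once instead of once per heap view, which is why the estimated mapping multiple drops from 3 to 1. Worked numbers, for a hypothetical 16GB heap with 2MB granules:

    constexpr size_t granules   = (16ull << 30) / (2ull << 20);  // 8192 granules
    constexpr size_t with_views = size_t(granules * 3 * 1.2);    // 29491 mappings budgeted
    constexpr size_t with_tbi   = size_t(granules * 1 * 1.2);    // 9830 mappings budgeted
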
diff --git a/src/hotspot/share/asm/codeBuffer.cpp b/src/hotspot/share/asm/codeBuffer.cpp
index 0012152d4..c89c8ee3b 100644
--- a/src/hotspot/share/asm/codeBuffer.cpp
+++ b/src/hotspot/share/asm/codeBuffer.cpp
@@ -331,7 +331,8 @@ void CodeSection::relocate(address at, RelocationHolder const& spec, int format)
          rtype == relocInfo::runtime_call_type ||
          rtype == relocInfo::internal_word_type ||
          rtype == relocInfo::section_word_type ||
-         rtype == relocInfo::external_word_type,
+         rtype == relocInfo::external_word_type ||
+         rtype == relocInfo::barrier_type,
          "code needs relocation information");
   // leave behind an indication that we attempted a relocation
   DEBUG_ONLY(_locs_start = _locs_limit = (relocInfo*)badAddress);
diff --git a/src/hotspot/share/code/relocInfo.hpp b/src/hotspot/share/code/relocInfo.hpp
index 55d4ac7c6..57d2126fb 100644
--- a/src/hotspot/share/code/relocInfo.hpp
+++ b/src/hotspot/share/code/relocInfo.hpp
@@ -268,7 +268,8 @@ class relocInfo {
     trampoline_stub_type    = 13, // stub-entry for trampoline
     runtime_call_w_cp_type  = 14, // Runtime call which may load its target from the constant pool
     data_prefix_tag         = 15, // tag for a prefix (carries data arguments)
-    type_mask               = 15  // A mask which selects only the above values
+    barrier_type            = 16, // GC barrier data
+    type_mask               = 31  // A mask which selects only the above values
   };
 
  private:
@@ -307,12 +308,13 @@ class relocInfo {
     visitor(poll_return) \
     visitor(section_word) \
     visitor(trampoline_stub) \
+    visitor(barrier) \


 public:
   enum {
     value_width             = sizeof(unsigned short) * BitsPerByte,
-    type_width              = 4,   // == log2(type_mask+1)
+    type_width              = 5,   // == log2(type_mask+1)
     nontype_width           = value_width - type_width,
     datalen_width           = nontype_width-1,
     datalen_tag             = 1 << datalen_width,  // or-ed into _value
@@ -994,6 +996,29 @@ class metadata_Relocation : public DataRelocation {
   // Note: metadata_value transparently converts Universe::non_metadata_word to NULL.
 };
 
+class barrier_Relocation : public DataRelocation {
+
+ public:
+  // The uninitialized value used before the relocation has been patched.
+  // Code assumes that the unpatched value is zero.
+  static const int16_t unpatched = 0;
+
+  static RelocationHolder spec() {
+    RelocationHolder rh = newHolder();
+    new(rh) barrier_Relocation();
+    return rh;
+  }
+
+ private:
+  friend class RelocIterator;
+  barrier_Relocation() : DataRelocation(relocInfo::barrier_type) { }
+
+ public:
+  virtual int offset() { ShouldNotReachHere(); return 0; }
+  virtual address value() { ShouldNotReachHere(); return NULL; }
+  virtual void set_value(address x) { ShouldNotReachHere(); }
+  virtual void set_value(address x, intptr_t o) { ShouldNotReachHere(); }
+};
 
 class virtual_call_Relocation : public CallRelocation {

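Adding barrier_type = 16 no longer fits the old 4-bit type field, so type_mask widens to 31 and type_width to 5 (log2(31 + 1)), shrinking nontype_width from 12 to 11 of the 16 value bits. The check, spelled out:

    static_assert((16 & 15) == 0,  "the old 4-bit mask would drop barrier_type");
    static_assert((16 & 31) == 16, "the widened 5-bit mask keeps it");
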
diff --git a/src/hotspot/share/gc/z/vmStructs_z.cpp b/src/hotspot/share/gc/z/vmStructs_z.cpp
index c86d11c81..e68077ccb 100644
--- a/src/hotspot/share/gc/z/vmStructs_z.cpp
+++ b/src/hotspot/share/gc/z/vmStructs_z.cpp
@@ -27,6 +27,7 @@
 ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
     _ZGlobalPhase(&ZGlobalPhase),
     _ZGlobalSeqNum(&ZGlobalSeqNum),
+    _ZAddressBase(&ZAddressBase),
    _ZAddressOffsetMask(&ZAddressOffsetMask),
    _ZAddressMetadataMask(&ZAddressMetadataMask),
    _ZAddressMetadataFinalizable(&ZAddressMetadataFinalizable),
diff --git a/src/hotspot/share/gc/z/vmStructs_z.hpp b/src/hotspot/share/gc/z/vmStructs_z.hpp
index 3c0eb9f74..f8754e0cb 100644
--- a/src/hotspot/share/gc/z/vmStructs_z.hpp
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp
@@ -45,6 +45,7 @@ public:
 
   uint32_t* _ZGlobalSeqNum;
 
+  uintptr_t* _ZAddressBase;
   uintptr_t* _ZAddressOffsetMask;
   uintptr_t* _ZAddressMetadataMask;
   uintptr_t* _ZAddressMetadataFinalizable;
@@ -64,6 +65,7 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
   static_field(ZGlobalsForVMStructs,            _instance_p,          ZGlobalsForVMStructs*) \
   nonstatic_field(ZGlobalsForVMStructs,         _ZGlobalPhase,        uint32_t*)             \
   nonstatic_field(ZGlobalsForVMStructs,         _ZGlobalSeqNum,       uint32_t*)             \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZAddressBase,        uintptr_t*)            \
   nonstatic_field(ZGlobalsForVMStructs,         _ZAddressOffsetMask,  uintptr_t*)            \
   nonstatic_field(ZGlobalsForVMStructs,         _ZAddressMetadataMask, uintptr_t*)           \
   nonstatic_field(ZGlobalsForVMStructs,         _ZAddressMetadataFinalizable, uintptr_t*)    \
@@ -120,6 +122,7 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
   declare_constant(ZPageSizeMediumShift)    \
   declare_constant(ZAddressOffsetShift)     \
   declare_constant(ZAddressOffsetBits)      \
+  declare_constant(ZAddressBase)            \
   declare_constant(ZAddressOffsetMask)      \
   declare_constant(ZAddressOffsetMax)

diff --git a/src/hotspot/share/gc/z/zAddress.cpp b/src/hotspot/share/gc/z/zAddress.cpp
index cfa7c04d3..47935d4c1 100644
--- a/src/hotspot/share/gc/z/zAddress.cpp
+++ b/src/hotspot/share/gc/z/zAddress.cpp
@@ -32,6 +32,12 @@ void ZAddress::set_good_mask(uintptr_t mask) {
 }
 
 void ZAddress::initialize() {
+#ifdef AARCH64
+  ZAddressBase = ZPlatformAddressBase();
+#else
+  ZAddressBase = 0;
+#endif
+
   ZAddressOffsetBits = ZPlatformAddressOffsetBits();
   ZAddressOffsetMask = (((uintptr_t)1 << ZAddressOffsetBits) - 1) << ZAddressOffsetShift;
   ZAddressOffsetMax = (uintptr_t)1 << ZAddressOffsetBits;
diff --git a/src/hotspot/share/gc/z/zAddress.hpp b/src/hotspot/share/gc/z/zAddress.hpp
index eddd10418..c9e7449dc 100644
--- a/src/hotspot/share/gc/z/zAddress.hpp
+++ b/src/hotspot/share/gc/z/zAddress.hpp
@@ -52,6 +52,9 @@ public:
   static bool is_remapped(uintptr_t value);
   static bool is_in(uintptr_t value);
 
+#ifdef AARCH64
+  static uintptr_t base(uintptr_t value);
+#endif
   static uintptr_t offset(uintptr_t value);
   static uintptr_t good(uintptr_t value);
   static uintptr_t good_or_null(uintptr_t value);
diff --git a/src/hotspot/share/gc/z/zAddress.inline.hpp b/src/hotspot/share/gc/z/zAddress.inline.hpp
index a151e7182..c8b560490 100644
--- a/src/hotspot/share/gc/z/zAddress.inline.hpp
+++ b/src/hotspot/share/gc/z/zAddress.inline.hpp
@@ -31,6 +31,12 @@
 #include "utilities/macros.hpp"
 #include "utilities/powerOfTwo.hpp"
 
+#ifdef AARCH64
+#define AARCH64_BASE(x) base(x)
+#else
+#define AARCH64_BASE(x) (x)
+#endif
+
 inline bool ZAddress::is_null(uintptr_t value) {
   return value == 0;
 }
@@ -90,7 +96,11 @@ inline bool ZAddress::is_remapped(uintptr_t value) {
 
 inline bool ZAddress::is_in(uintptr_t value) {
   // Check that exactly one non-offset bit is set
+#ifdef AARCH64
+  if (!is_power_of_2(value & ~ZAddressOffsetMask & ~ZAddressBase)) {
+#else
   if (!is_power_of_2(value & ~ZAddressOffsetMask)) {
+#endif
     return false;
   }
 
@@ -98,12 +108,18 @@ inline bool ZAddress::is_in(uintptr_t value) {
   return value & (ZAddressMetadataMask & ~ZAddressMetadataFinalizable);
 }
 
+#ifdef AARCH64
+inline uintptr_t ZAddress::base(uintptr_t value) {
+  return value | ZAddressBase;
+}
+#endif
+
 inline uintptr_t ZAddress::offset(uintptr_t value) {
   return value & ZAddressOffsetMask;
 }
 
 inline uintptr_t ZAddress::good(uintptr_t value) {
-  return offset(value) | ZAddressGoodMask;
+  return AARCH64_BASE(offset(value) | ZAddressGoodMask);
 }
 
 inline uintptr_t ZAddress::good_or_null(uintptr_t value) {
@@ -111,27 +127,29 @@ inline uintptr_t ZAddress::good_or_null(uintptr_t value) {
 }
 
 inline uintptr_t ZAddress::finalizable_good(uintptr_t value) {
-  return offset(value) | ZAddressMetadataFinalizable | ZAddressGoodMask;
+  return AARCH64_BASE(offset(value) | ZAddressMetadataFinalizable | ZAddressGoodMask);
 }
 
 inline uintptr_t ZAddress::marked(uintptr_t value) {
-  return offset(value) | ZAddressMetadataMarked;
+  return AARCH64_BASE(offset(value) | ZAddressMetadataMarked);
 }
 
 inline uintptr_t ZAddress::marked0(uintptr_t value) {
-  return offset(value) | ZAddressMetadataMarked0;
+  return AARCH64_BASE(offset(value) | ZAddressMetadataMarked0);
 }
 
 inline uintptr_t ZAddress::marked1(uintptr_t value) {
-  return offset(value) | ZAddressMetadataMarked1;
+  return AARCH64_BASE(offset(value) | ZAddressMetadataMarked1);
 }
 
 inline uintptr_t ZAddress::remapped(uintptr_t value) {
-  return offset(value) | ZAddressMetadataRemapped;
+  return AARCH64_BASE(offset(value) | ZAddressMetadataRemapped);
 }
 
 inline uintptr_t ZAddress::remapped_or_null(uintptr_t value) {
   return is_null(value) ? 0 : remapped(value);
 }
 
+#undef AARCH64_BASE
+
 #endif // SHARE_GC_Z_ZADDRESS_INLINE_HPP
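Under TBI, good() therefore ORs three things together: the heap offset, the current good color, and the base bit. With 42 offset bits (Layout 1) during a Marked0 phase, the composition looks like this (all values assumed for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t offset  = 0x12345678;  // object offset into the heap
      const uint64_t marked0 = 1ull << 59;  // ZAddressGoodMask during Marked0
      const uint64_t base    = 1ull << 42;  // ZAddressBase (heap starts at 4TB)
      assert((offset | marked0 | base) == 0x0800040012345678ull);
      return 0;
    }
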
diff --git a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
index 9916178cc..e5a2ab09a 100644
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp
@@ -55,6 +55,13 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
     return false;
   }
 
+#ifdef AARCH64
+  if (UseTBI) {
+    // Heal barriers
+    ZNMethod::nmethod_patch_barriers(nm);
+  }
+#endif
+
   // Heal oops
   ZNMethod::nmethod_oops_barrier(nm);

diff --git a/src/hotspot/share/gc/z/zGlobals.cpp b/src/hotspot/share/gc/z/zGlobals.cpp
index 28200e23b..cc82e9335 100644
--- a/src/hotspot/share/gc/z/zGlobals.cpp
+++ b/src/hotspot/share/gc/z/zGlobals.cpp
@@ -49,6 +49,7 @@ static uint32_t* ZAddressCalculateBadMaskHighOrderBitsAddr() {
 
 uint32_t* ZAddressBadMaskHighOrderBitsAddr = ZAddressCalculateBadMaskHighOrderBitsAddr();
 
+uintptr_t ZAddressBase;
 size_t ZAddressOffsetBits;
 uintptr_t ZAddressOffsetMask;
 size_t ZAddressOffsetMax;
diff --git a/src/hotspot/share/gc/z/zGlobals.hpp b/src/hotspot/share/gc/z/zGlobals.hpp
index 300f6489a..783411a82 100644
--- a/src/hotspot/share/gc/z/zGlobals.hpp
+++ b/src/hotspot/share/gc/z/zGlobals.hpp
@@ -102,6 +102,7 @@ extern uint32_t* ZAddressBadMaskHighOrderBitsAddr;
 const int ZAddressBadMaskHighOrderBitsOffset = LITTLE_ENDIAN_ONLY(4) BIG_ENDIAN_ONLY(0);
 
 // Pointer part of address
+extern uintptr_t ZAddressBase;
 extern size_t ZAddressOffsetBits;
 const size_t ZAddressOffsetShift = 0;
 extern uintptr_t ZAddressOffsetMask;
diff --git a/src/hotspot/share/gc/z/zMark.cpp b/src/hotspot/share/gc/z/zMark.cpp
index edc80925d..8a527107a 100644
--- a/src/hotspot/share/gc/z/zMark.cpp
+++ b/src/hotspot/share/gc/z/zMark.cpp
@@ -643,6 +643,13 @@ public:
     }
 
     if (ZNMethod::is_armed(nm)) {
+#ifdef AARCH64
+      if (UseTBI) {
+        // Heal barriers
+        ZNMethod::nmethod_patch_barriers(nm);
+      }
+#endif
+
       ZNMethod::nmethod_oops_do_inner(nm, _cl);
       ZNMethod::disarm(nm);
     }
diff --git a/src/hotspot/share/gc/z/zMarkStackEntry.hpp b/src/hotspot/share/gc/z/zMarkStackEntry.hpp
index fbb798cc8..ba66717c1 100644
--- a/src/hotspot/share/gc/z/zMarkStackEntry.hpp
+++ b/src/hotspot/share/gc/z/zMarkStackEntry.hpp
@@ -78,7 +78,8 @@ private:
   typedef ZBitField<uint64_t, bool,      2,  1>     field_follow;
   typedef ZBitField<uint64_t, bool,      3,  1>     field_inc_live;
   typedef ZBitField<uint64_t, bool,      4,  1>     field_mark;
-  typedef ZBitField<uint64_t, uintptr_t, 5,  59>    field_object_address;
+  // Use a ValueShift of 2 so that the top bits of TBI-colored addresses
+  // survive the 59-bit field instead of being cleared.
+  typedef ZBitField<uint64_t, uintptr_t, 5,  59, 2> field_object_address;
   typedef ZBitField<uint64_t, size_t,    2,  30>    field_partial_array_length;
   typedef ZBitField<uint64_t, size_t,    32, 32>    field_partial_array_offset;

diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp
index 71f510c2e..777ade668 100644
--- a/src/hotspot/share/gc/z/zNMethod.cpp
+++ b/src/hotspot/share/gc/z/zNMethod.cpp
@@ -28,7 +28,10 @@
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/barrierSetNMethod.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/z/zArray.inline.hpp"
 #include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zBarrierSet.hpp"
+#include "gc/z/zBarrierSetAssembler.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zLock.inline.hpp"
 #include "gc/z/zNMethod.hpp"
@@ -54,12 +57,23 @@ static void set_gc_data(nmethod* nm, ZNMethodData* data) {
 }
 
 void ZNMethod::attach_gc_data(nmethod* nm) {
+#ifdef AARCH64
+  ZArray<address> barriers;
+#endif
   GrowableArray<oop*> immediate_oops;
   bool non_immediate_oops = false;
 
-  // Find all oop relocations
+  // Find all barrier and oop relocations
   RelocIterator iter(nm);
   while (iter.next()) {
+#ifdef AARCH64
+    if (UseTBI && iter.type() == relocInfo::barrier_type) {
+      // Barrier relocation
+      barrier_Relocation* const reloc = iter.barrier_reloc();
+      barriers.push(reloc->addr());
+      continue;
+    }
+#endif
     if (iter.type() != relocInfo::oop_type) {
       // Not an oop
       continue;
@@ -92,9 +106,16 @@ void ZNMethod::attach_gc_data(nmethod* nm) {
   ZNMethodDataOops* const new_oops = ZNMethodDataOops::create(immediate_oops, non_immediate_oops);
   ZNMethodDataOops* const old_oops = data->swap_oops(new_oops);
   ZNMethodDataOops::destroy(old_oops);
+
+#ifdef AARCH64
+  if (UseTBI) {
+    // Attach barriers in GC data
+    data->swap_barriers(&barriers);
+  }
+#endif
 }
 
-ZReentrantLock* ZNMethod::lock_for_nmethod(nmethod* nm) {
+ZReentrantLock* ZNMethod::lock_for_nmethod(const nmethod* nm) {
   return gc_data(nm)->lock();
 }
 
@@ -116,6 +137,20 @@ void ZNMethod::log_register(const nmethod* nm) {
             oops->immediates_count(),
             oops->has_non_immediates() ? "Yes" : "No");
 
+#ifdef AARCH64
+  if (UseTBI) {
+    LogTarget(Trace, gc, nmethod, barrier) log_barriers;
+    if (log_barriers.is_enabled()) {
+      // Print nmethod barriers
+      ZLocker<ZReentrantLock> locker(lock_for_nmethod(nm));
+      ZArrayIterator<address> iter(gc_data(nm)->barriers());
+      for (address b; iter.next(&b);) {
+        log_barriers.print(" Barrier: " PTR_FORMAT, p2i(b));
+      }
+    }
+  }
+#endif
+
   LogTarget(Trace, gc, nmethod, oops) log_oops;
   if (!log_oops.is_enabled()) {
     return;
@@ -164,6 +199,15 @@ void ZNMethod::register_nmethod(nmethod* nm) {
 
   log_register(nm);
 
+#ifdef AARCH64
+  if (UseTBI) {
+    ZLocker<ZReentrantLock> locker(lock_for_nmethod(nm));
+    // Patch nmethod barriers
+    nmethod_patch_barriers(nm);
+  }
+#endif
+
+  // Register nmethod
   ZNMethodTable::register_nmethod(nm);
 
   // Disarm nmethod entry barrier
@@ -206,6 +250,16 @@ void ZNMethod::disarm(nmethod* nm) {
   bs->disarm(nm);
 }
 
+#ifdef AARCH64
+void ZNMethod::nmethod_patch_barriers(nmethod* nm) {
+  ZBarrierSetAssembler* const bs_asm = ZBarrierSet::assembler();
+  ZArrayIterator<address> iter(gc_data(nm)->barriers());
+  for (address barrier; iter.next(&barrier);) {
+    bs_asm->patch_barrier_relocation(barrier);
+  }
+}
+#endif
+
 void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) {
   ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
   if (!nm->is_alive()) {
@@ -329,6 +383,13 @@ public:
     ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
 
     if (ZNMethod::is_armed(nm)) {
+#ifdef AARCH64
+      if (UseTBI) {
+        // Heal barriers
+        ZNMethod::nmethod_patch_barriers(nm);
+      }
+#endif
+
       // Heal oops and disarm
       ZNMethod::nmethod_oops_barrier(nm);
       ZNMethod::disarm(nm);
diff --git a/src/hotspot/share/gc/z/zNMethod.hpp b/src/hotspot/share/gc/z/zNMethod.hpp
index 40ac93adb..bfe6af377 100644
--- a/src/hotspot/share/gc/z/zNMethod.hpp
+++ b/src/hotspot/share/gc/z/zNMethod.hpp
@@ -48,6 +48,10 @@ public:
   static bool is_armed(nmethod* nm);
   static void disarm(nmethod* nm);
 
+#ifdef AARCH64
+  static void nmethod_patch_barriers(nmethod* nm);
+#endif
+
   static void nmethod_oops_do(nmethod* nm, OopClosure* cl);
   static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl);
 
@@ -57,7 +61,7 @@ public:
   static void nmethods_do_end();
   static void nmethods_do(NMethodClosure* cl);
 
-  static ZReentrantLock* lock_for_nmethod(nmethod* nm);
+  static ZReentrantLock* lock_for_nmethod(const nmethod* nm);
 
   static void unlink(ZWorkers* workers, bool unloading_occurred);
   static void purge(ZWorkers* workers);
diff --git a/src/hotspot/share/gc/z/zNMethodData.cpp b/src/hotspot/share/gc/z/zNMethodData.cpp
index c6efbfe66..70d06557f 100644
--- a/src/hotspot/share/gc/z/zNMethodData.cpp
+++ b/src/hotspot/share/gc/z/zNMethodData.cpp
@@ -86,3 +86,15 @@ ZNMethodDataOops* ZNMethodData::swap_oops(ZNMethodDataOops* new_oops) {
   _oops = new_oops;
   return old_oops;
 }
+
+#ifdef AARCH64
+const ZArray<address>* ZNMethodData::barriers() const {
+  assert(_lock.is_owned(), "Should be owned");
+  return &_barriers;
+}
+
+void ZNMethodData::swap_barriers(ZArray<address>* new_barriers) {
+  ZLocker<ZReentrantLock> locker(&_lock);
+  _barriers.swap(new_barriers);
+}
+#endif
\ No newline at end of file
diff --git a/src/hotspot/share/gc/z/zNMethodData.hpp b/src/hotspot/share/gc/z/zNMethodData.hpp
index 7afd60105..1013855c9 100644
--- a/src/hotspot/share/gc/z/zNMethodData.hpp
+++ b/src/hotspot/share/gc/z/zNMethodData.hpp
@@ -24,6 +24,7 @@
 #ifndef SHARE_GC_Z_ZNMETHODDATA_HPP
 #define SHARE_GC_Z_ZNMETHODDATA_HPP
 
+#include "gc/z/zArray.hpp"
 #include "gc/z/zAttachedArray.hpp"
 #include "gc/z/zLock.hpp"
 #include "memory/allocation.hpp"
@@ -57,6 +58,9 @@ class ZNMethodData : public CHeapObj<mtGC> {
 private:
   ZReentrantLock            _lock;
   ZNMethodDataOops* volatile _oops;
+#ifdef AARCH64
+  ZArray<address>           _barriers;
+#endif
 
 public:
   ZNMethodData();
@@ -66,6 +70,11 @@ public:
 
   ZNMethodDataOops* oops() const;
   ZNMethodDataOops* swap_oops(ZNMethodDataOops* oops);
+
+#ifdef AARCH64
+  const ZArray<address>* barriers() const;
+  void swap_barriers(ZArray<address>* barriers);
+#endif
 };
 
 #endif // SHARE_GC_Z_ZNMETHODDATA_HPP
diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.cpp b/src/hotspot/share/gc/z/zPhysicalMemory.cpp
index ad8b762bd..38422d37c 100644
--- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp
+++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp
@@ -277,13 +277,21 @@ void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max
 
 void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
   // From an NMT point of view we treat the first heap view (marked0) as committed
+#ifdef AARCH64
+  const uintptr_t addr = UseTBI ? ZAddress::base(offset) : ZAddress::marked0(offset);
+#else // AARCH64
   const uintptr_t addr = ZAddress::marked0(offset);
+#endif // AARCH64
   MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
 }
 
 void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
   if (MemTracker::enabled()) {
+#ifdef AARCH64
+    const uintptr_t addr = UseTBI ? ZAddress::base(offset) : ZAddress::marked0(offset);
+#else // AARCH64
     const uintptr_t addr = ZAddress::marked0(offset);
+#endif // AARCH64
     Tracker tracker(Tracker::uncommit);
     tracker.record((address)addr, size);
   }
@@ -291,6 +299,13 @@ void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
 
 void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
   assert(is_aligned(size, ZGranuleSize), "Invalid size");
+#ifdef AARCH64
+  if (UseTBI) {
+    // We don't allocate offsets from _manager when UseTBI is on.
+    pmem.add_segment(ZPhysicalMemorySegment(0, size, false /* committed */));
+    return;
+  }
+#endif // AARCH64
 
   // Allocate segments
   while (size > 0) {
@@ -303,6 +318,13 @@ void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
 }
 
 void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
+#ifdef AARCH64
+  if (UseTBI) {
+    // Nothing was allocated from _manager, so there is nothing to free.
+    return;
+  }
+#endif // AARCH64
+
   // Free segments
   for (int i = 0; i < pmem.nsegments(); i++) {
     const ZPhysicalMemorySegment& segment = pmem.segment(i);
@@ -381,6 +403,13 @@ void ZPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const {
 }
 
 void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
+#ifdef AARCH64
+  if (UseTBI) {
+    pretouch_view(ZAddress::base(offset), size);
+    return;
+  }
+#endif // AARCH64
+
   if (ZVerifyViews) {
     // Pre-touch good view
     pretouch_view(ZAddress::good(offset), size);
@@ -394,6 +423,13 @@ void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
 
 void ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
   const size_t size = pmem.size();
+#ifdef AARCH64
+  if (UseTBI) {
+    map_view(ZAddress::base(offset), pmem);
+    nmt_commit(offset, size);
+    return;
+  }
+#endif // AARCH64
 
   if (ZVerifyViews) {
     // Map good view
@@ -410,6 +446,12 @@ void ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem)
 
 void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const {
   nmt_uncommit(offset, size);
+#ifdef AARCH64
+  if (UseTBI) {
+    unmap_view(ZAddress::base(offset), size);
+    return;
+  }
+#endif // AARCH64
 
   if (ZVerifyViews) {
     // Unmap good view
@@ -423,12 +465,24 @@ void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const {
 }
 
 void ZPhysicalMemoryManager::debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
+#ifdef AARCH64
+  if (UseTBI) {
+    // Does nothing when using VA-masking
+    return;
+  }
+#endif // AARCH64
   // Map good view
   assert(ZVerifyViews, "Should be enabled");
   map_view(ZAddress::good(offset), pmem);
 }
 
 void ZPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const {
+#ifdef AARCH64
+  if (UseTBI) {
+    // Does nothing when using VA-masking
+    return;
+  }
+#endif // AARCH64
   // Unmap good view
   assert(ZVerifyViews, "Should be enabled");
   unmap_view(ZAddress::good(offset), size);
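
Aside (illustration only, not part of the patch): every UseTBI branch above replaces the colored views (marked0/marked1/remapped) with a single view at ZAddress::base(offset). The standalone sketch below contrasts the two address layouts; the constants are hypothetical stand-ins for the real metadata-bit and ZAddressBase values, which ZGC computes at startup:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t offset  = 0x0000000012340000ULL;    // offset into the heap
  // Multi-mapping scheme: the color bit is part of the translated address,
  // so each color needs its own virtual mapping of the same physical memory.
  const uint64_t marked0 = offset | (1ULL << 42);    // hypothetical metadata bit
  // TBI scheme: the color bits move into the top byte, which AArch64 ignores
  // during address translation, so one mapping at ZAddress::base() suffices.
  const uint64_t tbi     = offset | (0x16ULL << 56); // hypothetical ZAddressBase
  std::printf("marked0 view: 0x%016llx\n", (unsigned long long)marked0);
  std::printf("TBI address:  0x%016llx\n", (unsigned long long)tbi);
}
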
diff --git a/src/hotspot/share/gc/z/zVirtualMemory.cpp b/src/hotspot/share/gc/z/zVirtualMemory.cpp
index 4f9c9bd4b..cb50ae803 100644
--- a/src/hotspot/share/gc/z/zVirtualMemory.cpp
+++ b/src/hotspot/share/gc/z/zVirtualMemory.cpp
@@ -104,6 +104,21 @@ size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) {
 bool ZVirtualMemoryManager::reserve_contiguous(uintptr_t start, size_t size) {
   assert(is_aligned(size, ZGranuleSize), "Must be granule aligned");
 
+#ifdef AARCH64
+  if (UseTBI) {
+    const uintptr_t addr = ZAddress::base(start);
+    if (!pd_reserve(addr, size)) {
+      return false;
+    }
+    nmt_reserve(addr, size);
+
+    // Make the address range free
+    _manager.free(start, size);
+
+    return true;
+  }
+#endif // AARCH64
+
   // Reserve address views
   const uintptr_t marked0 = ZAddress::marked0(start);
   const uintptr_t marked1 = ZAddress::marked1(start);
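
Aside (illustration only, not part of the patch): reserve_contiguous() now makes one pd_reserve() call at base(start) instead of one per view. A standalone Linux sketch of that kind of exact-address reservation, with zgc_reserve() as a hypothetical stand-in for pd_reserve():

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool zgc_reserve(uintptr_t addr, size_t size) {
  // Reserve address space only (PROT_NONE), at the requested address.
  void* const res = mmap((void*)addr, size, PROT_NONE,
                         MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
  if (res == MAP_FAILED) {
    return false;
  }
  if ((uintptr_t)res != addr) {
    munmap(res, size);  // kernel placed it elsewhere; give it back
    return false;
  }
  return true;
}

int main() {
  const size_t    size  = 2 * 1024 * 1024;    // one hypothetical granule
  const uintptr_t start = 0x100000000000ULL;  // hypothetical heap range start
  // UseTBI path: a single reservation backs every pointer color.
  const bool ok = zgc_reserve(start, size);
  // The non-TBI path repeats this once per view: marked0, marked1, remapped.
  std::printf("single-view reservation %s\n", ok ? "succeeded" : "failed");
  return ok ? 0 : 1;
}
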
diff --git a/src/hotspot/share/jfr/writers/jfrStreamWriterHost.inline.hpp b/src/hotspot/share/jfr/writers/jfrStreamWriterHost.inline.hpp
index f8900a13b..356c6ca8b 100644
--- a/src/hotspot/share/jfr/writers/jfrStreamWriterHost.inline.hpp
+++ b/src/hotspot/share/jfr/writers/jfrStreamWriterHost.inline.hpp
@@ -28,6 +28,7 @@
 #include "jfr/jni/jfrJavaSupport.hpp"
 #include "jfr/writers/jfrStreamWriterHost.hpp"
 #include "runtime/os.hpp"
+#include "runtime/globals.hpp"
 
 template <typename Adapter, typename AP>
 StreamWriterHost<Adapter, AP>::StreamWriterHost(typename Adapter::StorageType* storage, Thread* thread) :
@@ -74,6 +75,9 @@ inline void StreamWriterHost<Adapter, AP>::write_bytes(void* dest, const void* b
 template <typename Adapter, typename AP>
 inline void StreamWriterHost<Adapter, AP>::write_bytes(const u1* buf, intptr_t len) {
   assert(len >= 0, "invariant");
+#ifdef AARCH64
+  buf = (const u1*) CLEAR_COLOR_BITS((uintptr_t) buf);
+#endif
   while (len > 0) {
     const unsigned int nBytes = len > INT_MAX ? INT_MAX : (unsigned int)len;
     const ssize_t num_written = (ssize_t)os::write(_fd, buf, nBytes);
diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
index cd0115248..41e946563 100644
--- a/src/hotspot/share/prims/jni.cpp
+++ b/src/hotspot/share/prims/jni.cpp
@@ -2831,6 +2831,9 @@ JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboole
     type = TypeArrayKlass::cast(a->klass())->element_type();
   }
   void* ret = arrayOop(a)->base(type);
+#ifdef AARCH64
+  ret = (void*) CLEAR_COLOR_BITS((uintptr_t) ret);
+#endif // AARCH64
   HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN(ret);
   return ret;
 JNI_END
@@ -2893,6 +2896,9 @@ JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jbool
       }
       if (isCopy != NULL) *isCopy = JNI_TRUE;
   }
+#ifdef AARCH64
+  ret = (jchar*) CLEAR_COLOR_BITS((uintptr_t) ret);
+#endif // AARCH64
   HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN((uint16_t *) ret);
   return ret;
 JNI_END
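
Aside (illustration only, not part of the patch): os::write() and the callers of the JNI critical-region functions receive raw pointers that may be handed to code which does not tolerate a tagged top byte, so the patch masks the color bits out first. CLEAR_COLOR_BITS itself is defined in globalDefinitions_aarch64.hpp (that hunk is not shown in this excerpt); assuming it keeps the low 56 bits, it behaves like this standalone sketch:

#include <cstdint>
#include <cstdio>

// Assumed behavior of CLEAR_COLOR_BITS: drop the top byte, keep bits 0..55.
constexpr uint64_t clear_color_bits(uint64_t p) {
  return p & ((1ULL << 56) - 1);  // mask = 0x00ff'ffff'ffff'ffff
}

int main() {
  const uint64_t tagged = 0x1600000012345678ULL;  // color bits in the top byte
  std::printf("tagged:   0x%016llx\n", (unsigned long long)tagged);
  std::printf("untagged: 0x%016llx\n",
              (unsigned long long)clear_color_bits(tagged));
}
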
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddress.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddress.java
index 52d9555e4..4a449d9d4 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddress.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZAddress.java
@@ -51,12 +51,18 @@ class ZAddress {
         return !is_weak_bad(value);
     }
 
+    static long base(long address) {
+        // ZAddressBase is non-zero only when the UseTBI flag is on, so this
+        // is a no-op when the arch is not AArch64 or UseTBI is off.
+        return address | ZGlobals.ZAddressBase();
+    }
+
     static long offset(Address address) {
         return as_long(address) & ZGlobals.ZAddressOffsetMask();
    }
 
     static Address good(Address value) {
-        return VM.getVM().getDebugger().newAddress(offset(value) | ZGlobals.ZAddressGoodMask());
+        return VM.getVM().getDebugger().newAddress(base(offset(value) | ZGlobals.ZAddressGoodMask()));
     }
 
     static Address good_or_null(Address value) {
@@ -69,9 +75,14 @@ class ZAddress {
 
     static boolean isIn(Address addr) {
         long value = as_long(addr);
-        if (!isPowerOf2(value & ~ZGlobals.ZAddressOffsetMask())) {
+        if (!isPowerOf2(value & ~ZGlobals.ZAddressOffsetMask() & ~ZGlobals.ZAddressBase())) {
             return false;
         }
         return (value & (ZGlobals.ZAddressMetadataMask() & ~ZGlobals.ZAddressMetadataFinalizable())) != 0L;
     }
+
+    static Address clearTopByte(Address value) {
+        // (1L << 56) - 1 == 0x00ff_ffff_ffff_ffffL: keep the low 56 bits.
+        return VM.getVM().getDebugger().newAddress(as_long(value) & ((1L << 56) - 1));
+    }
 }
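
Aside (illustration only, not part of the patch): on the SA side, good() now composes the colored address as base(offset | goodMask), and clearTopByte() strips the tag before the address is dereferenced. A worked example with hypothetical mask values (chosen to sit in the top byte, as the TBI scheme requires), in C++ for consistency with the other sketches:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t offset    = 0x0000000012340000ULL; // heap offset
  const uint64_t good_mask = 0x2000000000000000ULL; // hypothetical good mask
  const uint64_t addr_base = 0x1000000000000000ULL; // hypothetical _ZAddressBase
  // ZAddress.good(): OR the good mask into the offset, then OR in the base
  // (the base is zero when UseTBI is off, leaving the old behavior intact).
  const uint64_t good      = (offset | good_mask) | addr_base;
  // ZAddress.clearTopByte(): drop bits 56..63 before dereferencing.
  const uint64_t untagged  = good & ((1ULL << 56) - 1);
  std::printf("good:     0x%016llx\n", (unsigned long long)good);      // tagged
  std::printf("untagged: 0x%016llx\n", (unsigned long long)untagged);  // offset
}
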
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZCollectedHeap.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZCollectedHeap.java
index c55370ef7..dd74913dd 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZCollectedHeap.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZCollectedHeap.java
@@ -92,6 +92,9 @@ public class ZCollectedHeap extends CollectedHeap {
         if (oopAddress == null) {
             return null;
         }
+        if (ZUtils.getUseTBI()) {
+            oopAddress = ZAddress.clearTopByte(oopAddress);
+        }
 
         return oopAddress.addOffsetToAsOopHandle(0);
     }
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobals.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobals.java
index e01f7b832..41c9bb37b 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobals.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobals.java
@@ -98,6 +98,10 @@ public class ZGlobals {
         return instance().ZGlobalSeqNum();
     }
 
+    public static long ZAddressBase() {
+        return instance().ZAddressBase();
+    }
+
     public static long ZAddressOffsetMask() {
         return instance().ZAddressOffsetMask();
     }
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobalsForVMStructs.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobalsForVMStructs.java
index 28d33f4d9..12054c92a 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobalsForVMStructs.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZGlobalsForVMStructs.java
@@ -34,6 +34,7 @@ import sun.jvm.hotspot.types.TypeDataBase;
 class ZGlobalsForVMStructs extends VMObject {
     private static AddressField ZGlobalPhaseField;
     private static AddressField ZGlobalSeqNumField;
+    private static AddressField ZAddressBaseField;
     private static AddressField ZAddressOffsetMaskField;
     private static AddressField ZAddressMetadataMaskField;
     private static AddressField ZAddressMetadataFinalizableField;
@@ -52,6 +53,7 @@ class ZGlobalsForVMStructs extends VMObject {
 
         ZGlobalPhaseField = type.getAddressField("_ZGlobalPhase");
         ZGlobalSeqNumField = type.getAddressField("_ZGlobalSeqNum");
+        ZAddressBaseField = type.getAddressField("_ZAddressBase");
         ZAddressOffsetMaskField = type.getAddressField("_ZAddressOffsetMask");
         ZAddressMetadataMaskField = type.getAddressField("_ZAddressMetadataMask");
         ZAddressMetadataFinalizableField = type.getAddressField("_ZAddressMetadataFinalizable");
@@ -74,6 +76,10 @@ class ZGlobalsForVMStructs extends VMObject {
         return ZGlobalSeqNumField.getValue(addr).getJIntAt(0);
     }
 
+    long ZAddressBase() {
+        return ZAddressBaseField.getValue(addr).getJLongAt(0);
+    }
+
     long ZAddressOffsetMask() {
         return ZAddressOffsetMaskField.getValue(addr).getJLongAt(0);
     }
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZUtils.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZUtils.java
index 2029a71da..65930a006 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZUtils.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/z/ZUtils.java
@@ -37,4 +37,9 @@ class ZUtils {
         long adjusted = size + mask;
         return adjusted & ~mask;
     }
+
+    static boolean getUseTBI() {
+        VM.Flag flag = VM.getVM().getCommandLineFlag("UseTBI");
+        return flag == null ? false : flag.getBool();
+    }
 }
-- 
2.17.1
