Collate and merge ZGC-related patches

neu-mobi 2024-06-15 16:55:04 +08:00
parent b7e821743c
commit 4a98be116f
15 changed files with 2163 additions and 2838 deletions


@@ -1,184 +0,0 @@
From 476ec6be3f75c70c50bd1552c624abca098ddba2 Mon Sep 17 00:00:00 2001
Date: Wed, 18 Mar 2020 10:25:06 +0000
Subject: [PATCH] 8209375: ZGC: Use dynamic base address for mark stack space
Summary: <gc>: <mark stack needs atomic change>
LLT: jdk11u/test/hotspot/jtreg/vmTestbase/gc/gctests/SoftReference/soft004/soft004.java
Bug url: https://bugs.openjdk.java.net/browse/JDK-8209375
---
src/hotspot/share/gc/z/zGlobals.hpp | 7 +---
src/hotspot/share/gc/z/zMarkStack.cpp | 74 +++++++++++++++--------------------
src/hotspot/share/gc/z/zMarkStack.hpp | 1 +
src/hotspot/share/gc/z/z_globals.hpp | 6 +--
4 files changed, 38 insertions(+), 50 deletions(-)
diff --git a/src/hotspot/share/gc/z/zGlobals.hpp b/src/hotspot/share/gc/z/zGlobals.hpp
index 080ea5c0e..0f9e9dcb4 100644
--- a/src/hotspot/share/gc/z/zGlobals.hpp
+++ b/src/hotspot/share/gc/z/zGlobals.hpp
@@ -117,11 +117,8 @@ extern uintptr_t ZAddressWeakBadMask;
// Marked state
extern uintptr_t ZAddressMetadataMarked;
-// Address space for mark stack allocations
-const size_t ZMarkStackSpaceSizeShift = 40; // 1TB
-const size_t ZMarkStackSpaceSize = (size_t)1 << ZMarkStackSpaceSizeShift;
-const uintptr_t ZMarkStackSpaceStart = ZAddressSpaceEnd + ZMarkStackSpaceSize;
-const uintptr_t ZMarkStackSpaceEnd = ZMarkStackSpaceStart + ZMarkStackSpaceSize;
+// Mark stack space
+extern uintptr_t ZMarkStackSpaceStart;
const size_t ZMarkStackSpaceExpandSize = (size_t)1 << 25; // 32M
// Mark stack and magazine sizes
diff --git a/src/hotspot/share/gc/z/zMarkStack.cpp b/src/hotspot/share/gc/z/zMarkStack.cpp
index 52fe51ece..9cc768956 100644
--- a/src/hotspot/share/gc/z/zMarkStack.cpp
+++ b/src/hotspot/share/gc/z/zMarkStack.cpp
@@ -28,58 +28,44 @@
#include "gc/z/zMarkStack.inline.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/os.hpp"
#include "utilities/debug.hpp"
-#include <sys/mman.h>
-#include <sys/types.h>
+uintptr_t ZMarkStackSpaceStart;
ZMarkStackSpace::ZMarkStackSpace() :
_expand_lock(),
+ _start(0),
_top(0),
_end(0) {
- assert(ZMarkStacksMax >= ZMarkStackSpaceExpandSize, "ZMarkStacksMax too small");
- assert(ZMarkStacksMax <= ZMarkStackSpaceSize, "ZMarkStacksMax too large");
-
+ assert(ZMarkStackSpaceLimit >= ZMarkStackSpaceExpandSize, "ZMarkStackSpaceLimit too small");
// Reserve address space
- const void* res = mmap((void*)ZMarkStackSpaceStart, ZMarkStackSpaceSize,
- PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
- if (res != (void*)ZMarkStackSpaceStart) {
- log_error(gc, marking)("Failed to reserve address space for marking stacks");
+ const size_t size = ZMarkStackSpaceLimit;
+ const size_t alignment = (size_t)os::vm_allocation_granularity();
+ const uintptr_t addr = (uintptr_t)os::reserve_memory(size, NULL, alignment, mtGC);
+ if (addr == 0) {
+ log_error(gc, marking)("Failed to reserve address space for mark stacks");
return;
}
// Successfully initialized
- _top = _end = ZMarkStackSpaceStart;
-}
+ _start = _top = _end = addr;
-bool ZMarkStackSpace::is_initialized() const {
- return _top != 0;
+ // Register mark stack space start
+ ZMarkStackSpaceStart = _start;
}
-bool ZMarkStackSpace::expand() {
- const size_t max = ZMarkStackSpaceStart + ZMarkStacksMax;
- if (_end + ZMarkStackSpaceExpandSize > max) {
- // Expansion limit reached
- return false;
- }
-
- void* const res = mmap((void*)_end, ZMarkStackSpaceExpandSize,
- PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0);
- if (res == MAP_FAILED) {
- ZErrno err;
- log_error(gc, marking)("Failed to map memory for marking stacks (%s)", err.to_string());
- return false;
- }
-
- return true;
+bool ZMarkStackSpace::is_initialized() const {
+ return _start != 0;
}
uintptr_t ZMarkStackSpace::alloc_space(size_t size) {
- uintptr_t top = _top;
+ uintptr_t top = Atomic::load(&_top);
for (;;) {
+ const uintptr_t end = Atomic::load(&_end);
const uintptr_t new_top = top + size;
- if (new_top > _end) {
+ if (new_top > end) {
// Not enough space left
return 0;
}
@@ -104,24 +90,28 @@ uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) {
return addr;
}
- // Expand stack space
- if (!expand()) {
- // We currently can't handle the situation where we
- // are running out of mark stack space.
- fatal("Mark stack overflow (allocated " SIZE_FORMAT "M, size " SIZE_FORMAT "M, max " SIZE_FORMAT "M),"
- " use -XX:ZMarkStacksMax=? to increase this limit",
- (_end - ZMarkStackSpaceStart) / M, size / M, ZMarkStacksMax / M);
- return 0;
+ // Check expansion limit
+ const size_t expand_size = ZMarkStackSpaceExpandSize;
+ const size_t old_size = _end - _start;
+ const size_t new_size = old_size + expand_size;
+ if (new_size > ZMarkStackSpaceLimit) {
+ // Expansion limit reached. This is a fatal error since we
+ // currently can't recover from running out of mark stack space.
+ fatal("Mark stack space exhausted. Use -XX:ZMarkStackSpaceLimit=<size> to increase the "
+ "maximum number of bytes allocated for mark stacks. Current limit is " SIZE_FORMAT "M.",
+ ZMarkStackSpaceLimit / M);
}
log_debug(gc, marking)("Expanding mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M",
- (_end - ZMarkStackSpaceStart) / M,
- (_end - ZMarkStackSpaceStart + ZMarkStackSpaceExpandSize) / M);
+ old_size / M, new_size / M);
+
+ // Expand
+ os::commit_memory_or_exit((char*)_end, expand_size, false /* executable */, "Mark stack space");
// Increment top before end to make sure another
// thread can't steal out newly expanded space.
addr = Atomic::add(size, &_top) - size;
- _end += ZMarkStackSpaceExpandSize;
+ Atomic::add(expand_size, &_end);
return addr;
}
diff --git a/src/hotspot/share/gc/z/zMarkStack.hpp b/src/hotspot/share/gc/z/zMarkStack.hpp
index b68b9faa3..12f3e4eca 100644
--- a/src/hotspot/share/gc/z/zMarkStack.hpp
+++ b/src/hotspot/share/gc/z/zMarkStack.hpp
@@ -76,6 +76,7 @@ typedef ZStackList<ZMarkStackMagazine> ZMarkStackMagazineList;
class ZMarkStackSpace {
private:
ZLock _expand_lock;
+ uintptr_t _start;
volatile uintptr_t _top;
volatile uintptr_t _end;
diff --git a/src/hotspot/share/gc/z/z_globals.hpp b/src/hotspot/share/gc/z/z_globals.hpp
index 9e0f8985b..8cee59be7 100644
--- a/src/hotspot/share/gc/z/z_globals.hpp
+++ b/src/hotspot/share/gc/z/z_globals.hpp
@@ -53,9 +53,9 @@
"Allow Java threads to stall and wait for GC to complete " \
"instead of immediately throwing an OutOfMemoryError") \
\
- product(size_t, ZMarkStacksMax, NOT_LP64(512*M) LP64_ONLY(8*G), \
- "Maximum number of bytes allocated for marking stacks") \
- range(32*M, NOT_LP64(512*M) LP64_ONLY(1024*G)) \
+ product(size_t, ZMarkStackSpaceLimit, 8*G, \
+ "Maximum number of bytes allocated for mark stacks") \
+ range(32*M, 1024*G) \
\
product(uint, ZCollectionInterval, 0, \
"Force GC at a fixed time interval (in seconds)") \
--
2.12.3
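For context, the rewritten alloc_space() above is a lock-free bump-pointer allocation over the reserved range: read _top, compute the new top, and CAS it in, retrying on contention. A minimal standalone sketch of the same pattern in portable C++, with std::atomic standing in for HotSpot's Atomic class (the BumpSpace name and layout are illustrative, not the real ZMarkStackSpace):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Illustrative stand-in for ZMarkStackSpace's lock-free fast path.
class BumpSpace {
  std::atomic<uintptr_t> _top;
  std::atomic<uintptr_t> _end;

public:
  BumpSpace(uintptr_t start, uintptr_t end) : _top(start), _end(end) {}

  // Returns the start of the allocated range, or 0 when the committed
  // space is exhausted (the caller then expands under a lock).
  uintptr_t alloc(std::size_t size) {
    uintptr_t top = _top.load(std::memory_order_relaxed);
    for (;;) {
      const uintptr_t end = _end.load(std::memory_order_relaxed);
      const uintptr_t new_top = top + size;
      if (new_top > end) {
        return 0; // Not enough space left.
      }
      // On failure the CAS refreshes 'top' with the current value,
      // so the loop retries against the latest _top.
      if (_top.compare_exchange_weak(top, new_top)) {
        return top; // [top, top + size) now belongs to this thread.
      }
    }
  }
};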


@@ -1,87 +0,0 @@
From 7ca249ae82c6b6c60c524781806f9d12ef3f8f98 Mon Sep 17 00:00:00 2001
Date: Mon, 16 Mar 2020 16:24:43 +0800
Subject: [PATCH] 8209894: ZGC: Cap number of GC workers based on heap size
Summary: <gc>: <cap number of GC workers based on heap size>
LLT: jdk11u/test/hotspot/jtreg/vmTestbase/nsk/jdi/ObjectReference/disableCollection/disablecollection002/TestDescription.java
Bug url: https://bugs.openjdk.java.net/browse/JDK-8209894
---
src/hotspot/share/gc/z/zWorkers.cpp | 23 ++++++++++++++++++-----
src/hotspot/share/gc/z/zWorkers.hpp | 4 +---
2 files changed, 19 insertions(+), 8 deletions(-)
diff --git a/src/hotspot/share/gc/z/zWorkers.cpp b/src/hotspot/share/gc/z/zWorkers.cpp
index 0686ec7af..6a0c2561d 100644
--- a/src/hotspot/share/gc/z/zWorkers.cpp
+++ b/src/hotspot/share/gc/z/zWorkers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,14 +22,27 @@
*/
#include "precompiled.hpp"
+#include "gc/z/zGlobals.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
-uint ZWorkers::calculate_ncpus(double share_in_percent) {
- return ceil(os::initial_active_processor_count() * share_in_percent / 100.0);
+static uint calculate_nworkers_based_on_ncpus(double cpu_share_in_percent) {
+ return ceil(os::initial_active_processor_count() * cpu_share_in_percent / 100.0);
+}
+
+static uint calculate_nworkers_based_on_heap_size(double reserve_share_in_percent) {
+ const int nworkers = ((MaxHeapSize * (reserve_share_in_percent / 100.0)) - ZPageSizeMedium) / ZPageSizeSmall;
+ return MAX2(nworkers, 1);
+}
+
+static uint calculate_nworkers(double cpu_share_in_percent) {
+ // Cap number of workers so that we never use more than 10% of the max heap
+ // for the reserve. This is useful when using small heaps on large machines.
+ return MIN2(calculate_nworkers_based_on_ncpus(cpu_share_in_percent),
+ calculate_nworkers_based_on_heap_size(10.0));
}
uint ZWorkers::calculate_nparallel() {
@@ -38,7 +51,7 @@ uint ZWorkers::calculate_nparallel() {
// close to the number of processors tends to lead to over-provisioning and
// scheduling latency issues. Using 60% of the active processors appears to
// be a fairly good balance.
- return calculate_ncpus(60.0);
+ return calculate_nworkers(60.0);
}
uint ZWorkers::calculate_nconcurrent() {
@@ -48,7 +61,7 @@ uint ZWorkers::calculate_nconcurrent() {
// throughput, while using too few threads will prolong the GC-cycle and
// we then risk being out-run by the application. Using 12.5% of the active
// processors appears to be a fairly good balance.
- return calculate_ncpus(12.5);
+ return calculate_nworkers(12.5);
}
class ZWorkersWarmupTask : public ZTask {
diff --git a/src/hotspot/share/gc/z/zWorkers.hpp b/src/hotspot/share/gc/z/zWorkers.hpp
index 36a3c61fd..6ce09c447 100644
--- a/src/hotspot/share/gc/z/zWorkers.hpp
+++ b/src/hotspot/share/gc/z/zWorkers.hpp
@@ -34,8 +34,6 @@ private:
bool _boost;
WorkGang _workers;
- static uint calculate_ncpus(double share_in_percent);
-
void run(ZTask* task, uint nworkers);
public:
--
2.12.3
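The capping logic this patch adds is: take the CPU-share-based worker count, but never more workers than a reserve of 10% of the max heap can back with one small page each, after setting aside one medium page. A rough C++ sketch under those assumptions, where the parameters stand in for HotSpot's MaxHeapSize, ZPageSizeMedium and ZPageSizeSmall globals (all names illustrative):

#include <algorithm>
#include <cmath>
#include <cstddef>

// Stand-in for calculate_nworkers_based_on_ncpus().
static unsigned nworkers_from_cpus(unsigned ncpus, double cpu_share_percent) {
  return (unsigned)std::ceil(ncpus * cpu_share_percent / 100.0);
}

// Stand-in for calculate_nworkers_based_on_heap_size().
static unsigned nworkers_from_heap(std::size_t max_heap,
                                   std::size_t page_medium,
                                   std::size_t page_small,
                                   double reserve_share_percent) {
  // Workers the reserve can back, but never fewer than one.
  const long n = (long)((max_heap * (reserve_share_percent / 100.0) - page_medium) / page_small);
  return (unsigned)std::max(n, 1L);
}

static unsigned nworkers(unsigned ncpus, std::size_t max_heap,
                         std::size_t page_medium, std::size_t page_small,
                         double cpu_share_percent) {
  // Cap the CPU-derived count so small heaps on large machines do not
  // over-provision the reserve.
  return std::min(nworkers_from_cpus(ncpus, cpu_share_percent),
                  nworkers_from_heap(max_heap, page_medium, page_small, 10.0));
}

For example, with a 128M max heap and ZGC's usual 2M small and 32M medium pages, the heap-based bound goes negative and clamps to 1, so even a many-core machine gets a single worker.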


@@ -1,906 +0,0 @@
diff --git a/make/hotspot/gensrc/GensrcAdlc.gmk b/make/hotspot/gensrc/GensrcAdlc.gmk
index 687896251..a39640526 100644
--- a/make/hotspot/gensrc/GensrcAdlc.gmk
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk
@@ -140,6 +140,12 @@ ifeq ($(call check-jvm-feature, compiler2), true)
$d/os_cpu/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH)/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH).ad \
)))
+ ifeq ($(call check-jvm-feature, zgc), true)
+ AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
+ $d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/z/z_$(HOTSPOT_TARGET_CPU).ad \
+ )))
+ endif
+
ifeq ($(call check-jvm-feature, shenandoahgc), true)
AD_SRC_FILES += $(call uniq, $(wildcard $(foreach d, $(AD_SRC_ROOTS), \
$d/cpu/$(HOTSPOT_TARGET_CPU_ARCH)/gc/shenandoah/shenandoah_$(HOTSPOT_TARGET_CPU).ad \
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 29f81face..ab578476a 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1128,13 +1128,6 @@ definitions %{
int_def VOLATILE_REF_COST ( 1000, 10 * INSN_COST);
%}
-source_hpp %{
-
-#include "gc/z/c2/zBarrierSetC2.hpp"
-#include "gc/z/zThreadLocalData.hpp"
-
-%}
-
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
@@ -18110,243 +18103,6 @@ instruct vpopcount2I(vecD dst, vecD src) %{
ins_pipe(pipe_class_default);
%}
-source %{
-
-static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
- ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
- __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
- __ andr(tmp, tmp, ref);
- __ cbnz(tmp, *stub->entry());
- __ bind(*stub->continuation());
-}
-
-static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
- ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
- __ b(*stub->entry());
- __ bind(*stub->continuation());
-}
-
-%}
-
-// Load Pointer
-instruct zLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr)
-%{
- match(Set dst (LoadP mem));
- predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierStrong));
- effect(TEMP dst, KILL cr);
-
- ins_cost(4 * INSN_COST);
-
- format %{ "ldr $dst, $mem" %}
-
- ins_encode %{
- const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
- __ ldr($dst$$Register, ref_addr);
- if (barrier_data() != ZLoadBarrierElided) {
- z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, false /* weak */);
- }
- %}
-
- ins_pipe(iload_reg_mem);
-%}
-
-// Load Weak Pointer
-instruct zLoadWeakP(iRegPNoSp dst, memory mem, rFlagsReg cr)
-%{
- match(Set dst (LoadP mem));
- predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierWeak));
- effect(TEMP dst, KILL cr);
-
- ins_cost(4 * INSN_COST);
-
- format %{ "ldr $dst, $mem" %}
-
- ins_encode %{
- const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
- __ ldr($dst$$Register, ref_addr);
- z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, true /* weak */);
- %}
-
- ins_pipe(iload_reg_mem);
-%}
-
-// Load Pointer Volatile
-instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
-%{
- match(Set dst (LoadP mem));
- predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
- effect(TEMP dst, KILL cr);
-
- ins_cost(VOLATILE_REF_COST);
-
- format %{ "ldar $dst, $mem\t" %}
-
- ins_encode %{
- __ ldar($dst$$Register, $mem$$Register);
- if (barrier_data() != ZLoadBarrierElided) {
- z_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, false /* weak */);
- }
- %}
-
- ins_pipe(pipe_serial);
-%}
-
-instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
- effect(KILL cr, TEMP_DEF res);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $mem, $oldval, $newval\n\t"
- "cset $res, EQ" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- false /* acquire */, true /* release */, false /* weak */, rscratch2);
- __ cset($res$$Register, Assembler::EQ);
- if (barrier_data() != ZLoadBarrierElided) {
- Label good;
- __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
- __ andr(rscratch1, rscratch1, rscratch2);
- __ cbz(rscratch1, good);
- z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- false /* acquire */, true /* release */, false /* weak */, rscratch2);
- __ cset($res$$Register, Assembler::EQ);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
- effect(KILL cr, TEMP_DEF res);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $mem, $oldval, $newval\n\t"
- "cset $res, EQ" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- true /* acquire */, true /* release */, false /* weak */, rscratch2);
- __ cset($res$$Register, Assembler::EQ);
- if (barrier_data() != ZLoadBarrierElided) {
- Label good;
- __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
- __ andr(rscratch1, rscratch1, rscratch2);
- __ cbz(rscratch1, good);
- z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- true /* acquire */, true /* release */, false /* weak */, rscratch2);
- __ cset($res$$Register, Assembler::EQ);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
- effect(TEMP_DEF res, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- false /* acquire */, true /* release */, false /* weak */, $res$$Register);
- if (barrier_data() != ZLoadBarrierElided) {
- Label good;
- __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
- __ andr(rscratch1, rscratch1, $res$$Register);
- __ cbz(rscratch1, good);
- z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- false /* acquire */, true /* release */, false /* weak */, $res$$Register);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
- match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
- effect(TEMP_DEF res, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
-
- ins_encode %{
- guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- true /* acquire */, true /* release */, false /* weak */, $res$$Register);
- if (barrier_data() != ZLoadBarrierElided) {
- Label good;
- __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
- __ andr(rscratch1, rscratch1, $res$$Register);
- __ cbz(rscratch1, good);
- z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
- __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
- true /* acquire */, true /* release */, false /* weak */, $res$$Register);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_slow);
-%}
-
-instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
- match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
- effect(TEMP_DEF prev, KILL cr);
-
- ins_cost(2 * VOLATILE_REF_COST);
-
- format %{ "atomic_xchg $prev, $newv, [$mem]" %}
-
- ins_encode %{
- __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
- if (barrier_data() != ZLoadBarrierElided) {
- z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
- }
- %}
-
- ins_pipe(pipe_serial);
-%}
-
-instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
- match(Set prev (GetAndSetP mem newv));
- predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
- effect(TEMP_DEF prev, KILL cr);
-
- ins_cost(VOLATILE_REF_COST);
-
- format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
-
- ins_encode %{
- __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
- if (barrier_data() != ZLoadBarrierElided) {
- z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
- }
- %}
- ins_pipe(pipe_serial);
-%}
//----------PEEPHOLE RULES-----------------------------------------------------
// These must follow all instruction definitions as they use the names
diff --git a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
new file mode 100644
index 000000000..50cc6f924
--- /dev/null
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad
@@ -0,0 +1,268 @@
+//
+// Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+
+source_hpp %{
+
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+
+%}
+
+source %{
+
+static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
+ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
+ __ ldr(tmp, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(tmp, tmp, ref);
+ __ cbnz(tmp, *stub->entry());
+ __ bind(*stub->continuation());
+}
+
+static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
+ __ b(*stub->entry());
+ __ bind(*stub->continuation());
+}
+
+%}
+
+// Load Pointer
+instruct zLoadP(iRegPNoSp dst, memory mem, rFlagsReg cr)
+%{
+ match(Set dst (LoadP mem));
+ predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierStrong));
+ effect(TEMP dst, KILL cr);
+
+ ins_cost(4 * INSN_COST);
+
+ format %{ "ldr $dst, $mem" %}
+
+ ins_encode %{
+ const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+ __ ldr($dst$$Register, ref_addr);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, false /* weak */);
+ }
+ %}
+
+ ins_pipe(iload_reg_mem);
+%}
+
+// Load Weak Pointer
+instruct zLoadWeakP(iRegPNoSp dst, memory mem, rFlagsReg cr)
+%{
+ match(Set dst (LoadP mem));
+ predicate(UseZGC && !needs_acquiring_load(n) && (n->as_Load()->barrier_data() == ZLoadBarrierWeak));
+ effect(TEMP dst, KILL cr);
+
+ ins_cost(4 * INSN_COST);
+
+ format %{ "ldr $dst, $mem" %}
+
+ ins_encode %{
+ const Address ref_addr = mem2address($mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
+ __ ldr($dst$$Register, ref_addr);
+ z_load_barrier(_masm, this, ref_addr, $dst$$Register, rscratch2 /* tmp */, true /* weak */);
+ %}
+
+ ins_pipe(iload_reg_mem);
+%}
+
+// Load Pointer Volatile
+instruct zLoadPVolatile(iRegPNoSp dst, indirect mem /* sync_memory */, rFlagsReg cr)
+%{
+ match(Set dst (LoadP mem));
+ predicate(UseZGC && needs_acquiring_load(n) && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
+ effect(TEMP dst, KILL cr);
+
+ ins_cost(VOLATILE_REF_COST);
+
+ format %{ "ldar $dst, $mem\t" %}
+
+ ins_encode %{
+ __ ldar($dst$$Register, $mem$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, Address($mem$$Register), $dst$$Register, rscratch2 /* tmp */, false /* weak */);
+ }
+ %}
+
+ ins_pipe(pipe_serial);
+%}
+
+instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(KILL cr, TEMP_DEF res);
+
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ format %{ "cmpxchg $mem, $oldval, $newval\n\t"
+ "cset $res, EQ" %}
+
+ ins_encode %{
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ false /* acquire */, true /* release */, false /* weak */, rscratch2);
+ __ cset($res$$Register, Assembler::EQ);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(rscratch1, rscratch1, rscratch2);
+ __ cbz(rscratch1, good);
+ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */);
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ false /* acquire */, true /* release */, false /* weak */, rscratch2);
+ __ cset($res$$Register, Assembler::EQ);
+ __ bind(good);
+ }
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
+instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
+ effect(KILL cr, TEMP_DEF res);
+
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ format %{ "cmpxchg $mem, $oldval, $newval\n\t"
+ "cset $res, EQ" %}
+
+ ins_encode %{
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ true /* acquire */, true /* release */, false /* weak */, rscratch2);
+ __ cset($res$$Register, Assembler::EQ);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(rscratch1, rscratch1, rscratch2);
+ __ cbz(rscratch1, good);
+ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), rscratch2 /* ref */, rscratch1 /* tmp */ );
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ true /* acquire */, true /* release */, false /* weak */, rscratch2);
+ __ cset($res$$Register, Assembler::EQ);
+ __ bind(good);
+ }
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
+instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(TEMP_DEF res, KILL cr);
+
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
+
+ ins_encode %{
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ false /* acquire */, true /* release */, false /* weak */, $res$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(rscratch1, rscratch1, $res$$Register);
+ __ cbz(rscratch1, good);
+ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ false /* acquire */, true /* release */, false /* weak */, $res$$Register);
+ __ bind(good);
+ }
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
+instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+ match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(TEMP_DEF res, KILL cr);
+
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ format %{ "cmpxchg $res = $mem, $oldval, $newval" %}
+
+ ins_encode %{
+ guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ true /* acquire */, true /* release */, false /* weak */, $res$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ ldr(rscratch1, Address(rthread, ZThreadLocalData::address_bad_mask_offset()));
+ __ andr(rscratch1, rscratch1, $res$$Register);
+ __ cbz(rscratch1, good);
+ z_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, rscratch1 /* tmp */);
+ __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::xword,
+ true /* acquire */, true /* release */, false /* weak */, $res$$Register);
+ __ bind(good);
+ }
+ %}
+
+ ins_pipe(pipe_slow);
+%}
+
+instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+ match(Set prev (GetAndSetP mem newv));
+ predicate(UseZGC && !needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(TEMP_DEF prev, KILL cr);
+
+ ins_cost(2 * VOLATILE_REF_COST);
+
+ format %{ "atomic_xchg $prev, $newv, [$mem]" %}
+
+ ins_encode %{
+ __ atomic_xchg($prev$$Register, $newv$$Register, $mem$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
+ }
+ %}
+
+ ins_pipe(pipe_serial);
+%}
+
+instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+ match(Set prev (GetAndSetP mem newv));
+ predicate(UseZGC && needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong));
+ effect(TEMP_DEF prev, KILL cr);
+
+ ins_cost(VOLATILE_REF_COST);
+
+ format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
+
+ ins_encode %{
+ __ atomic_xchgal($prev$$Register, $newv$$Register, $mem$$Register);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, rscratch2 /* tmp */, false /* weak */);
+ }
+ %}
+ ins_pipe(pipe_serial);
+%}
+
diff --git a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad
new file mode 100644
index 000000000..38c2e926b
--- /dev/null
+++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad
@@ -0,0 +1,168 @@
+//
+// Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+
+source_hpp %{
+
+#include "gc/z/c2/zBarrierSetC2.hpp"
+#include "gc/z/zThreadLocalData.hpp"
+
+%}
+
+source %{
+
+static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
+ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
+ __ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+ __ jcc(Assembler::notZero, *stub->entry());
+ __ bind(*stub->continuation());
+}
+
+static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
+ ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
+ __ jmp(*stub->entry());
+ __ bind(*stub->continuation());
+}
+
+%}
+
+// Load Pointer
+instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
+%{
+ predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
+ match(Set dst (LoadP mem));
+ effect(KILL cr, TEMP dst);
+
+ ins_cost(125);
+
+ format %{ "movq $dst, $mem" %}
+
+ ins_encode %{
+ __ movptr($dst$$Register, $mem$$Address);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, false /* weak */);
+ }
+ %}
+
+ ins_pipe(ialu_reg_mem);
+%}
+
+// Load Weak Pointer
+instruct zLoadWeakP(rRegP dst, memory mem, rFlagsReg cr)
+%{
+ predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierWeak);
+ match(Set dst (LoadP mem));
+ effect(KILL cr, TEMP dst);
+
+ ins_cost(125);
+
+ format %{ "movq $dst, $mem" %}
+
+ ins_encode %{
+ __ movptr($dst$$Register, $mem$$Address);
+ z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, true /* weak */);
+ %}
+
+ ins_pipe(ialu_reg_mem);
+%}
+
+instruct zCompareAndExchangeP(memory mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{
+ match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(KILL cr, TEMP tmp);
+
+ format %{ "lock\n\t"
+ "cmpxchgq $newval, $mem" %}
+
+ ins_encode %{
+ if (barrier_data() != ZLoadBarrierElided) {
+ __ movptr($tmp$$Register, $oldval$$Register);
+ }
+ __ lock();
+ __ cmpxchgptr($newval$$Register, $mem$$Address);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+ __ jcc(Assembler::zero, good);
+ z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
+ __ movptr($oldval$$Register, $tmp$$Register);
+ __ lock();
+ __ cmpxchgptr($newval$$Register, $mem$$Address);
+ __ bind(good);
+ }
+ %}
+
+ ins_pipe(pipe_cmpxchg);
+%}
+
+instruct zCompareAndSwapP(rRegI res, memory mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{
+ match(Set res (CompareAndSwapP mem (Binary oldval newval)));
+ match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(KILL cr, KILL oldval, TEMP tmp);
+
+ format %{ "lock\n\t"
+ "cmpxchgq $newval, $mem\n\t"
+ "sete $res\n\t"
+ "movzbl $res, $res" %}
+
+ ins_encode %{
+ if (barrier_data() != ZLoadBarrierElided) {
+ __ movptr($tmp$$Register, $oldval$$Register);
+ }
+ __ lock();
+ __ cmpxchgptr($newval$$Register, $mem$$Address);
+ if (barrier_data() != ZLoadBarrierElided) {
+ Label good;
+ __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
+ __ jcc(Assembler::zero, good);
+ z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
+ __ movptr($oldval$$Register, $tmp$$Register);
+ __ lock();
+ __ cmpxchgptr($newval$$Register, $mem$$Address);
+ __ bind(good);
+ __ cmpptr($tmp$$Register, $oldval$$Register);
+ }
+ __ setb(Assembler::equal, $res$$Register);
+ __ movzbl($res$$Register, $res$$Register);
+ %}
+
+ ins_pipe(pipe_cmpxchg);
+%}
+
+instruct zXChgP(memory mem, rRegP newval, rFlagsReg cr) %{
+ match(Set newval (GetAndSetP mem newval));
+ predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
+ effect(KILL cr);
+
+ format %{ "xchgq $newval, $mem" %}
+
+ ins_encode %{
+ __ xchgptr($newval$$Register, $mem$$Address);
+ if (barrier_data() != ZLoadBarrierElided) {
+ z_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, false /* weak */);
+ }
+ %}
+
+ ins_pipe(pipe_cmpxchg);
+%}
+
diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad
index 95a8538f3..ede4d8864 100644
--- a/src/hotspot/cpu/x86/x86_64.ad
+++ b/src/hotspot/cpu/x86/x86_64.ad
@@ -538,19 +538,6 @@ reg_class int_rdi_reg(RDI);
%}
-source_hpp %{
-
-#include "gc/z/c2/zBarrierSetC2.hpp"
-#include "gc/z/zThreadLocalData.hpp"
-
-%}
-
-source_hpp %{
-#if INCLUDE_ZGC
-#include "gc/z/zBarrierSetAssembler.hpp"
-#endif
-%}
-
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
@@ -1882,19 +1869,6 @@ const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return NO_REG_mask();
}
-static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp, bool weak) {
- ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, weak);
- __ testptr(ref, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
- __ jcc(Assembler::notZero, *stub->entry());
- __ bind(*stub->continuation());
-}
-
-static void z_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
- ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref, tmp, false /* weak */);
- __ jmp(*stub->entry());
- __ bind(*stub->continuation());
-}
-
%}
//----------ENCODING BLOCK-----------------------------------------------------
@@ -12845,131 +12819,6 @@ instruct RethrowException()
ins_pipe(pipe_jmp);
%}
-//
-// Execute ZGC load barrier (strong) slow path
-//
-
-// Load Pointer
-instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
-%{
- predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierStrong);
- match(Set dst (LoadP mem));
- effect(KILL cr, TEMP dst);
-
- ins_cost(125);
-
- format %{ "movq $dst, $mem" %}
-
- ins_encode %{
- __ movptr($dst$$Register, $mem$$Address);
- if (barrier_data() != ZLoadBarrierElided) {
- z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, false /* weak */);
- }
- %}
-
- ins_pipe(ialu_reg_mem);
-%}
-
-// Load Weak Pointer
-instruct zLoadWeakP(rRegP dst, memory mem, rFlagsReg cr)
-%{
- predicate(UseZGC && n->as_Load()->barrier_data() == ZLoadBarrierWeak);
- match(Set dst (LoadP mem));
- effect(KILL cr, TEMP dst);
-
- ins_cost(125);
-
- format %{ "movq $dst, $mem" %}
- ins_encode %{
- __ movptr($dst$$Register, $mem$$Address);
- z_load_barrier(_masm, this, $mem$$Address, $dst$$Register, noreg /* tmp */, true /* weak */);
- %}
-
- ins_pipe(ialu_reg_mem);
-%}
-
-instruct zCompareAndExchangeP(memory mem, rax_RegP oldval, rRegP newval, rRegP tmp, rFlagsReg cr) %{
- match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
- predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
- effect(KILL cr, TEMP tmp);
-
- format %{ "lock\n\t"
- "cmpxchgq $newval, $mem" %}
-
- ins_encode %{
- if (barrier_data() != ZLoadBarrierElided) {
- __ movptr($tmp$$Register, $oldval$$Register);
- }
- __ lock();
- __ cmpxchgptr($newval$$Register, $mem$$Address);
- if (barrier_data() != ZLoadBarrierElided) {
- Label good;
- __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
- __ jcc(Assembler::zero, good);
- z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
- __ movptr($oldval$$Register, $tmp$$Register);
- __ lock();
- __ cmpxchgptr($newval$$Register, $mem$$Address);
- __ bind(good);
- }
- %}
-
- ins_pipe(pipe_cmpxchg);
-%}
-
-
-instruct zCompareAndSwapP(rRegI res, memory mem, rRegP newval, rRegP tmp, rFlagsReg cr, rax_RegP oldval) %{
- match(Set res (CompareAndSwapP mem (Binary oldval newval)));
- match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
- predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
- effect(KILL cr, KILL oldval, TEMP tmp);
-
- format %{ "lock\n\t"
- "cmpxchgq $newval, $mem\n\t"
- "sete $res\n\t"
- "movzbl $res, $res" %}
-
- ins_encode %{
- if (barrier_data() != ZLoadBarrierElided) {
- __ movptr($tmp$$Register, $oldval$$Register);
- }
- __ lock();
- __ cmpxchgptr($newval$$Register, $mem$$Address);
- if (barrier_data() != ZLoadBarrierElided) {
- Label good;
- __ testptr($oldval$$Register, Address(r15_thread, ZThreadLocalData::address_bad_mask_offset()));
- __ jcc(Assembler::zero, good);
- z_load_barrier_slow_path(_masm, this, $mem$$Address, $oldval$$Register, $tmp$$Register);
- __ movptr($oldval$$Register, $tmp$$Register);
- __ lock();
- __ cmpxchgptr($newval$$Register, $mem$$Address);
- __ bind(good);
- __ cmpptr($tmp$$Register, $oldval$$Register);
- }
- __ setb(Assembler::equal, $res$$Register);
- __ movzbl($res$$Register, $res$$Register);
- %}
-
- ins_pipe(pipe_cmpxchg);
-%}
-
-instruct zXChgP(memory mem, rRegP newval, rFlagsReg cr) %{
- match(Set newval (GetAndSetP mem newval));
- predicate(UseZGC && n->as_LoadStore()->barrier_data() == ZLoadBarrierStrong);
- effect(KILL cr);
-
- format %{ "xchgq $newval, $mem" %}
-
- ins_encode %{
- __ xchgptr($newval$$Register, $mem$$Address);
- if (barrier_data() != ZLoadBarrierElided) {
- z_load_barrier(_masm, this, Address(noreg, 0), $newval$$Register, noreg /* tmp */, false /* weak */);
- }
- %}
-
- ins_pipe(pipe_cmpxchg);
-%}
-
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
--
2.19.0
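The matchers this patch moves into the new z_aarch64.ad and z_x86_64.ad files all emit the same load-barrier fast path: load the reference, test it against the per-thread bad mask, and branch to an out-of-line stub only when a metadata bit is bad. A conceptual C++ sketch of that check, where bad_mask and slow_path are stand-ins for ZThreadLocalData::address_bad_mask() and the ZLoadBarrierStubC2 runtime stub rather than real HotSpot API:

#include <cstdint>

// Fast path: one load, one AND, one conditional branch (test/jcc on
// x86, andr/cbnz on aarch64). The slow path heals the reference and
// stores the good pointer back through ref_addr.
inline void* z_load_barrier_sketch(void** ref_addr, uintptr_t bad_mask,
                                   void* (*slow_path)(void*, void**)) {
  void* ref = *ref_addr;
  if (((uintptr_t)ref & bad_mask) != 0) {
    ref = slow_path(ref, ref_addr);
  }
  return ref;
}

The refactoring itself changes no barrier behavior: GensrcAdlc.gmk now appends the per-GC .ad files only when the zgc JVM feature is enabled, so non-ZGC builds no longer compile these matchers.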


@@ -308,8 +308,8 @@ index 643e3d564..82e615241 100644
static int cpu_revision() { return _revision; }
+ static int get_initial_sve_vector_length() { return _initial_sve_vector_length; };
static bool is_zva_enabled() { return 0 <= _zva_length; }
static int zva_length() {
static bool is_hisi_enabled() {
if (_cpu == CPU_HISILICON && (_model == 0xd01 || _model == 0xd02 || _model == 0xd03)) {
diff --git a/test/hotspot/jtreg/compiler/c2/aarch64/TestSVEWithJNI.java b/test/hotspot/jtreg/compiler/c2/aarch64/TestSVEWithJNI.java
new file mode 100644
index 000000000..dc15ca800


@@ -1,101 +0,0 @@
From d2137837d518a8bdb8e075109e502e78bd2f9fa9 Mon Sep 17 00:00:00 2001
Date: Wed, 19 Feb 2020 17:36:32 +0800
Subject: [PATCH] 8233061: ZGC: Enforce memory ordering in segmented bit maps
Summary: <gc>: <resolves try_mark and relocate crash>
LLT: renaissance
Bug url: https://bugs.openjdk.java.net/browse/JDK-8233061
---
src/hotspot/share/gc/z/zLiveMap.cpp | 20 +++++++++-----------
src/hotspot/share/gc/z/zLiveMap.inline.hpp | 9 +++++----
2 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/src/hotspot/share/gc/z/zLiveMap.cpp b/src/hotspot/share/gc/z/zLiveMap.cpp
index 7187b6166..c1d79b794 100644
--- a/src/hotspot/share/gc/z/zLiveMap.cpp
+++ b/src/hotspot/share/gc/z/zLiveMap.cpp
@@ -50,7 +50,9 @@ void ZLiveMap::reset(size_t index) {
// Multiple threads can enter here, make sure only one of them
// resets the marking information while the others busy wait.
- for (uint32_t seqnum = _seqnum; seqnum != ZGlobalSeqNum; seqnum = _seqnum) {
+ for (uint32_t seqnum = OrderAccess::load_acquire(&_seqnum);
+ seqnum != ZGlobalSeqNum;
+ seqnum = OrderAccess::load_acquire(&_seqnum)) {
if ((seqnum != seqnum_initializing) &&
(Atomic::cmpxchg(seqnum_initializing, &_seqnum, seqnum) == seqnum)) {
// Reset marking information
@@ -61,13 +63,13 @@ void ZLiveMap::reset(size_t index) {
segment_live_bits().clear();
segment_claim_bits().clear();
- // Make sure the newly reset marking information is
- // globally visible before updating the page seqnum.
- OrderAccess::storestore();
-
- // Update seqnum
assert(_seqnum == seqnum_initializing, "Invalid");
- _seqnum = ZGlobalSeqNum;
+
+ // Make sure the newly reset marking information is ordered
+ // before the update of the page seqnum, such that when the
+ // up-to-date seqnum is load acquired, the bit maps will not
+ // contain stale information.
+ OrderAccess::release_store(&_seqnum, ZGlobalSeqNum);
break;
}
@@ -89,10 +91,6 @@ void ZLiveMap::reset_segment(BitMap::idx_t segment) {
if (!claim_segment(segment)) {
// Already claimed, wait for live bit to be set
while (!is_segment_live(segment)) {
- // Busy wait. The loadload barrier is needed to make
- // sure we re-read the live bit every time we loop.
- OrderAccess::loadload();
-
// Mark reset contention
if (!contention) {
// Count contention once
diff --git a/src/hotspot/share/gc/z/zLiveMap.inline.hpp b/src/hotspot/share/gc/z/zLiveMap.inline.hpp
index 1e4d56f41..fb45a892c 100644
--- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp
+++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp
@@ -30,6 +30,7 @@
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
@@ -38,7 +39,7 @@ inline void ZLiveMap::reset() {
}
inline bool ZLiveMap::is_marked() const {
- return _seqnum == ZGlobalSeqNum;
+ return OrderAccess::load_acquire(&_seqnum) == ZGlobalSeqNum;
}
inline uint32_t ZLiveMap::live_objects() const {
@@ -68,15 +69,15 @@ inline BitMapView ZLiveMap::segment_claim_bits() {
}
inline bool ZLiveMap::is_segment_live(BitMap::idx_t segment) const {
- return segment_live_bits().at(segment);
+ return segment_live_bits().par_at(segment);
}
inline bool ZLiveMap::set_segment_live_atomic(BitMap::idx_t segment) {
- return segment_live_bits().par_set_bit(segment);
+ return segment_live_bits().par_set_bit(segment, memory_order_release);
}
inline bool ZLiveMap::claim_segment(BitMap::idx_t segment) {
- return segment_claim_bits().par_set_bit(segment);
+ return segment_claim_bits().par_set_bit(segment, memory_order_acq_rel);
}
inline BitMap::idx_t ZLiveMap::first_live_segment() const {
--
2.12.3
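The fix pairs a release store of _seqnum with acquire loads, so any thread that observes the up-to-date seqnum is guaranteed to also observe the freshly cleared bit maps, making the old explicit storestore/loadload barriers unnecessary. A minimal std::atomic sketch of the same pairing (illustrative names; the patch itself uses HotSpot's OrderAccess::release_store and load_acquire):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> seqnum{0}; // stand-in for ZLiveMap::_seqnum

void reset_marking(uint32_t global_seqnum) {
  // ... clear segment_live_bits() and segment_claim_bits() here ...

  // Release: the cleared bit maps happen-before the seqnum update.
  seqnum.store(global_seqnum, std::memory_order_release);
}

bool is_marked(uint32_t global_seqnum) {
  // Acquire: seeing the current seqnum implies seeing the reset maps.
  return seqnum.load(std::memory_order_acquire) == global_seqnum;
}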


@@ -1,162 +0,0 @@
diff --git a/src/hotspot/share/c1/c1_Instruction.cpp b/src/hotspot/share/c1/c1_Instruction.cpp
index ee3be89..62d8b48 100644
--- a/src/hotspot/share/c1/c1_Instruction.cpp
+++ b/src/hotspot/share/c1/c1_Instruction.cpp
@@ -29,6 +29,7 @@
#include "c1/c1_ValueStack.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
+#include "utilities/bitMap.inline.hpp"
// Implementation of Instruction
diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp
index bf9179f..e0696de 100644
--- a/src/hotspot/share/opto/graphKit.cpp
+++ b/src/hotspot/share/opto/graphKit.cpp
@@ -43,6 +43,7 @@
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
diff --git a/src/hotspot/share/opto/parse1.cpp b/src/hotspot/share/opto/parse1.cpp
index 99b1a67..f94f028 100644
--- a/src/hotspot/share/opto/parse1.cpp
+++ b/src/hotspot/share/opto/parse1.cpp
@@ -41,6 +41,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
// Static array so we can figure out which bytecodes stop us from compiling
diff --git a/src/hotspot/share/utilities/bitMap.hpp b/src/hotspot/share/utilities/bitMap.hpp
index c671535..e26f346 100644
--- a/src/hotspot/share/utilities/bitMap.hpp
+++ b/src/hotspot/share/utilities/bitMap.hpp
@@ -26,6 +26,7 @@
#define SHARE_VM_UTILITIES_BITMAP_HPP
#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -95,6 +96,8 @@ class BitMap {
void set_word (idx_t word) { set_word(word, ~(bm_word_t)0); }
void clear_word(idx_t word) { _map[word] = 0; }
+ static inline const bm_word_t load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order);
+
// Utilities for ranges of bits. Ranges are half-open [beg, end).
// Ranges within a single word.
@@ -194,6 +197,9 @@ class BitMap {
return (*word_addr(index) & bit_mask(index)) != 0;
}
+ // memory_order must be memory_order_relaxed or memory_order_acquire.
+ bool par_at(idx_t index, atomic_memory_order memory_order = memory_order_acquire) const;
+
// Align bit index up or down to the next bitmap word boundary, or check
// alignment.
static idx_t word_align_up(idx_t bit) {
@@ -210,9 +216,14 @@ class BitMap {
inline void set_bit(idx_t bit);
inline void clear_bit(idx_t bit);
- // Atomically set or clear the specified bit.
- inline bool par_set_bit(idx_t bit);
- inline bool par_clear_bit(idx_t bit);
+ // Attempts to change a bit to a desired value. The operation returns true if
+ // this thread changed the value of the bit. It was changed with a RMW operation
+ // using the specified memory_order. The operation returns false if the change
+ // could not be set due to the bit already being observed in the desired state.
+ // The atomic access that observed the bit in the desired state has acquire
+ // semantics, unless memory_order is memory_order_relaxed or memory_order_release.
+ inline bool par_set_bit(idx_t bit, atomic_memory_order memory_order = memory_order_conservative);
+ inline bool par_clear_bit(idx_t bit, atomic_memory_order memory_order = memory_order_conservative);
// Put the given value at the given offset. The parallel version
// will CAS the value into the bitmap and is quite a bit slower.
diff --git a/src/hotspot/share/utilities/bitMap.inline.hpp b/src/hotspot/share/utilities/bitMap.inline.hpp
index b10726d..7a7e2ad 100644
--- a/src/hotspot/share/utilities/bitMap.inline.hpp
+++ b/src/hotspot/share/utilities/bitMap.inline.hpp
@@ -26,6 +26,7 @@
#define SHARE_VM_UTILITIES_BITMAP_INLINE_HPP
#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
#include "utilities/bitMap.hpp"
inline void BitMap::set_bit(idx_t bit) {
@@ -38,18 +39,39 @@ inline void BitMap::clear_bit(idx_t bit) {
*word_addr(bit) &= ~bit_mask(bit);
}
-inline bool BitMap::par_set_bit(idx_t bit) {
+inline const BitMap::bm_word_t BitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) {
+ if (memory_order == memory_order_relaxed || memory_order == memory_order_release) {
+ return Atomic::load(addr);
+ } else {
+ assert(memory_order == memory_order_acq_rel ||
+ memory_order == memory_order_acquire ||
+ memory_order == memory_order_conservative,
+ "unexpected memory ordering");
+ return OrderAccess::load_acquire(addr);
+ }
+}
+
+inline bool BitMap::par_at(idx_t index, atomic_memory_order memory_order) const {
+ verify_index(index);
+ assert(memory_order == memory_order_acquire ||
+ memory_order == memory_order_relaxed,
+ "unexpected memory ordering");
+ const volatile bm_word_t* const addr = word_addr(index);
+ return (load_word_ordered(addr, memory_order) & bit_mask(index)) != 0;
+}
+
+inline bool BitMap::par_set_bit(idx_t bit, atomic_memory_order memory_order) {
verify_index(bit);
volatile bm_word_t* const addr = word_addr(bit);
const bm_word_t mask = bit_mask(bit);
- bm_word_t old_val = *addr;
+ bm_word_t old_val = load_word_ordered(addr, memory_order);
do {
const bm_word_t new_val = old_val | mask;
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
+ const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val, memory_order);
if (cur_val == old_val) {
return true; // Success.
}
@@ -57,18 +79,18 @@ inline bool BitMap::par_set_bit(idx_t bit) {
} while (true);
}
-inline bool BitMap::par_clear_bit(idx_t bit) {
+inline bool BitMap::par_clear_bit(idx_t bit, atomic_memory_order memory_order) {
verify_index(bit);
volatile bm_word_t* const addr = word_addr(bit);
const bm_word_t mask = ~bit_mask(bit);
- bm_word_t old_val = *addr;
+ bm_word_t old_val = load_word_ordered(addr, memory_order);
do {
const bm_word_t new_val = old_val & mask;
if (new_val == old_val) {
return false; // Someone else beat us to it.
}
- const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
+ const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val, memory_order);
if (cur_val == old_val) {
return true; // Success.
}
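The par_set_bit()/par_clear_bit() bodies above are a standard CAS retry loop, now parameterized by memory order. A standalone sketch with std::atomic, approximating memory_order_conservative with acq_rel (the fixed orderings and names here are illustrative simplifications of the patch's atomic_memory_order plumbing):

#include <atomic>
#include <cstdint>

// Set one bit in 'word'; returns true only if this thread flipped it.
inline bool par_set_bit(std::atomic<uintptr_t>& word, uintptr_t mask) {
  uintptr_t old_val = word.load(std::memory_order_relaxed);
  do {
    const uintptr_t new_val = old_val | mask;
    if (new_val == old_val) {
      return false; // Someone else beat us to it.
    }
    // On failure, compare_exchange_weak refreshes old_val with the
    // observed word, so the loop re-tests the bit before retrying.
    if (word.compare_exchange_weak(old_val, new_val,
                                   std::memory_order_acq_rel,
                                   std::memory_order_acquire)) {
      return true; // Success.
    }
  } while (true);
}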


@@ -1,472 +0,0 @@
From aa824cddc917b1fcac41a0efe5e8c794f2d5cff9 Mon Sep 17 00:00:00 2001
Date: Thu, 26 Mar 2020 16:17:45 +0000
Subject: [PATCH] 8233506: ZGC: the load for Reference.get() can be converted to a load for strong refs
Summary: <gc>: <ZGC: the load for Reference.get() can be converted to a load for strong refs>
LLT: JDK8233506
Bug url: https://bugs.openjdk.java.net/browse/JDK-8233506
---
src/hotspot/share/gc/shared/c2/barrierSetC2.cpp | 73 +++++++++++++++----------
src/hotspot/share/gc/shared/c2/barrierSetC2.hpp | 7 ++-
src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp | 42 +++++---------
src/hotspot/share/opto/graphKit.cpp | 9 +--
src/hotspot/share/opto/graphKit.hpp | 10 ++--
src/hotspot/share/opto/memnode.cpp | 9 ++-
src/hotspot/share/opto/memnode.hpp | 7 ++-
7 files changed, 85 insertions(+), 72 deletions(-)
diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
index 545275644..48fe04b08 100644
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
@@ -115,10 +115,13 @@ Node* BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) con
Node* load;
if (in_native) {
- load = kit->make_load(control, adr, val_type, access.type(), mo);
+ load = kit->make_load(control, adr, val_type, access.type(), mo, dep,
+ requires_atomic_access, unaligned,
+ mismatched, unsafe, access.barrier_data());
} else {
load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
- dep, requires_atomic_access, unaligned, mismatched, unsafe);
+ dep, requires_atomic_access, unaligned, mismatched, unsafe,
+ access.barrier_data());
}
access.set_raw_access(load);
@@ -348,28 +351,28 @@ Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node*
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
- load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
+ load_store = new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo);
} else
#endif
{
- load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
+ load_store = new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo);
}
} else {
switch (access.type()) {
case T_BYTE: {
- load_store = kit->gvn().transform(new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
+ load_store = new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
break;
}
case T_SHORT: {
- load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
+ load_store = new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
break;
}
case T_INT: {
- load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
+ load_store = new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
break;
}
case T_LONG: {
- load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
+ load_store = new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
break;
}
default:
@@ -377,6 +380,9 @@ Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node*
}
}
+ load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
+ load_store = kit->gvn().transform(load_store);
+
access.set_raw_access(load_store);
pin_atomic_op(access);
@@ -405,50 +411,50 @@ Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node
Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
if (is_weak_cas) {
- load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
+ load_store = new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo);
} else {
- load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
+ load_store = new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo);
}
} else
#endif
{
if (is_weak_cas) {
- load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo);
} else {
- load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo);
}
}
} else {
switch(access.type()) {
case T_BYTE: {
if (is_weak_cas) {
- load_store = kit->gvn().transform(new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo);
} else {
- load_store = kit->gvn().transform(new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo);
}
break;
}
case T_SHORT: {
if (is_weak_cas) {
- load_store = kit->gvn().transform(new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo);
} else {
- load_store = kit->gvn().transform(new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo);
}
break;
}
case T_INT: {
if (is_weak_cas) {
- load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo);
} else {
- load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo);
}
break;
}
case T_LONG: {
if (is_weak_cas) {
- load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo);
} else {
- load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
+ load_store = new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo);
}
break;
}
@@ -457,6 +463,9 @@ Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node
}
}
+ load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
+ load_store = kit->gvn().transform(load_store);
+
access.set_raw_access(load_store);
pin_atomic_op(access);
@@ -478,27 +487,30 @@ Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_va
} else
#endif
{
- load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
+ load_store = new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr());
}
} else {
switch (access.type()) {
case T_BYTE:
- load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
+ load_store = new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type);
break;
case T_SHORT:
- load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
+ load_store = new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type);
break;
case T_INT:
- load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
+ load_store = new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type);
break;
case T_LONG:
- load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
+ load_store = new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type);
break;
default:
ShouldNotReachHere();
}
}
+ load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
+ load_store = kit->gvn().transform(load_store);
+
access.set_raw_access(load_store);
pin_atomic_op(access);
@@ -520,21 +532,24 @@ Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val
switch(access.type()) {
case T_BYTE:
- load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
+ load_store = new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type);
break;
case T_SHORT:
- load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
+ load_store = new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type);
break;
case T_INT:
- load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
+ load_store = new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type);
break;
case T_LONG:
- load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
+ load_store = new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type);
break;
default:
ShouldNotReachHere();
}
+ load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
+ load_store = kit->gvn().transform(load_store);
+
access.set_raw_access(load_store);
pin_atomic_op(access);
diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp
index 487988bd8..8b4be7d11 100644
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp
@@ -96,6 +96,7 @@ protected:
Node* _base;
C2AccessValuePtr& _addr;
Node* _raw_access;
+ uint8_t _barrier_data;
void fixup_decorators();
void* barrier_set_state() const;
@@ -108,7 +109,8 @@ public:
_type(type),
_base(base),
_addr(addr),
- _raw_access(NULL)
+ _raw_access(NULL),
+ _barrier_data(0)
{
fixup_decorators();
}
@@ -122,6 +124,9 @@ public:
bool is_raw() const { return (_decorators & AS_RAW) != 0; }
Node* raw_access() const { return _raw_access; }
+ uint8_t barrier_data() const { return _barrier_data; }
+ void set_barrier_data(uint8_t data) { _barrier_data = data; }
+
void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.
diff --git a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
index a12973464..e178761a0 100644
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp
@@ -174,48 +174,36 @@ int ZBarrierSetC2::estimate_stub_size() const {
return size;
}
-static bool barrier_needed(C2Access access) {
- return ZBarrierSet::barrier_needed(access.decorators(), access.type());
-}
-
-Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
- Node* result = BarrierSetC2::load_at_resolved(access, val_type);
- if (barrier_needed(access) && access.raw_access()->is_Mem()) {
- if ((access.decorators() & ON_WEAK_OOP_REF) != 0) {
- access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierWeak);
+static void set_barrier_data(C2Access& access) {
+ if (ZBarrierSet::barrier_needed(access.decorators(), access.type())) {
+ if (access.decorators() & ON_WEAK_OOP_REF) {
+ access.set_barrier_data(ZLoadBarrierWeak);
} else {
- access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierStrong);
+ access.set_barrier_data(ZLoadBarrierStrong);
}
}
+}
- return result;
+Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
+ set_barrier_data(access);
+ return BarrierSetC2::load_at_resolved(access, val_type);
}
Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
Node* new_val, const Type* val_type) const {
- Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
- if (barrier_needed(access)) {
- access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
- }
- return result;
+ set_barrier_data(access);
+ return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
}
Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
Node* new_val, const Type* value_type) const {
- Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
- if (barrier_needed(access)) {
- access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
- }
- return result;
-
+ set_barrier_data(access);
+ return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
}
Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* val_type) const {
- Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
- if (barrier_needed(access)) {
- access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
- }
- return result;
+ set_barrier_data(access);
+ return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
}
bool ZBarrierSetC2::array_copy_requires_gc_barriers(BasicType type) const {
diff --git a/src/hotspot/share/opto/graphKit.cpp b/src/hotspot/share/opto/graphKit.cpp
index 7bf2f6cfb..a1547b42f 100644
--- a/src/hotspot/share/opto/graphKit.cpp
+++ b/src/hotspot/share/opto/graphKit.cpp
@@ -1493,18 +1493,19 @@ Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
bool require_atomic_access,
bool unaligned,
bool mismatched,
- bool unsafe) {
+ bool unsafe,
+ uint8_t barrier_data) {
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = NULL; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld;
if (require_atomic_access && bt == T_LONG) {
- ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe);
+ ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
} else if (require_atomic_access && bt == T_DOUBLE) {
- ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe);
+ ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
} else {
- ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe);
+ ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
}
ld = _gvn.transform(ld);
if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
diff --git a/src/hotspot/share/opto/graphKit.hpp b/src/hotspot/share/opto/graphKit.hpp
index 07c20bbd5..df5d18ccc 100644
--- a/src/hotspot/share/opto/graphKit.hpp
+++ b/src/hotspot/share/opto/graphKit.hpp
@@ -518,27 +518,27 @@ class GraphKit : public Phase {
Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
bool require_atomic_access = false, bool unaligned = false,
- bool mismatched = false, bool unsafe = false) {
+ bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
// This version computes alias_index from bottom_type
return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
mo, control_dependency, require_atomic_access,
- unaligned, mismatched, unsafe);
+ unaligned, mismatched, unsafe, barrier_data);
}
Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
bool require_atomic_access = false, bool unaligned = false,
- bool mismatched = false, bool unsafe = false) {
+ bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
// This version computes alias_index from an address type
assert(adr_type != NULL, "use other make_load factory");
return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
mo, control_dependency, require_atomic_access,
- unaligned, mismatched, unsafe);
+ unaligned, mismatched, unsafe, barrier_data);
}
// This is the base version which is given an alias index.
Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
bool require_atomic_access = false, bool unaligned = false,
- bool mismatched = false, bool unsafe = false);
+ bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
// Create & transform a StoreNode and store the effect into the
// parser's memory state.
diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp
index ee0f09e11..ff0a5726c 100644
--- a/src/hotspot/share/opto/memnode.cpp
+++ b/src/hotspot/share/opto/memnode.cpp
@@ -808,7 +808,7 @@ bool LoadNode::is_immutable_value(Node* adr) {
//----------------------------LoadNode::make-----------------------------------
// Polymorphic factory method:
Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo,
- ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe) {
+ ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
Compile* C = gvn.C;
// sanity check the alias category against the created node type
@@ -859,6 +859,7 @@ Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypeP
if (unsafe) {
load->set_unsafe_access();
}
+ load->set_barrier_data(barrier_data);
if (load->Opcode() == Op_LoadN) {
Node* ld = gvn.transform(load);
return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
@@ -868,7 +869,7 @@ Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypeP
}
LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
- ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe) {
+ ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
bool require_atomic = true;
LoadLNode* load = new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
if (unaligned) {
@@ -880,11 +881,12 @@ LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr
if (unsafe) {
load->set_unsafe_access();
}
+ load->set_barrier_data(barrier_data);
return load;
}
LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
- ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe) {
+ ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
bool require_atomic = true;
LoadDNode* load = new LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
if (unaligned) {
@@ -896,6 +898,7 @@ LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr
if (unsafe) {
load->set_unsafe_access();
}
+ load->set_barrier_data(barrier_data);
return load;
}
diff --git a/src/hotspot/share/opto/memnode.hpp b/src/hotspot/share/opto/memnode.hpp
index 7468abdbc..14a4a67c6 100644
--- a/src/hotspot/share/opto/memnode.hpp
+++ b/src/hotspot/share/opto/memnode.hpp
@@ -227,7 +227,8 @@ public:
static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
const TypePtr* at, const Type *rt, BasicType bt,
MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
- bool unaligned = false, bool mismatched = false, bool unsafe = false);
+ bool unaligned = false, bool mismatched = false, bool unsafe = false,
+ uint8_t barrier_data = 0);
virtual uint hash() const; // Check the type
@@ -408,7 +409,7 @@ public:
bool require_atomic_access() const { return _require_atomic_access; }
static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
- bool unaligned = false, bool mismatched = false, bool unsafe = false);
+ bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
LoadNode::dump_spec(st);
@@ -460,7 +461,7 @@ public:
bool require_atomic_access() const { return _require_atomic_access; }
static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
- bool unaligned = false, bool mismatched = false, bool unsafe = false);
+ bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const {
LoadNode::dump_spec(st);
--
2.12.3
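The structural change in this patch is the same at every call site: instead of fetching access.raw_access() after GVN has already transformed (and possibly commoned) the node, the barrier metadata travels with the access object and is stamped on the node before gvn().transform(). A minimal sketch of that ordering, with hypothetical stand-in types rather than the real C2 classes:

#include <cstdint>

struct Node {
    uint8_t barrier_data = 0;
    void set_barrier_data(uint8_t d) { barrier_data = d; }
};

struct PhaseGVN {
    Node* transform(Node* n) { return n; }  // stand-in for value numbering
};

Node* make_load_store(PhaseGVN& gvn, uint8_t barrier_data) {
    Node* n = new Node();               // build the raw LoadStore node
    n->set_barrier_data(barrier_data);  // metadata is part of the node's identity
    return gvn.transform(n);            // only now is it eligible for commoning
}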

View File

@@ -0,0 +1,113 @@
---
src/hotspot/share/prims/unsafe.cpp | 6 ++++++
src/hotspot/share/runtime/globals.hpp | 5 +++++
.../share/classes/java/lang/StringUTF16.java | 19 +++++++++++++++++++
.../classes/jdk/internal/misc/Unsafe.java | 1 +
4 files changed, 31 insertions(+)
diff --git a/src/hotspot/share/prims/unsafe.cpp b/src/hotspot/share/prims/unsafe.cpp
index 91328cd19..37d46225f 100644
--- a/src/hotspot/share/prims/unsafe.cpp
+++ b/src/hotspot/share/prims/unsafe.cpp
@@ -1007,6 +1007,11 @@ UNSAFE_ENTRY(jint, Unsafe_GetLoadAverage0(JNIEnv *env, jobject unsafe, jdoubleAr
return ret;
} UNSAFE_END
+UNSAFE_ENTRY(jboolean, Unsafe_GetUseCharCache(JNIEnv *env, jobject unsafe)) {
+ return UseCharCache;
+}
+UNSAFE_END
+
UNSAFE_ENTRY(jboolean, Unsafe_GetUseHashMapIntegerCache(JNIEnv *env, jobject unsafe)) {
return UseHashMapIntegerCache;
}
@@ -1102,6 +1107,7 @@ static JNINativeMethod jdk_internal_misc_Unsafe_methods[] = {
{CC "isBigEndian0", CC "()Z", FN_PTR(Unsafe_isBigEndian0)},
{CC "unalignedAccess0", CC "()Z", FN_PTR(Unsafe_unalignedAccess0)},
+ {CC "getUseCharCache", CC "()Z", FN_PTR(Unsafe_GetUseCharCache)},
{CC "getUseHashMapIntegerCache", CC "()Z", FN_PTR(Unsafe_GetUseHashMapIntegerCache)},
{CC "getUseFastSerializer", CC "()Z", FN_PTR(Unsafe_GetUseFastSerializer)},
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index e2bfd0c5b..40acb29b4 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -2691,6 +2691,11 @@ define_pd_global(uint64_t,MaxRAM, 1ULL*G);
"the HashMap Value type, indexed by the unboxed int key value." \
"faster in execution, higher in memory consumption.") \
\
+ experimental(bool, UseCharCache, false, \
+ "When char[] is frequently used to build strings, " \
+ "and char[] has a lot of duplicate data, using char cache can" \
+ "greatly improve performance and take up little extra space") \
+ \
experimental(bool, UseFastSerializer, false, \
"Cache-based serialization.It is extremely fast, but it can only" \
"be effective in certain scenarios.") \
diff --git a/src/java.base/share/classes/java/lang/StringUTF16.java b/src/java.base/share/classes/java/lang/StringUTF16.java
index 331b51812..c3ede9676 100644
--- a/src/java.base/share/classes/java/lang/StringUTF16.java
+++ b/src/java.base/share/classes/java/lang/StringUTF16.java
@@ -28,11 +28,13 @@ package java.lang;
import java.util.Arrays;
import java.util.Locale;
import java.util.Spliterator;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;
import java.util.function.IntConsumer;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import jdk.internal.HotSpotIntrinsicCandidate;
+import jdk.internal.misc.Unsafe;
import jdk.internal.vm.annotation.ForceInline;
import jdk.internal.vm.annotation.DontInline;
@@ -41,6 +43,14 @@ import static java.lang.String.LATIN1;
final class StringUTF16 {
+ private static final Unsafe UNSAFE = Unsafe.getUnsafe();
+
+ private static boolean enableCharCache = UNSAFE.getUseCharCache();
+
+ private static final int MAX_CHAR_CACHE = 1200000;
+
+ private static final ConcurrentHashMap<char[], byte[]> charCache = new ConcurrentHashMap<>();
+
public static byte[] newBytesFor(int len) {
if (len < 0) {
throw new NegativeArraySizeException();
@@ -157,8 +167,17 @@ final class StringUTF16 {
}
public static byte[] compress(char[] val, int off, int len) {
+ boolean flag = (off == 0 && len == val.length);
+ if(enableCharCache && flag) {
+ if(charCache.containsKey(val)) {
+ return charCache.get(val);
+ }
+ }
byte[] ret = new byte[len];
if (compress(val, off, ret, 0, len) == len) {
+ if(enableCharCache && flag && charCache.size() < MAX_CHAR_CACHE) {
+ charCache.put(val, ret);
+ }
return ret;
}
return null;
diff --git a/src/java.base/share/classes/jdk/internal/misc/Unsafe.java b/src/java.base/share/classes/jdk/internal/misc/Unsafe.java
index 4d71e671e..4fc4b1a43 100644
--- a/src/java.base/share/classes/jdk/internal/misc/Unsafe.java
+++ b/src/java.base/share/classes/jdk/internal/misc/Unsafe.java
@@ -3702,6 +3702,7 @@ public final class Unsafe {
private static int convEndian(boolean big, int n) { return big == BE ? n : Integer.reverseBytes(n) ; }
private static long convEndian(boolean big, long n) { return big == BE ? n : Long.reverseBytes(n) ; }
+ public native boolean getUseCharCache();
public native boolean getUseHashMapIntegerCache();
public native boolean getUseFastSerializer();
private native long allocateMemory0(long bytes);
--
2.19.1
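The plumbing pattern in this patch is a -XX flag surfaced to Java through a native method on jdk.internal.misc.Unsafe. A compilable sketch of that bridge under assumed names (UseExampleCache and getUseExampleCache are illustrative, not the real flag):

#include <jni.h>

static bool UseExampleCache = false;  // stands in for an experimental -XX flag

extern "C" jboolean JNICALL Unsafe_GetUseExampleCache(JNIEnv*, jobject) {
    return UseExampleCache ? JNI_TRUE : JNI_FALSE;
}

static const JNINativeMethod methods[] = {
    // "()Z" is the JNI descriptor for a no-arg method returning boolean.
    {(char*)"getUseExampleCache", (char*)"()Z", (void*)Unsafe_GetUseExampleCache},
};
// env->RegisterNatives(unsafeClass, methods, 1) would bind this to a Java-side
// declaration `public native boolean getUseExampleCache();`.

Note that ConcurrentHashMap<char[], byte[]> keys use the array's inherited identity hashCode/equals, so the cache only hits when the exact same char[] instance is compressed again.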

View File

@@ -1,58 +0,0 @@
From e8bf6d9c5a02b3ffaf223dd1109bc15c664cca28 Mon Sep 17 00:00:00 2001
Date: Mon, 24 Feb 2020 18:51:09 +0800
Subject: [PATCH] ZGC: aarch64: fix not using load/store Pre-indexed
addressing to modify sp
Summary: <gc>: <the pre-indexed addressing offset range of load/store instructions is not large enough>
LLT: JFUZZ
Bug url:
---
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp | 16 +++++-----------
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index a65a605d0..6db979b57 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -2114,12 +2114,11 @@ int MacroAssembler::push_fp(unsigned int bitset, Register stack) {
return 0;
}
+ add(stack, stack, -count * wordSize * 2);
+
if (count & 1) {
- strq(as_FloatRegister(regs[0]), Address(pre(stack, -count * wordSize * 2)));
+ strq(as_FloatRegister(regs[0]), Address(stack));
i += 1;
- } else {
- stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -count * wordSize * 2)));
- i += 2;
}
for (; i < count; i += 2) {
@@ -2145,20 +2144,15 @@ int MacroAssembler::pop_fp(unsigned int bitset, Register stack) {
}
if (count & 1) {
+ ldrq(as_FloatRegister(regs[0]), Address(stack));
i += 1;
- } else {
- i += 2;
}
for (; i < count; i += 2) {
ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
}
- if ((count & 1) == 0) {
- ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, count * wordSize * 2)));
- } else {
- ldrq(as_FloatRegister(regs[0]), Address(post(stack, count * wordSize * 2)));
- }
+ add(stack, stack, count * wordSize * 2);
return count;
}
--
2.12.3
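The root cause is an encoding limit: AArch64 pre- and post-indexed loads/stores carry the writeback offset in a signed 9-bit immediate, i.e. -256..+255 bytes, while -count * wordSize * 2 costs 16 bytes per Q register and overflows the range once enough registers are pushed. A small standalone check of that range (illustrative helper, not HotSpot code):

#include <cstdio>

constexpr bool fits_pre_indexed_imm9(long offset) {
    return offset >= -256 && offset <= 255;  // signed 9-bit byte offset
}

int main() {
    const int wordSize = 8;  // HotSpot's word size on aarch64
    for (int count : {1, 16, 17, 32}) {
        long offset = -static_cast<long>(count) * wordSize * 2;
        std::printf("count=%2d offset=%5ld pre-indexed ok: %s\n",
                    count, offset, fits_pre_indexed_imm9(offset) ? "yes" : "no");
    }
    // Once the offset leaves [-256, 255], strq(..., Address(pre(stack, offset)))
    // cannot be encoded; the patch instead adjusts the stack pointer with an
    // explicit add and uses plain base addressing.
}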

View File

@@ -1,28 +0,0 @@
From e25b331a945301e24429c120bef1ed0daf04d49c Mon Sep 17 00:00:00 2001
Date: Fri, 3 Apr 2020 17:12:16 +0800
Subject: [PATCH] ZGC: aarch64: Fix MR 32, fix system call number of
memfd_create
Summary: <gc>: <memfd_create on aarch64 always fails because the system call number is wrong>
LLT: N/A
Bug url: N/A
---
src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp
index 47894b5..f956b53 100644
--- a/src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/gc/z/zBackingFile_linux_aarch64.cpp
@@ -51,7 +51,7 @@
// Support for building on older Linux systems
#ifndef __NR_memfd_create
-#define __NR_memfd_create 319
+#define __NR_memfd_create 279
#endif
#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
--
1.8.3.1
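Linux system call numbers are per-architecture, so a hard-coded fallback must match the target ISA: 319 is memfd_create on x86_64, while the arm64 (asm-generic) table assigns it 279. A standalone sketch of the corrected fallback:

#include <sys/syscall.h>
#include <unistd.h>
#include <cstdio>

// Fallback for old build headers; a newer kernel still accepts the call.
#ifndef __NR_memfd_create
# if defined(__aarch64__)
#  define __NR_memfd_create 279  // asm-generic table used by arm64
# elif defined(__x86_64__)
#  define __NR_memfd_create 319
# else
#  error "add __NR_memfd_create for this architecture"
# endif
#endif

int main() {
    const unsigned int MFD_CLOEXEC_FLAG = 0x0001U;  // as in the patch context
    long fd = syscall(__NR_memfd_create, "demo", MFD_CLOEXEC_FLAG);
    std::printf("memfd_create -> %ld\n", fd);  // -1 with ENOSYS on old kernels
    if (fd >= 0) close(fd);
}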

View File

@@ -1,106 +0,0 @@
From 425112071e77e2fb599d1f96ce48689d45461261 Mon Sep 17 00:00:00 2001
Date: Mon, 17 Feb 2020 18:55:47 +0800
Subject: [PATCH] ZGC: aarch64: not using zr register avoid sigill in
MacroAssembler::push_fp and pop_fp
Summary: <gc>: <the ldp instruction does not support two identical registers>
LLT: jtreg
Bug url:
---
src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp | 48 +++++++++++++---------
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 611f13b0e..a65a605d0 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -2100,58 +2100,66 @@ int MacroAssembler::pop(unsigned int bitset, Register stack) {
// Push lots of registers in the bit set supplied. Don't push sp.
// Return the number of words pushed
int MacroAssembler::push_fp(unsigned int bitset, Register stack) {
- int words_pushed = 0;
-
// Scan bitset to accumulate register pairs
unsigned char regs[32];
int count = 0;
+ int i = 0;
for (int reg = 0; reg <= 31; reg++) {
if (1 & bitset)
regs[count++] = reg;
bitset >>= 1;
}
- regs[count++] = zr->encoding_nocheck();
- count &= ~1; // Only push an even number of regs
- // Always pushing full 128 bit registers.
- if (count) {
+ if (!count) {
+ return 0;
+ }
+
+ if (count & 1) {
+ strq(as_FloatRegister(regs[0]), Address(pre(stack, -count * wordSize * 2)));
+ i += 1;
+ } else {
stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -count * wordSize * 2)));
- words_pushed += 2;
+ i += 2;
}
- for (int i = 2; i < count; i += 2) {
+
+ for (; i < count; i += 2) {
stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
- words_pushed += 2;
}
- assert(words_pushed == count, "oops, pushed != count");
return count;
}
int MacroAssembler::pop_fp(unsigned int bitset, Register stack) {
- int words_pushed = 0;
-
// Scan bitset to accumulate register pairs
unsigned char regs[32];
int count = 0;
+ int i = 0;
for (int reg = 0; reg <= 31; reg++) {
if (1 & bitset)
regs[count++] = reg;
bitset >>= 1;
}
- regs[count++] = zr->encoding_nocheck();
- count &= ~1;
- for (int i = 2; i < count; i += 2) {
+ if (!count) {
+ return 0;
+ }
+
+ if (count & 1) {
+ i += 1;
+ } else {
+ i += 2;
+ }
+
+ for (; i < count; i += 2) {
ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2));
- words_pushed += 2;
}
- if (count) {
+
+ if ((count & 1) == 0) {
ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, count * wordSize * 2)));
- words_pushed += 2;
+ } else {
+ ldrq(as_FloatRegister(regs[0]), Address(post(stack, count * wordSize * 2)));
}
- assert(words_pushed == count, "oops, pushed != count");
-
return count;
}
--
2.12.3
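The underlying constraint: LDP with Rt == Rt2 is CONSTRAINED UNPREDICTABLE on AArch64, so padding an odd register list with a duplicate encoding (here zr, encoding 31) can fault at runtime. The fix handles the odd element with a single strq/ldrq and pairs the rest. A minimal model of that pairing logic (printf stands in for the instruction emitter):

#include <cstdio>
#include <vector>

static void save_fp_regs(const std::vector<int>& regs) {
    size_t i = 0;
    if (regs.size() & 1) {               // odd count: one single-register store
        std::printf("strq v%d\n", regs[0]);
        i = 1;
    }
    for (; i + 1 < regs.size(); i += 2)  // remainder in distinct pairs
        std::printf("stpq v%d, v%d\n", regs[i], regs[i + 1]);
}

int main() {
    save_fp_regs({0, 1, 2});  // emits: strq v0, then stpq v1, v2
}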

View File

@@ -1,35 +0,0 @@
diff --git a/src/hotspot/share/gc/z/zHeap.cpp b/src/hotspot/share/gc/z/zHeap.cpp
index 62f97d2..e950acf 100644
--- a/src/hotspot/share/gc/z/zHeap.cpp
+++ b/src/hotspot/share/gc/z/zHeap.cpp
@@ -49,6 +49,7 @@
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
+#include "prims/resolvedMethodTable.hpp"
static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
@@ -334,6 +335,10 @@ bool ZHeap::mark_end() {
Universe::verify();
}
+ // Free unused entries of the ResolvedMethodTable and their weak handles
+ // to avoid ResolvedMethodTable inflation and a native memory leak
+ ResolvedMethodTable::unlink();
+
return true;
}
diff --git a/test/hotspot/jtreg/runtime/MemberName/MemberNameLeak.java b/test/hotspot/jtreg/runtime/MemberName/MemberNameLeak.java
index a8aff47..afadfd6 100644
--- a/test/hotspot/jtreg/runtime/MemberName/MemberNameLeak.java
+++ b/test/hotspot/jtreg/runtime/MemberName/MemberNameLeak.java
@@ -75,6 +75,7 @@ public class MemberNameLeak {
test("-XX:+UseG1GC");
test("-XX:+UseParallelGC");
test("-XX:+UseSerialGC");
+ test("-XX:+UseZGC");
if (!Compiler.isGraalEnabled()) { // Graal does not support CMS and Shenandoah
test("-XX:+UseConcMarkSweepGC");
if (GC.Shenandoah.isSupported()) {
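Conceptually, unlink() after marking walks a weak-reference table and drops entries whose referents the GC found dead. A minimal sketch of that idea, assuming simplified types rather than HotSpot's ResolvedMethodTable:

#include <list>

struct WeakEntry { bool referent_alive; };

// Drop dead entries so the table cannot grow without bound and leak
// native memory between cleanups.
static void unlink_dead(std::list<WeakEntry>& table) {
    table.remove_if([](const WeakEntry& e) { return !e.referent_alive; });
}

int main() {
    std::list<WeakEntry> table{{true}, {false}, {true}};
    unlink_dead(table);  // two live entries remain
}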

View File

@@ -1,12 +0,0 @@
diff --git a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
index 18f455086..785470dbe 100644
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/codeBlob.hpp"
+#include "code/vmreg.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zBarrierSet.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"

View File

@@ -762,7 +762,7 @@ Provides: java-src%{?1} = %{epoch}:%{version}-%{release}
Name: java-%{javaver}-%{origin}
Version: %{newjavaver}.%{buildver}
-Release: 3
+Release: 4
# java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons
# and this change was brought into RHEL-4. java-1.5.0-ibm packages
# also included the epoch in their virtual provides. This created a
@@ -834,18 +834,7 @@ Patch2001: LoongArch64-support.patch
Patch5: Add-ability-to-configure-third-port-for-remote-JMX.patch
Patch6: 8214527-AArch64-ZGC-for-Aarch64.patch
Patch7: 8224675-Late-GC-barrier-insertion-for-ZGC.patch
-Patch9: ZGC-Redesign-C2-load-barrier-to-expand-on-th.patch
-Patch10: ZGC-aarch64-not-using-zr-register-avoid-sigill-in-Ma.patch
-Patch11: 8217856-ZGC-Break-out-C2-matching-rules-into-separat.patch
-Patch12: 8233073-Make-BitMap-accessors-more-memory-ordering-f.patch
-Patch13: 8233061-ZGC-Enforce-memory-ordering-in-segmented-bit.patch
-Patch18: 8209375-ZGC-Use-dynamic-base-address-for-mark-stack-.patch
-Patch20: 8209894-ZGC-Cap-number-of-GC-workers-based-on-heap-s.patch
-Patch22: 8233506-ZGC-the-load-for-Reference.get-can-be-conver.patch
-Patch23: add-missing-inline.patch
-Patch26: ZGC-aarch64-fix-system-call-number-of-memfd_create.patch
-Patch27: ZGC-aarch64-fix-not-using-load-store-Pre-index.patch
-Patch29: ZGC-reuse-entries-of-ResolvedMethodTable.patch
+Patch9: ZGC-AArch64-Optimizations-and-Fixes.patch
# 11.0.8
Patch33: 8210473-JEP-345-NUMA-Aware-Memory-Allocation-for-G1.patch
@@ -910,6 +899,9 @@ Patch91: 8222289-Overhaul-logic-for-reading-writing-constant-pool-entries.patch
# 11.0.21
Patch92: 8295068-SSLEngine-throws-NPE-parsing-Certificate.patch
+# 11.0.23
+Patch93: Cache-byte-when-constructing-String-with-duplicate-c.patch
############################################
#
# riscv64 specific patches
@@ -1156,17 +1148,6 @@ pushd %{top_level_dir_name}
%patch6 -p1
%patch7 -p1
%patch9 -p1
-%patch10 -p1
-%patch11 -p1
-%patch12 -p1
-%patch13 -p1
-%patch18 -p1
-%patch20 -p1
-%patch22 -p1
-%patch23 -p1
-%patch26 -p1
-%patch27 -p1
-%patch29 -p1
%patch33 -p1
%patch34 -p1
%patch35 -p1
@@ -1212,6 +1193,7 @@ pushd %{top_level_dir_name}
%patch90 -p1
%patch91 -p1
%patch92 -p1
+%patch93 -p1
%endif
%endif
%ifarch loongarch64
@@ -1733,6 +1715,9 @@ cjc.mainProgram(arg)
%changelog
+* Sat Jun 15 2024 neu-mobi <liuyulong35@huawei.com> - 1:11.0.23.9-4
+- Collate patches and merge patches related to ZGC
* Mon Jun 03 2024 songliyang <songliyng@kylinos.cn> - 1:11.0.23.9-3
- fix loongarch vendor error
- fix changelog error