add release/acquire barrier patches

Author: jdkboy
Committed: 2020-11-06 17:43:38 +08:00
Parent: a13f79f215
Commit: 6942049685
6 changed files with 373 additions and 2 deletions

8165808-Add-release-barriers-when-allocating-objects-with-concurrent-collection.patch (new file)

@@ -0,0 +1,162 @@
# HG changeset patch
# User kbarrett
# Date 1473955843 14400
# Thu Sep 15 12:10:43 2016 -0400
# Node ID f008248d98dd625b62b5f3f5ca9b24956d33c18d
# Parent 7767224562139a10efca0575c28a62be7895d5b3
8165808: Add release barriers when allocating objects with concurrent collection
Summary: Add release_set_klass, use in slow-path allocators.
Reviewed-by: jmasa, dholmes
diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp
@@ -320,9 +320,6 @@
inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);
- inline static void post_allocation_install_obj_klass(KlassHandle klass,
- oop obj);
-
// Raw memory allocation facilities
// The obj and array allocate methods are covers for these methods.
// mem_allocate() should never be
diff --git a/src/share/vm/gc_interface/collectedHeap.inline.hpp b/src/share/vm/gc_interface/collectedHeap.inline.hpp
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.inline.hpp
@@ -39,14 +39,22 @@
// Inline allocation implementations.
void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
- HeapWord* obj) {
- post_allocation_setup_no_klass_install(klass, obj);
- post_allocation_install_obj_klass(klass, oop(obj));
+ HeapWord* obj_ptr) {
+ post_allocation_setup_no_klass_install(klass, obj_ptr);
+ oop obj = (oop)obj_ptr;
+#if ! INCLUDE_ALL_GCS
+ obj->set_klass(klass());
+#else
+ // Need a release store to ensure array/class length, mark word, and
+ // object zeroing are visible before setting the klass non-NULL, for
+ // concurrent collectors.
+ obj->release_set_klass(klass());
+#endif
}
void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
- HeapWord* objPtr) {
- oop obj = (oop)objPtr;
+ HeapWord* obj_ptr) {
+ oop obj = (oop)obj_ptr;
assert(obj != NULL, "NULL object pointer");
if (UseBiasedLocking && (klass() != NULL)) {
@@ -57,18 +65,6 @@
}
}
-void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
- oop obj) {
- // These asserts are kind of complicated because of klassKlass
- // and the beginning of the world.
- assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
- assert(klass() == NULL || klass()->is_klass(), "not a klass");
- assert(obj != NULL, "NULL object pointer");
- obj->set_klass(klass());
- assert(!Universe::is_fully_initialized() || obj->klass() != NULL,
- "missing klass");
-}
-
// Support for jvmti and dtrace
inline void post_allocation_notify(KlassHandle klass, oop obj, int size) {
// support low memory notifications (no-op if not enabled)
@@ -86,25 +82,26 @@
}
void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
- HeapWord* obj,
+ HeapWord* obj_ptr,
int size) {
- post_allocation_setup_common(klass, obj);
+ post_allocation_setup_common(klass, obj_ptr);
+ oop obj = (oop)obj_ptr;
assert(Universe::is_bootstrapping() ||
- !((oop)obj)->is_array(), "must not be an array");
+ !obj->is_array(), "must not be an array");
// notify jvmti and dtrace
- post_allocation_notify(klass, (oop)obj, size);
+ post_allocation_notify(klass, obj, size);
}
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
- HeapWord* obj,
+ HeapWord* obj_ptr,
int length) {
- // Set array length before setting the _klass field
- // in post_allocation_setup_common() because the klass field
- // indicates that the object is parsable by concurrent GC.
+ // Set array length before setting the _klass field because a
+ // non-NULL klass field indicates that the object is parsable by
+ // concurrent GC.
assert(length >= 0, "length should be non-negative");
- ((arrayOop)obj)->set_length(length);
- post_allocation_setup_common(klass, obj);
- oop new_obj = (oop)obj;
+ ((arrayOop)obj_ptr)->set_length(length);
+ post_allocation_setup_common(klass, obj_ptr);
+ oop new_obj = (oop)obj_ptr;
assert(new_obj->is_array(), "must be an array");
// notify jvmti and dtrace (must be after length is set for dtrace)
post_allocation_notify(klass, new_obj, new_obj->size());
diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
--- a/hotspot/src/share/vm/oops/oop.hpp
+++ b/hotspot/src/share/vm/oops/oop.hpp
@@ -87,6 +87,7 @@
narrowKlass* compressed_klass_addr();
void set_klass(Klass* k);
+ void release_set_klass(Klass* k);
// For klass field compression
int klass_gap() const;
diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
--- a/hotspot/src/share/vm/oops/oop.inline.hpp
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp
@@ -103,10 +103,14 @@
return &_metadata._compressed_klass;
}
+#define CHECK_SET_KLASS(k) \
+ do { \
+ assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass"); \
+ assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
+ } while (0)
+
inline void oopDesc::set_klass(Klass* k) {
- // since klasses are promoted no store check is needed
- assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
- assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
+ CHECK_SET_KLASS(k);
if (UseCompressedClassPointers) {
*compressed_klass_addr() = Klass::encode_klass_not_null(k);
} else {
@@ -114,6 +118,18 @@
}
}
+inline void oopDesc::release_set_klass(Klass* k) {
+ CHECK_SET_KLASS(k);
+ if (UseCompressedClassPointers) {
+ OrderAccess::release_store(compressed_klass_addr(),
+ Klass::encode_klass_not_null(k));
+ } else {
+ OrderAccess::release_store_ptr(klass_addr(), k);
+ }
+}
+
+#undef CHECK_SET_KLASS
+
inline int oopDesc::klass_gap() const {
return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}
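
The core idea of 8165808 is a publish-with-release pattern: the allocating thread finishes all plain initializing stores (mark word, array length, zeroing) and only then release-stores the klass, so any concurrent reader that observes a non-NULL klass also observes those earlier stores. The sketch below models that protocol with std::atomic; Object, Klass, and the field names are stand-ins for illustration, not HotSpot types.

// --- Illustrative sketch, not part of this commit ---
// Models the publish-with-release pattern behind release_set_klass() and
// klass_or_null_acquire(); Object/Klass here are hypothetical stand-ins.
#include <atomic>
#include <cstddef>

struct Klass { size_t header_size; };

struct Object {
  std::atomic<Klass*> klass{nullptr};  // published last, initially NULL
  size_t length;                       // e.g. array length
};

// Allocating (mutator) thread: plain stores first, klass last with release.
void publish(Object* obj, Klass* k, size_t len) {
  obj->length = len;                               // length, zeroing, mark word...
  obj->klass.store(k, std::memory_order_release);  // release_set_klass() analogue
}

// Concurrent GC thread: acquire-load the klass before using other fields.
size_t block_size(const Object* obj) {
  Klass* k = obj->klass.load(std::memory_order_acquire);  // klass_or_null_acquire() analogue
  if (k == nullptr) {
    return 0;  // object not yet published; caller must skip or retry
  }
  return k->header_size + obj->length;  // safe: length is visible here
}
// --- end sketch ---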

8166583-Add-oopDesc-klass_or_null_acquire.patch (new file)

@@ -0,0 +1,50 @@
# HG changeset patch
# User kbarrett
# Date 1474669392 14400
# Fri Sep 23 18:23:12 2016 -0400
# Node ID 3296281c85d3d7aa95a2aa95aa4801bf1a343426
# Parent 7f7c1e1fbc8a70f9730339872ddf56fee812304c
8166583: Add oopDesc::klass_or_null_acquire()
Summary: Added new function.
Reviewed-by: dholmes, tschatzl
diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
--- a/hotspot/src/share/vm/oops/oop.hpp
+++ b/hotspot/src/share/vm/oops/oop.hpp
@@ -83,6 +83,7 @@
Klass* klass() const;
Klass* klass_or_null() const volatile;
+ Klass* klass_or_null_acquire() const volatile;
Klass** klass_addr();
narrowKlass* compressed_klass_addr();
diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
--- a/hotspot/src/share/vm/oops/oop.inline.hpp
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp
@@ -78,7 +78,6 @@
}
inline Klass* oopDesc::klass_or_null() const volatile {
- // can be NULL in CMS
if (UseCompressedClassPointers) {
return Klass::decode_klass(_metadata._compressed_klass);
} else {
@@ -86,6 +85,17 @@
}
}
+inline Klass* oopDesc::klass_or_null_acquire() const volatile {
+ if (UseCompressedClassPointers) {
+ // Workaround for non-const load_acquire parameter.
+ const volatile narrowKlass* addr = &_metadata._compressed_klass;
+ volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
+ return Klass::decode_klass(OrderAccess::load_acquire(xaddr));
+ } else {
+ return (Klass*)OrderAccess::load_ptr_acquire(&_metadata._klass);
+ }
+}
+
inline int oopDesc::klass_gap_offset_in_bytes() {
assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
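
The const_cast in klass_or_null_acquire() exists only because OrderAccess::load_acquire in this code base takes a non-const volatile pointer, while the reader is a const volatile-qualified member function. A minimal sketch of the same workaround follows; load_acquire() here is a hypothetical stand-in with that signature, and strictly conforming C++ would use std::atomic rather than a volatile field plus fence.

// --- Illustrative sketch, not part of this commit ---
#include <atomic>
#include <cstdint>

typedef uint32_t narrowKlass;

// Hypothetical stand-in for OrderAccess::load_acquire: note the
// non-const volatile parameter that forces the cast below.
inline narrowKlass load_acquire(volatile narrowKlass* p) {
  narrowKlass v = *p;
  std::atomic_thread_fence(std::memory_order_acquire);  // order this load before later reads
  return v;
}

class oopLike {
  narrowKlass _compressed_klass;
 public:
  // const-qualified reader: &_compressed_klass is a const volatile pointer
  // here, so it must be const_cast before load_acquire() will accept it.
  narrowKlass compressed_klass_acquire() const volatile {
    const volatile narrowKlass* addr = &_compressed_klass;
    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
    return load_acquire(xaddr);
  }
};
// --- end sketch ---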

8166862-CMS-needs-klass_or_null_acquire.patch (new file)

@@ -0,0 +1,142 @@
# HG changeset patch
# User kbarrett
# Date 1477103211 14400
# Fri Oct 21 22:26:51 2016 -0400
# Node ID 053ad011aea25994e337f1c5dc9bd4dcc63f38cb
# Parent 7f7c1e1fbc8a70f9730339872ddf56fee812304c
8166862: CMS needs klass_or_null_acquire
Summary: Change CMS non-assert uses of klass_or_null to klass_or_null_acquire.
Reviewed-by: tschatzl, mgerdin
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
@@ -998,18 +998,13 @@
// and the klass read.
OrderAccess::loadload();
- // must read from what 'p' points to in each loop.
- Klass* k = ((volatile oopDesc*)p)->klass_or_null();
+ // Ensure klass read before size.
+ Klass* k = oop(p)->klass_or_null_acquire();
if (k != NULL) {
assert(k->is_klass(), "Should really be klass oop.");
oop o = (oop)p;
assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
- // Bugfix for systems with weak memory model (PPC64/IA64).
- // The object o may be an array. Acquire to make sure that the array
- // size (third word) is consistent.
- OrderAccess::acquire();
-
size_t res = o->size_given_klass(k);
res = adjustObjectSize(res);
assert(res != 0, "Block size should not be 0");
@@ -1057,21 +1052,13 @@
// and the klass read.
OrderAccess::loadload();
- // must read from what 'p' points to in each loop.
- Klass* k = ((volatile oopDesc*)p)->klass_or_null();
- // We trust the size of any object that has a non-NULL
- // klass and (for those in the perm gen) is parsable
- // -- irrespective of its conc_safe-ty.
+ // Ensure klass read before size.
+ Klass* k = oop(p)->klass_or_null_acquire();
if (k != NULL) {
assert(k->is_klass(), "Should really be klass oop.");
oop o = (oop)p;
assert(o->is_oop(), "Should be an oop");
- // Bugfix for systems with weak memory model (PPC64/IA64).
- // The object o may be an array. Acquire to make sure that the array
- // size (third word) is consistent.
- OrderAccess::acquire();
-
size_t res = o->size_given_klass(k);
res = adjustObjectSize(res);
assert(res != 0, "Block size should not be 0");
@@ -1124,7 +1111,7 @@
// and the klass read.
OrderAccess::loadload();
- Klass* k = oop(p)->klass_or_null();
+ Klass* k = oop(p)->klass_or_null_acquire();
if (k != NULL) {
// Ignore mark word because it may have been used to
// chain together promoted objects (the last one
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
@@ -6728,7 +6728,7 @@
HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
size_t sz = 0;
oop p = (oop)addr;
- if (p->klass_or_null() != NULL) {
+ if (p->klass_or_null_acquire() != NULL) {
sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
} else {
sz = block_size_using_printezis_bits(addr);
@@ -7186,7 +7186,7 @@
}
if (_bitMap->isMarked(addr)) {
// it's marked; is it potentially uninitialized?
- if (p->klass_or_null() != NULL) {
+ if (p->klass_or_null_acquire() != NULL) {
// an initialized object; ignore mark word in verification below
// since we are running concurrent with mutators
assert(p->is_oop(true), "should be an oop");
@@ -7227,7 +7227,7 @@
}
} else {
// Either a not yet marked object or an uninitialized object
- if (p->klass_or_null() == NULL) {
+ if (p->klass_or_null_acquire() == NULL) {
// An uninitialized object, skip to the next card, since
// we may not be able to read its P-bits yet.
assert(size == 0, "Initial value");
@@ -7438,7 +7438,7 @@
assert(_skipBits == 0, "tautology");
_skipBits = 2; // skip next two marked bits ("Printezis-marks")
oop p = oop(addr);
- if (p->klass_or_null() == NULL) {
+ if (p->klass_or_null_acquire() == NULL) {
DEBUG_ONLY(if (!_verifying) {)
// We re-dirty the cards on which this object lies and increase
// the _threshold so that we'll come back to scan this object
@@ -7458,7 +7458,7 @@
if (_threshold < end_card_addr) {
_threshold = end_card_addr;
}
- if (p->klass_or_null() != NULL) {
+ if (p->klass_or_null_acquire() != NULL) {
// Redirty the range of cards...
_mut->mark_range(redirty_range);
} // ...else the setting of klass will dirty the card anyway.
@@ -7609,7 +7609,7 @@
assert(_skip_bits == 0, "tautology");
_skip_bits = 2; // skip next two marked bits ("Printezis-marks")
oop p = oop(addr);
- if (p->klass_or_null() == NULL) {
+ if (p->klass_or_null_acquire() == NULL) {
// in the case of Clean-on-Enter optimization, redirty card
// and avoid clearing card by increasing the threshold.
return true;
@@ -8596,7 +8596,7 @@
"alignment problem");
#ifdef ASSERT
- if (oop(addr)->klass_or_null() != NULL) {
+ if (oop(addr)->klass_or_null_acquire() != NULL) {
// Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop");
assert(size ==
@@ -8607,7 +8607,7 @@
} else {
// This should be an initialized object that's alive.
- assert(oop(addr)->klass_or_null() != NULL,
+ assert(oop(addr)->klass_or_null_acquire() != NULL,
"Should be an initialized object");
// Ignore mark word because we are running concurrent with mutators
assert(oop(addr)->is_oop(true), "live block should be an oop");
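
Functionally, the CMS change folds the old two-step idiom (a plain klass_or_null() read followed by a standalone OrderAccess::acquire() once the klass turns out to be non-NULL) into a single acquiring load, so the ordering guarantee can no longer be forgotten at a call site. The sketch below contrasts the two shapes with std::atomic stand-ins; k_field is a hypothetical atomic klass slot, not a HotSpot field.

// --- Illustrative sketch, not part of this commit ---
#include <atomic>

struct Klass;

// Old CMS shape: relaxed read, then a standalone acquire fence guarded by
// the NULL check (the removed "Bugfix for systems with weak memory model").
const Klass* klass_then_fence(const std::atomic<const Klass*>& k_field) {
  const Klass* k = k_field.load(std::memory_order_relaxed);
  if (k != nullptr) {
    std::atomic_thread_fence(std::memory_order_acquire);  // was OrderAccess::acquire()
  }
  return k;
}

// New shape after this patch: the ordering travels with the load itself.
const Klass* klass_acquire(const std::atomic<const Klass*>& k_field) {
  return k_field.load(std::memory_order_acquire);  // klass_or_null_acquire() analogue
}
// --- end sketch ---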

8160369.patch (new file, contents not shown)

PS-GC-adding-acquire_size-method-for-PSParallelCompa.patch (new file, contents not shown)

RPM spec file

@@ -915,7 +915,7 @@ Provides: java-%{javaver}-%{origin}-accessibility%{?1} = %{epoch}:%{version}-%{r
Name: java-%{javaver}-%{origin}
Version: %{javaver}.%{updatever}.%{buildver}
-Release: 3
+Release: 4
# java-1.5.0-ibm from jpackage.org set Epoch to 1 for unknown reasons
# and this change was brought into RHEL-4. java-1.5.0-ibm packages
# also included the epoch in their virtual provides. This created a
@@ -1055,6 +1055,11 @@ Patch127: add-DumpSharedSpace-guarantee-when-create-anonymous-classes.patch
# 8u272
Patch128: 8214440-ldap-over-a-TLS-connection-negotiate-fail.patch
Patch129: 8248336-AArch64-C2-offset-overflow-in-BoxLockNode-em.patch
+Patch130: 8165808-Add-release-barriers-when-allocating-objects-with-concurrent-collection.patch
+Patch131: 8166583-Add-oopDesc-klass_or_null_acquire.patch
+Patch132: 8166862-CMS-needs-klass_or_null_acquire.patch
+Patch133: 8160369.patch
+Patch134: PS-GC-adding-acquire_size-method-for-PSParallelCompa.patch
#############################################
#
@@ -1465,6 +1470,11 @@ pushd %{top_level_dir_name}
%patch127 -p1
%patch128 -p1
%patch129 -p1
+%patch130 -p1
+%patch131 -p1
+%patch132 -p1
+%patch133 -p1
+%patch134 -p1
popd
@@ -2081,7 +2091,14 @@ require "copy_jdk_configs.lua"
%endif
%changelog
-* Fri Nov 06 2020 wuyan <wuyan34@huawei.com> - 1:1.8.0.272-b10.2
+* Fri Nov 06 2020 jdkboy <guoge1@huawei.com> - 1:1.8.0.272-b10.4
+- add 8165808-Add-release-barriers-when-allocating-objects-with-concurrent-collection.patch
+- add 8166583-Add-oopDesc-klass_or_null_acquire.patch
+- add 8166862-CMS-needs-klass_or_null_acquire.patch
+- add 8160369.patch
+- add PS-GC-adding-acquire_size-method-for-PSParallelCompa.patch
+* Fri Nov 06 2020 wuyan <wuyan34@huawei.com> - 1:1.8.0.272-b10.3
- add 8248336-AArch64-C2-offset-overflow-in-BoxLockNode-em.patch
* Fri Nov 06 2020 xiezhaokun <xiezhaokun@huawei.com> - 1:1.8.0.272-b10.2