openjdk-1.8.0/Dynamic-CDS-Archive.patch
From f1cba2dd8fe526f4ad5ea4913154a174bd19a080 Mon Sep 17 00:00:00 2001
Date: Sat, 3 Sep 2022 14:25:11 +0000
Subject: Dynamic-CDS-Archive
---
hotspot/src/os/linux/vm/os_linux.cpp | 3 +-
hotspot/src/share/vm/cds/archiveBuilder.cpp | 807 ++++++++++++++++
hotspot/src/share/vm/cds/archiveBuilder.hpp | 368 +++++++
hotspot/src/share/vm/cds/archiveUtils.cpp | 247 +++++
hotspot/src/share/vm/cds/archiveUtils.hpp | 141 +++
hotspot/src/share/vm/cds/dumpAllocStats.cpp | 109 +++
hotspot/src/share/vm/cds/dumpAllocStats.hpp | 88 ++
hotspot/src/share/vm/cds/dynamicArchive.cpp | 412 ++++++++
hotspot/src/share/vm/cds/dynamicArchive.hpp | 54 ++
.../share/vm/classfile/classFileParser.cpp | 7 +
.../src/share/vm/classfile/classLoaderExt.hpp | 2 +-
.../share/vm/classfile/compactHashtable.cpp | 216 +++++
.../share/vm/classfile/compactHashtable.hpp | 349 +++++++
.../share/vm/classfile/sharedClassUtil.hpp | 4 +
.../src/share/vm/classfile/symbolTable.cpp | 102 +-
.../src/share/vm/classfile/symbolTable.hpp | 12 +
.../share/vm/classfile/systemDictionary.cpp | 159 +--
.../share/vm/classfile/systemDictionary.hpp | 1 +
.../vm/classfile/systemDictionaryShared.cpp | 911 ++++++++++++++++++
.../vm/classfile/systemDictionaryShared.hpp | 167 +++-
hotspot/src/share/vm/memory/allocation.hpp | 12 +
.../src/share/vm/memory/allocation.inline.hpp | 53 +-
hotspot/src/share/vm/memory/filemap.cpp | 352 +++++--
hotspot/src/share/vm/memory/filemap.hpp | 104 +-
hotspot/src/share/vm/memory/iterator.hpp | 7 +
hotspot/src/share/vm/memory/metaspace.cpp | 80 +-
hotspot/src/share/vm/memory/metaspace.hpp | 1 +
.../src/share/vm/memory/metaspaceClosure.cpp | 87 ++
.../src/share/vm/memory/metaspaceClosure.hpp | 381 ++++++++
.../src/share/vm/memory/metaspaceShared.cpp | 148 ++-
.../src/share/vm/memory/metaspaceShared.hpp | 51 +-
hotspot/src/share/vm/oops/annotations.cpp | 12 +
hotspot/src/share/vm/oops/annotations.hpp | 9 +
hotspot/src/share/vm/oops/arrayKlass.cpp | 22 +
hotspot/src/share/vm/oops/arrayKlass.hpp | 3 +-
hotspot/src/share/vm/oops/constMethod.cpp | 26 +
hotspot/src/share/vm/oops/constMethod.hpp | 8 +-
hotspot/src/share/vm/oops/constantPool.cpp | 93 +-
hotspot/src/share/vm/oops/constantPool.hpp | 12 +
hotspot/src/share/vm/oops/cpCache.cpp | 69 ++
hotspot/src/share/vm/oops/cpCache.hpp | 25 +-
hotspot/src/share/vm/oops/instanceKlass.cpp | 131 ++-
hotspot/src/share/vm/oops/instanceKlass.hpp | 12 +-
hotspot/src/share/vm/oops/klass.cpp | 83 +-
hotspot/src/share/vm/oops/klass.hpp | 10 +-
hotspot/src/share/vm/oops/klassVtable.hpp | 3 +
hotspot/src/share/vm/oops/metadata.hpp | 4 +-
hotspot/src/share/vm/oops/method.cpp | 22 +-
hotspot/src/share/vm/oops/method.hpp | 7 +-
hotspot/src/share/vm/oops/methodCounters.hpp | 7 +
hotspot/src/share/vm/oops/methodData.cpp | 9 +
hotspot/src/share/vm/oops/methodData.hpp | 5 +-
hotspot/src/share/vm/oops/objArrayKlass.cpp | 7 +
hotspot/src/share/vm/oops/objArrayKlass.hpp | 3 +-
hotspot/src/share/vm/oops/symbol.hpp | 22 +-
hotspot/src/share/vm/runtime/arguments.cpp | 142 +++
hotspot/src/share/vm/runtime/arguments.hpp | 19 +-
hotspot/src/share/vm/runtime/globals.hpp | 21 +
hotspot/src/share/vm/runtime/java.cpp | 8 +
hotspot/src/share/vm/runtime/mutexLocker.cpp | 5 +-
hotspot/src/share/vm/runtime/mutexLocker.hpp | 3 +
hotspot/src/share/vm/runtime/os.cpp | 9 +-
hotspot/src/share/vm/runtime/os.hpp | 2 +
hotspot/src/share/vm/runtime/thread.cpp | 10 +
.../share/vm/services/diagnosticCommand.cpp | 13 +
.../share/vm/services/diagnosticCommand.hpp | 23 +
hotspot/src/share/vm/utilities/array.hpp | 1 +
hotspot/src/share/vm/utilities/bitMap.cpp | 17 +-
hotspot/src/share/vm/utilities/bitMap.hpp | 1 +
.../src/share/vm/utilities/constantTag.hpp | 5 +-
.../share/vm/utilities/globalDefinitions.hpp | 11 +-
hotspot/src/share/vm/utilities/hashtable.cpp | 60 +-
hotspot/src/share/vm/utilities/hashtable.hpp | 98 +-
.../share/vm/utilities/hashtable.inline.hpp | 2 +-
hotspot/src/share/vm/utilities/ostream.cpp | 11 +
hotspot/src/share/vm/utilities/ostream.hpp | 2 +-
.../src/share/vm/utilities/resourceHash.hpp | 27 +-
77 files changed, 6234 insertions(+), 295 deletions(-)
create mode 100644 hotspot/src/share/vm/cds/archiveBuilder.cpp
create mode 100644 hotspot/src/share/vm/cds/archiveBuilder.hpp
create mode 100644 hotspot/src/share/vm/cds/archiveUtils.cpp
create mode 100644 hotspot/src/share/vm/cds/archiveUtils.hpp
create mode 100644 hotspot/src/share/vm/cds/dumpAllocStats.cpp
create mode 100644 hotspot/src/share/vm/cds/dumpAllocStats.hpp
create mode 100644 hotspot/src/share/vm/cds/dynamicArchive.cpp
create mode 100644 hotspot/src/share/vm/cds/dynamicArchive.hpp
create mode 100644 hotspot/src/share/vm/classfile/compactHashtable.cpp
create mode 100644 hotspot/src/share/vm/classfile/compactHashtable.hpp
create mode 100644 hotspot/src/share/vm/classfile/systemDictionaryShared.cpp
create mode 100644 hotspot/src/share/vm/memory/metaspaceClosure.cpp
create mode 100644 hotspot/src/share/vm/memory/metaspaceClosure.hpp
diff --git a/hotspot/src/os/linux/vm/os_linux.cpp b/hotspot/src/os/linux/vm/os_linux.cpp
index f700335a3..6dbedf5c2 100644
--- a/hotspot/src/os/linux/vm/os_linux.cpp
+++ b/hotspot/src/os/linux/vm/os_linux.cpp
@@ -2370,8 +2370,7 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
#if INCLUDE_CDS
if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
UseSharedSpaces) {
- FileMapInfo* mapinfo = FileMapInfo::current_info();
- if (mapinfo->is_in_shared_space(si->si_addr)) {
+ if (MetaspaceShared::is_in_shared_space(si->si_addr)) {
st->print("\n\nError accessing class data sharing archive." \
" Mapped file inaccessible during execution, " \
" possible disk/network problem.");
diff --git a/hotspot/src/share/vm/cds/archiveBuilder.cpp b/hotspot/src/share/vm/cds/archiveBuilder.cpp
new file mode 100644
index 000000000..144dedfa9
--- /dev/null
+++ b/hotspot/src/share/vm/cds/archiveBuilder.cpp
@@ -0,0 +1,807 @@
+/*
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.hpp"
+#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "interpreter/abstractInterpreter.hpp"
+#include "memory/filemap.hpp"
+#include "memory/memRegion.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/align.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "utilities/hashtable.inline.hpp"
+
+ArchiveBuilder* ArchiveBuilder::_current = NULL;
+
+ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
+ char* newtop = ArchiveBuilder::current()->_ro_region.top();
+ ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
+}
+
+ArchiveBuilder::SourceObjList::SourceObjList() : _ptrmap(16 * K, false) {
+ _total_bytes = 0;
+ _objs = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<SourceObjInfo*>(128 * K, mtClassShared);
+}
+
+ArchiveBuilder::SourceObjList::~SourceObjList() {
+ delete _objs;
+}
+
+void ArchiveBuilder::SourceObjList::append(MetaspaceClosure::Ref* enclosing_ref, SourceObjInfo* src_info) {
+ // Save this source object for copying
+ _objs->append(src_info);
+
+ // Prepare for marking the pointers in this source object
+ assert(is_aligned(_total_bytes, sizeof(address)), "must be");
+ src_info->set_ptrmap_start(_total_bytes / sizeof(address));
+ _total_bytes = align_up(_total_bytes + (uintx)src_info->size_in_bytes(), sizeof(address));
+ src_info->set_ptrmap_end(_total_bytes / sizeof(address));
+
+ BitMap::idx_t bitmap_size_needed = BitMap::idx_t(src_info->ptrmap_end());
+ if (_ptrmap.size() <= bitmap_size_needed) {
+ _ptrmap.resize((bitmap_size_needed + 1) * 2, false);
+ }
+}
+
+class PrintBitMap : public BitMapClosure {
+ public:
+ bool do_bit(BitMap::idx_t bit_offset) {
+ tty->print_cr("PrintBitMap : " SIZE_FORMAT, bit_offset);
+ return true;
+ }
+};
+
+void ArchiveBuilder::SourceObjList::remember_embedded_pointer(SourceObjInfo* src_info, MetaspaceClosure::Ref* ref) {
+ // src_obj contains a pointer. Remember the location of this pointer in _ptrmap,
+ // so that we can copy/relocate it later. E.g., if we have
+ // class Foo { intx scalar; Bar* ptr; }
+ // Foo *f = 0x100;
+ // To mark the f->ptr pointer on a 64-bit platform, this function is called with
+ // src_info()->obj() == 0x100
+ // ref->addr() == 0x108
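+ // field_offset_in_bytes == 0x108 - 0x100 == 8, i.e. one pointer-sized word,
+ // so the bit marked in _ptrmap is src_info->ptrmap_start() + 1.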
+ address src_obj = src_info->obj();
+ address* field_addr = ref->addr();
+ assert(src_info->ptrmap_start() < _total_bytes, "sanity");
+ assert(src_info->ptrmap_end() <= _total_bytes, "sanity");
+ assert(*field_addr != NULL, "should have checked");
+
+ intx field_offset_in_bytes = ((address)field_addr) - src_obj;
+ DEBUG_ONLY(int src_obj_size = src_info->size_in_bytes();)
+ assert(field_offset_in_bytes >= 0, "must be");
+ assert(field_offset_in_bytes + intx(sizeof(intptr_t)) <= intx(src_obj_size), "must be");
+ assert(is_aligned(field_offset_in_bytes, sizeof(address)), "must be");
+
+ BitMap::idx_t idx = BitMap::idx_t(src_info->ptrmap_start() + (uintx)(field_offset_in_bytes / sizeof(address)));
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("remember_embedded_pointer: _ptrmap_start: " SIZE_FORMAT
+ "_ptrmap_end: " SIZE_FORMAT
+ " field: " PTR_FORMAT" -> " PTR_FORMAT
+ " bit_index: " SIZE_FORMAT " ",
+ src_info->ptrmap_start(), src_info->ptrmap_end(), p2i(src_obj), p2i(field_addr), idx);
+ }
+ _ptrmap.set_bit(BitMap::idx_t(idx));
+}
+
+class RelocateEmbeddedPointers : public BitMapClosure {
+ ArchiveBuilder* _builder;
+ address _dumped_obj;
+ BitMap::idx_t _start_idx;
+public:
+ RelocateEmbeddedPointers(ArchiveBuilder* builder, address dumped_obj, BitMap::idx_t start_idx) :
+ _builder(builder), _dumped_obj(dumped_obj), _start_idx(start_idx) {}
+
+ bool do_bit(BitMap::idx_t bit_offset) {
+ uintx FLAG_MASK = 0x03; // See comments around MetaspaceClosure::FLAG_MASK
+ size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
+ address* ptr_loc = (address*)(_dumped_obj + field_offset);
+ uintx old_p_and_bits = (uintx)(*ptr_loc);
+ uintx flag_bits = (old_p_and_bits & FLAG_MASK);
+ address old_p = (address)(old_p_and_bits & (~FLAG_MASK));
+ address new_p = _builder->get_dumped_addr(old_p);
+ uintx new_p_and_bits = ((uintx)new_p) | flag_bits;
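+ // Only the pointer part is relocated; the low 2 tag bits are preserved unchanged.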
+
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
+ p2i(ptr_loc), p2i(old_p), p2i(new_p));
+ }
+ ArchivePtrMarker::set_and_mark_pointer(ptr_loc, (address)(new_p_and_bits));
+ return true; // keep iterating the bitmap
+ }
+};
+
+void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
+ SourceObjInfo* src_info = objs()->at(i);
+ assert(src_info->should_copy(), "must be");
+ BitMap::idx_t start = BitMap::idx_t(src_info->ptrmap_start()); // inclusive
+ BitMap::idx_t end = BitMap::idx_t(src_info->ptrmap_end()); // exclusive
+
+ RelocateEmbeddedPointers relocator(builder, src_info->dumped_addr(), start);
+ _ptrmap.iterate(&relocator, start, end);
+}
+
+ArchiveBuilder::ArchiveBuilder() :
+ _current_dump_space(NULL),
+ _buffer_bottom(NULL),
+ _last_verified_top(NULL),
+ _num_dump_regions_used(0),
+ _other_region_used_bytes(0),
+ _requested_static_archive_bottom(NULL),
+ _requested_static_archive_top(NULL),
+ _requested_dynamic_archive_bottom(NULL),
+ _requested_dynamic_archive_top(NULL),
+ _mapped_static_archive_bottom(NULL),
+ _mapped_static_archive_top(NULL),
+ _buffer_to_requested_delta(0),
+ _rw_region("rw", MAX_SHARED_DELTA),
+ _ro_region("ro", MAX_SHARED_DELTA),
+ _rw_src_objs(),
+ _ro_src_objs(),
+ _src_obj_table(INITIAL_TABLE_SIZE),
+ _num_instance_klasses(0),
+ _num_obj_array_klasses(0),
+ _num_type_array_klasses(0),
+ _estimated_metaspaceobj_bytes(0),
+ _estimated_hashtable_bytes(0) {
+ _klasses = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
+ _symbols = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
+
+ assert(_current == NULL, "must be");
+ _current = this;
+}
+
+ArchiveBuilder::~ArchiveBuilder() {
+ assert(_current == this, "must be");
+ _current = NULL;
+
+ clean_up_src_obj_table();
+
+ for (int i = 0; i < _symbols->length(); i++) {
+ _symbols->at(i)->decrement_refcount();
+ }
+
+ delete _klasses;
+ delete _symbols;
+ if (_shared_rs.is_reserved()) {
+ _shared_rs.release();
+ }
+}
+
+bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* enclosing_ref,
+ MetaspaceClosure::Ref* ref, bool read_only) {
+ address src_obj = ref->obj();
+ if (src_obj == NULL) {
+ return false;
+ }
+ ref->set_keep_after_pushing();
+ remember_embedded_pointer_in_copied_obj(enclosing_ref, ref);
+
+ FollowMode follow_mode = get_follow_mode(ref);
+ SourceObjInfo src_info(ref, read_only, follow_mode);
+ bool created;
+ SourceObjInfo* p = _src_obj_table.add_if_absent(src_obj, src_info, &created);
+ if (created) {
+ if (_src_obj_table.maybe_grow(MAX_TABLE_SIZE)) {
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Expanded _src_obj_table table to %d", _src_obj_table.table_size());
+ }
+ }
+ }
+
+ assert(p->read_only() == src_info.read_only(), "must be");
+
+ if (created && src_info.should_copy()) {
+ ref->set_user_data((void*)p);
+ if (read_only) {
+ _ro_src_objs.append(enclosing_ref, p);
+ } else {
+ _rw_src_objs.append(enclosing_ref, p);
+ }
+ return true; // Need to recurse into this ref only if we are copying it
+ } else {
+ return false;
+ }
+}
+
+void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
+ int i;
+
+ if (!is_relocating_pointers) {
+ // Don't relocate _symbols, so we can safely call decrement_refcount on the
+ // original symbols.
+ int num_symbols = _symbols->length();
+ for (i = 0; i < num_symbols; i++) {
+ it->push(_symbols->adr_at(i));
+ }
+ }
+
+ int num_klasses = _klasses->length();
+ for (i = 0; i < num_klasses; i++) {
+ it->push(_klasses->adr_at(i));
+ }
+
+ iterate_roots(it, is_relocating_pointers);
+}
+
+class GatherSortedSourceObjs : public MetaspaceClosure {
+ ArchiveBuilder* _builder;
+
+public:
+ GatherSortedSourceObjs(ArchiveBuilder* builder) : _builder(builder) {}
+
+ virtual bool do_ref(Ref* ref, bool read_only) {
+ return _builder->gather_one_source_obj(enclosing_ref(), ref, read_only);
+ }
+
+ virtual void do_pending_ref(Ref* ref) {
+ if (ref->obj() != NULL) {
+ _builder->remember_embedded_pointer_in_copied_obj(enclosing_ref(), ref);
+ }
+ }
+};
+
+void ArchiveBuilder::gather_source_objs() {
+ ResourceMark rm;
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Gathering all archivable objects ... ");
+ }
+ gather_klasses_and_symbols();
+ GatherSortedSourceObjs doit(this);
+ iterate_sorted_roots(&doit, /*is_relocating_pointers=*/false);
+ doit.finish();
+}
+
+bool ArchiveBuilder::is_excluded(Klass* klass) {
+ if (klass->oop_is_instance()) {
+ InstanceKlass* ik = InstanceKlass::cast(klass);
+ return SystemDictionaryShared::is_excluded_class(ik);
+ } else if (klass->oop_is_objArray()) {
+ if (DynamicDumpSharedSpaces) {
+ // Archiving of array klasses is not supported for now.
+ return true;
+ }
+ Klass* bottom = ObjArrayKlass::cast(klass)->bottom_klass();
+ if (bottom->oop_is_instance()) {
+ return SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(bottom));
+ }
+ }
+
+ return false;
+}
+
+ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref *ref) {
+ address obj = ref->obj();
+ if (MetaspaceShared::is_in_shared_space(obj)) {
+ // Don't dump existing shared metadata again.
+ return point_to_it;
+ } else if (ref->msotype() == MetaspaceObj::MethodDataType) {
+ return set_to_null;
+ } else {
+ if (ref->msotype() == MetaspaceObj::ClassType) {
+ Klass* klass = (Klass*)ref->obj();
+ assert(klass->is_klass(), "must be");
+ if (is_excluded(klass)) {
+ if (TraceDynamicCDS) {
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("Skipping class (excluded): %s", klass->external_name());
+ }
+ return set_to_null;
+ }
+ }
+
+ return make_a_copy;
+ }
+}
+
+int ArchiveBuilder::compare_symbols_by_address(Symbol** a, Symbol** b) {
+ if (a[0] < b[0]) {
+ return -1;
+ } else {
+ assert(a[0] > b[0], "Duplicated symbol unexpected");
+ return 1;
+ }
+}
+
+int ArchiveBuilder::compare_klass_by_name(Klass** a, Klass** b) {
+ return a[0]->name()->fast_compare(b[0]->name());
+}
+
+void ArchiveBuilder::sort_klasses() {
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Sorting classes ... ");
+ }
+ _klasses->sort(compare_klass_by_name);
+}
+
+class GatherKlassesAndSymbols : public UniqueMetaspaceClosure {
+ ArchiveBuilder* _builder;
+
+public:
+ GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) { }
+
+ virtual bool do_unique_ref(Ref* ref, bool read_only) {
+ return _builder->gather_klass_and_symbol(ref, read_only);
+ }
+};
+
+void ArchiveBuilder::gather_klasses_and_symbols() {
+ ResourceMark rm;
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Gathering classes and symbols ... ");
+ }
+ GatherKlassesAndSymbols doit(this);
+ iterate_roots(&doit, false);
+ doit.finish();
+
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Number of classes %d", _num_instance_klasses + _num_obj_array_klasses + _num_type_array_klasses);
+ dynamic_cds_log->print_cr(" instance classes = %5d", _num_instance_klasses);
+ dynamic_cds_log->print_cr(" obj array classes = %5d", _num_obj_array_klasses);
+ dynamic_cds_log->print_cr(" type array classes = %5d", _num_type_array_klasses);
+ dynamic_cds_log->print_cr(" symbols = %5d", _symbols->length());
+ }
+}
+
+bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
+ if (ref->obj() == NULL) {
+ return false;
+ }
+ if (get_follow_mode(ref) != make_a_copy) {
+ return false;
+ }
+ if (ref->msotype() == MetaspaceObj::ClassType) {
+ Klass* klass = (Klass*)ref->obj();
+ assert(klass->is_klass(), "must be");
+ if (!is_excluded(klass)) {
+ _klasses->append(klass);
+ if (klass->oop_is_instance()) {
+ _num_instance_klasses ++;
+ } else if (klass->oop_is_objArray()) {
+ _num_obj_array_klasses ++;
+ } else {
+ assert(klass->oop_is_typeArray(), "sanity");
+ _num_type_array_klasses ++;
+ }
+ }
+ // See RunTimeSharedClassInfo::get_for()
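+ // (this accounts for the extra lookup-pointer slot that make_shallow_copy()
+ // allocates in front of each InstanceKlass)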
+ _estimated_metaspaceobj_bytes += align_up(BytesPerWord, KlassAlignmentInBytes);
+ } else if (ref->msotype() == MetaspaceObj::SymbolType) {
+ // Make sure the symbol won't be GC'ed while we are dumping the archive.
+ Symbol* sym = (Symbol*)ref->obj();
+ sym->increment_refcount();
+ _symbols->append(sym);
+ }
+
+ int bytes = ref->size() * BytesPerWord;
+ _estimated_metaspaceobj_bytes += align_up(bytes, KlassAlignmentInBytes);
+ return true; // recurse
+}
+
+size_t ArchiveBuilder::estimate_archive_size() {
+ // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
+ size_t symbol_table_est = SymbolTable::estimate_size_for_archive();
+ size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
+ _estimated_hashtable_bytes = symbol_table_est + dictionary_est;
+
+ size_t total = 0;
+
+ total += _estimated_metaspaceobj_bytes;
+ total += _estimated_hashtable_bytes;
+
+ // allow fragmentation at the end of each dump region
+ total += _total_dump_regions * ((size_t)os::vm_allocation_granularity());
+
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
+ symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
+ dynamic_cds_log->print_cr("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
+ dynamic_cds_log->print_cr("total estimate bytes = " SIZE_FORMAT, total);
+ }
+
+ return align_up(total, (size_t)os::vm_allocation_granularity());
+}
+
+address ArchiveBuilder::reserve_buffer() {
+ size_t buffer_size = estimate_archive_size();
+ ReservedSpace rs(buffer_size, os::vm_allocation_granularity(), false);
+ if (!rs.is_reserved()) {
+ tty->print_cr("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
+ vm_direct_exit(0);
+ }
+
+ // buffer_bottom is the lowest address of the 2 core regions (rw, ro) when
+ // we are copying the class metadata into the buffer.
+ address buffer_bottom = (address)rs.base();
+ _shared_rs = rs;
+
+ _buffer_bottom = buffer_bottom;
+ _last_verified_top = buffer_bottom;
+ _current_dump_space = &_rw_region;
+ _num_dump_regions_used = 1;
+ _other_region_used_bytes = 0;
+ _current_dump_space->init(&_shared_rs, &_shared_vs);
+
+ ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);
+
+ // The bottom of the static archive should be mapped at this address by default.
+ _requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();
+
+ size_t static_archive_size = FileMapInfo::shared_spaces_size();
+ _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
+
+ _mapped_static_archive_bottom = (address)MetaspaceShared::shared_metaspace_static_bottom();
+ _mapped_static_archive_top = _mapped_static_archive_bottom + static_archive_size;
+
+ _requested_dynamic_archive_bottom = align_up(_requested_static_archive_top, (size_t)os::vm_allocation_granularity());
+
+ _buffer_to_requested_delta = _requested_dynamic_archive_bottom - _buffer_bottom;
+
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Reserved output buffer space at " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
+ p2i(buffer_bottom), buffer_size);
+ dynamic_cds_log->print_cr("Dynamic archive mapped space at " PTR_FORMAT, p2i(_requested_dynamic_archive_bottom));
+ }
+
+ return buffer_bottom;
+}
+
+void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
+ address bottom = _last_verified_top;
+ address top = (address)(current_dump_space()->top());
+ size_t used = size_t(top - bottom) + _other_region_used_bytes;
+ int diff = int(estimate) - int(used);
+
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes", which, estimate, used, diff);
+ }
+ assert(diff >= 0, "Estimate is too small");
+
+ _last_verified_top = top;
+ _other_region_used_bytes = 0;
+}
+
+void ArchiveBuilder::dump_rw_metadata() {
+ ResourceMark rm;
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Allocating RW objects ... ");
+ }
+ make_shallow_copies(&_rw_region, &_rw_src_objs);
+}
+
+void ArchiveBuilder::dump_ro_metadata() {
+ ResourceMark rm;
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Allocating RO objects ... ");
+ }
+ start_dump_space(&_ro_region);
+ make_shallow_copies(&_ro_region, &_ro_src_objs);
+}
+
+void ArchiveBuilder::start_dump_space(DumpRegion* next) {
+ address bottom = _last_verified_top;
+ address top = (address)(_current_dump_space->top());
+ _other_region_used_bytes += size_t(top - bottom);
+ _current_dump_space->pack(next);
+ _current_dump_space = next;
+ _num_dump_regions_used ++;
+ _last_verified_top = (address)(_current_dump_space->top());
+}
+
+void ArchiveBuilder::patch_shared_obj_vtable() {
+ SourceObjList* objs = &_rw_src_objs;
+
+ for (int i = 0; i < objs->objs()->length(); i++) {
+ SourceObjInfo* src_info = objs->objs()->at(i);
+ address dest = src_info->dumped_addr();
+ MetaspaceClosure::Ref* ref = src_info->ref();
+ intptr_t* archived_vtable = MetaspaceShared::get_archived_vtable(ref->msotype(), dest);
+ if (archived_vtable != NULL) {
+ // When we copy an archived vtable pointer from the base archive into the dynamic
+ // archive's objects, no virtual function may be called on the copy until the
+ // dynamic archive has been restored at runtime.
+ *(intptr_t**)dest = archived_vtable;
+ ArchivePtrMarker::mark_pointer((address*)dest);
+ }
+ }
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("patch vtable done (%d objects)", objs->objs()->length());
+ }
+}
+
+void ArchiveBuilder::remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref,
+ MetaspaceClosure::Ref* ref) {
+ assert(ref->obj() != NULL, "should have checked");
+
+ if (enclosing_ref != NULL) {
+ SourceObjInfo* src_info = (SourceObjInfo*)enclosing_ref->user_data();
+ if (src_info == NULL) {
+ // source objects of point_to_it/set_to_null types are not copied
+ // so we don't need to remember their pointers.
+ } else {
+ if (src_info->read_only()) {
+ _ro_src_objs.remember_embedded_pointer(src_info, ref);
+ } else {
+ _rw_src_objs.remember_embedded_pointer(src_info, ref);
+ }
+ }
+ }
+}
+
+void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
+ const ArchiveBuilder::SourceObjList* src_objs) {
+ for (int i = 0; i < src_objs->objs()->length(); i++) {
+ make_shallow_copy(dump_region, src_objs->objs()->at(i));
+ }
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("done (%d objects)", src_objs->objs()->length());
+ }
+}
+
+void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
+ MetaspaceClosure::Ref* ref = src_info->ref();
+ address src = ref->obj();
+ int bytes = src_info->size_in_bytes();
+ char* dest;
+ char* oldtop;
+ char* newtop;
+
+ oldtop = dump_region->top();
+ if (ref->msotype() == MetaspaceObj::ClassType) {
+ // Save a pointer immediately in front of an InstanceKlass, so
+ // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo*
+ // without building another hashtable. See RunTimeSharedClassInfo::get_for()
+ // in systemDictionaryShared.cpp.
+ Klass* klass = (Klass*)src;
+ if (klass->oop_is_instance()) {
+ dump_region->allocate(sizeof(address));
+ }
+ }
+ dest = dump_region->allocate(bytes);
+ newtop = dump_region->top();
+
+ memcpy(dest, src, bytes);
+
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
+ }
+ src_info->set_dumped_addr((address)dest);
+
+ _alloc_stats.record(ref->msotype(), int(newtop - oldtop), src_info->read_only());
+}
+
+address ArchiveBuilder::get_dumped_addr(address src_obj) {
+ SourceObjInfo* p = _src_obj_table.lookup(src_obj);
+ assert(p != NULL, "must be");
+
+ return p->dumped_addr();
+}
+
+void ArchiveBuilder::relocate_embedded_pointers(ArchiveBuilder::SourceObjList* src_objs) {
+ for (int i = 0; i < src_objs->objs()->length(); i++) {
+ src_objs->relocate(i, this);
+ }
+}
+
+void ArchiveBuilder::print_stats() {
+ _alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
+}
+
+void ArchiveBuilder::make_klasses_shareable() {
+ for (int i = 0; i < klasses()->length(); i++) {
+ Klass* k = klasses()->at(i);
+ k->remove_java_mirror();
+ if (k->oop_is_objArray()) {
+ // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
+ // on their array classes.
+ } else if (k->oop_is_typeArray()) {
+ k->remove_unshareable_info();
+ } else {
+ assert(k->oop_is_instance(), " must be");
+ InstanceKlass* ik = InstanceKlass::cast(k);
+ // Newer JDK versions rewrite nofast bytecodes and calculate fingerprints here;
+ // JDK 8 does not need this step.
+ // MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
+ ik->remove_unshareable_info(); // assign_class_loader_type is in Klass::remove_unshareable_info
+
+ if (DebugDynamicCDS) {
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("klasses[%4d] = " PTR_FORMAT " => " PTR_FORMAT " %s", i, p2i(ik), p2i(to_requested(ik)), ik->external_name());
+ }
+ }
+ }
+}
+
+uintx ArchiveBuilder::buffer_to_offset(address p) const {
+ address requested_p = to_requested(p);
+ assert(requested_p >= _requested_static_archive_bottom, "must be");
+ return requested_p - _requested_static_archive_bottom;
+}
+
+uintx ArchiveBuilder::any_to_offset(address p) const {
+ if (is_in_mapped_static_archive(p)) {
+ assert(DynamicDumpSharedSpaces, "must be");
+ return p - _mapped_static_archive_bottom;
+ }
+ return buffer_to_offset(p);
+}
+
+// RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
+// so that the archive can be mapped to the "requested" location without runtime relocation.
+//
+// - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
+// - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
+// - Every pointer must have one of the following values:
+// [a] NULL:
+// No relocation is needed. Remove this pointer from ptrmap so we don't need to
+// consider it at runtime.
+// [b] Points into an object X which is inside the buffer:
+// Adjust this pointer by _buffer_to_requested_delta, so it points to X
+// when the archive is mapped at the requested location.
+// [c] Points into an object Y which is inside mapped static archive:
+// - This happens only during dynamic dump
+// - Adjust this pointer by _mapped_to_requested_static_archive_delta,
+// so it points to Y when the static archive is mapped at the requested location.
+class RelocateBufferToRequested : public BitMapClosure {
+ ArchiveBuilder* _builder;
+ address _buffer_bottom;
+ intx _buffer_to_requested_delta;
+ intx _mapped_to_requested_static_archive_delta;
+ size_t _max_non_null_offset;
+
+ public:
+ RelocateBufferToRequested(ArchiveBuilder* builder) {
+ _builder = builder;
+ _buffer_bottom = _builder->buffer_bottom();
+ _buffer_to_requested_delta = builder->buffer_to_requested_delta();
+ _mapped_to_requested_static_archive_delta = builder->requested_static_archive_bottom() - builder->mapped_static_archive_bottom();
+ _max_non_null_offset = 0;
+
+ address bottom = _builder->buffer_bottom();
+ address top = _builder->buffer_top();
+ address new_bottom = bottom + _buffer_to_requested_delta;
+ address new_top = top + _buffer_to_requested_delta;
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
+ "[" INTPTR_FORMAT " - " INTPTR_FORMAT "]",
+ p2i(bottom), p2i(top),
+ p2i(new_bottom), p2i(new_top));
+ }
+ }
+
+ bool do_bit(size_t offset) {
+ address* p = (address*)_buffer_bottom + offset;
+ assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");
+
+ if (*p == NULL) {
+ // NULL pointer: nothing to relocate; clear the bit so the runtime skips it.
+ ArchivePtrMarker::ptrmap()->clear_bit(offset);
+ } else {
+ if (_builder->is_in_buffer_space(*p)) {
+ *p += _buffer_to_requested_delta;
+ // assert is in requested dynamic archive
+ } else {
+ assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
+ *p += _mapped_to_requested_static_archive_delta;
+ assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
+ }
+
+ _max_non_null_offset = offset;
+ }
+
+ return true; // keep iterating
+ }
+
+ void doit() {
+ ArchivePtrMarker::ptrmap()->iterate(this);
+ ArchivePtrMarker::compact(_max_non_null_offset);
+ }
+};
+
+void ArchiveBuilder::relocate_to_requested() {
+ ro_region()->pack();
+
+ size_t my_archive_size = buffer_top() - buffer_bottom();
+
+ assert(DynamicDumpSharedSpaces, "must be");
+ _requested_dynamic_archive_top = _requested_dynamic_archive_bottom + my_archive_size;
+ RelocateBufferToRequested patcher(this);
+ patcher.doit();
+}
+
+void ArchiveBuilder::clean_up_src_obj_table() {
+ SrcObjTableCleaner cleaner;
+ _src_obj_table.iterate(&cleaner);
+}
+
+void ArchiveBuilder::write_archive(FileMapInfo* mapinfo) {
+ assert(mapinfo->header()->magic() == CDS_DYNAMIC_ARCHIVE_MAGIC, "Dynamic CDS calls only");
+
+ mapinfo->write_dynamic_header();
+
+ write_region(mapinfo, MetaspaceShared::d_rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
+ write_region(mapinfo, MetaspaceShared::d_ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
+
+ char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap());
+
+ if (InfoDynamicCDS && mapinfo->is_open()) {
+ print_stats();
+ }
+
+ mapinfo->close();
+ FREE_C_HEAP_ARRAY(char, bitmap, mtClassShared);
+}
+
+void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec) {
+ mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), dump_region->used(), read_only, allow_exec);
+}
+
+class RefRelocator: public MetaspaceClosure {
+ ArchiveBuilder* _builder;
+
+public:
+ RefRelocator(ArchiveBuilder* builder) : _builder(builder) {}
+
+ virtual bool do_ref(Ref* ref, bool read_only) {
+ if (ref->not_null()) {
+ ref->update(_builder->get_dumped_addr(ref->obj()));
+ ArchivePtrMarker::mark_pointer(ref->addr());
+ }
+ return false; // Do not recurse.
+ }
+};
+
+void ArchiveBuilder::relocate_roots() {
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Relocating external roots ... ");
+ }
+ ResourceMark rm;
+ RefRelocator doit(this);
+ iterate_sorted_roots(&doit, /*is_relocating_pointers=*/true);
+ doit.finish();
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("done");
+ }
+}
+
+void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Relocating embedded pointers in core regions ... ");
+ }
+ relocate_embedded_pointers(&_rw_src_objs);
+ relocate_embedded_pointers(&_ro_src_objs);
+}
+
+#ifndef PRODUCT
+void ArchiveBuilder::assert_is_vm_thread() {
+ assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
+}
+#endif
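
The functions in archiveBuilder.cpp above compose a fixed dump pipeline. A minimal
sketch of the call order, reconstructed from this file alone (the actual driver lives
in dynamicArchive.cpp, added later in this patch; includes, error handling and VM-thread
checks are elided):

    // Sketch only -- not the actual driver. Call order inferred from archiveBuilder.cpp.
    void dump_sketch(ArchiveBuilder* builder, FileMapInfo* mapinfo) {
      builder->gather_source_objs();                      // walk roots, classify every MetaspaceObj
      builder->reserve_buffer();                          // estimate size, reserve the rw/ro buffer
      builder->dump_rw_metadata();                        // shallow-copy read-write objects
      builder->dump_ro_metadata();                        // shallow-copy read-only objects
      builder->relocate_metaspaceobj_embedded_pointers(); // patch pointers inside the copies
      builder->relocate_roots();                          // patch external root pointers
      builder->make_klasses_shareable();                  // strip unshareable state from klasses
      builder->patch_shared_obj_vtable();                 // point vtables at archived copies
      builder->relocate_to_requested();                   // shift everything to the requested base
      builder->write_archive(mapinfo);                    // emit rw/ro regions + pointer bitmap
    }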
diff --git a/hotspot/src/share/vm/cds/archiveBuilder.hpp b/hotspot/src/share/vm/cds/archiveBuilder.hpp
new file mode 100644
index 000000000..18cd3c622
--- /dev/null
+++ b/hotspot/src/share/vm/cds/archiveBuilder.hpp
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CDS_ARCHIVEBUILDER_HPP
+#define SHARE_VM_CDS_ARCHIVEBUILDER_HPP
+
+#include "cds/archiveUtils.hpp"
+#include "cds/dumpAllocStats.hpp"
+#include "memory/metaspaceClosure.hpp"
+//#include "oops/array.hpp"
+#include "oops/klass.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/bitMap.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/hashtable.hpp"
+#include "utilities/resourceHash.hpp"
+
+class FileMapInfo;
+// Overview of CDS archive creation (for both static and dynamic dump):
+//
+// [1] Load all classes (static dump: from the classlist, dynamic dump: as part of app execution)
+// [2] Allocate "output buffer"
+// [3] Copy contents of the 2 "core" regions (rw/ro) into the output buffer.
+// - allocate the cpp vtables in rw (static dump only)
+// - memcpy the MetaspaceObjs into rw/ro:
+// dump_rw_region();
+// dump_ro_region();
+// - fix all the pointers in the MetaspaceObjs to point to the copies
+// relocate_metaspaceobj_embedded_pointers()
+// [4] Copy symbol table, dictionary, etc, into the ro region
+// [5] Relocate all the pointers in rw/ro, so that the archive can be mapped to
+// the "requested" location without runtime relocation. See relocate_to_requested()
+class ArchiveBuilder : public StackObj {
+protected:
+ DumpRegion* _current_dump_space;
+ address _buffer_bottom; // for writing the contents of rw/ro regions
+ address _last_verified_top;
+ int _num_dump_regions_used;
+ size_t _other_region_used_bytes;
+
+ // These are the addresses where we will request the static and dynamic archives to be
+ // mapped at run time. If the request fails (due to ASLR), we will map the archives at
+ // os-selected addresses.
+ address _requested_static_archive_bottom; // This is determined solely by the value of
+ // SharedBaseAddress during -Xshare:dump.
+ address _requested_static_archive_top;
+ address _requested_dynamic_archive_bottom; // Used only during dynamic dump. It's placed
+ // immediately above _requested_static_archive_top.
+ address _requested_dynamic_archive_top;
+
+ // (Used only during dynamic dump) where the static archive is actually mapped. This
+ // may be different than _requested_static_archive_{bottom,top} due to ASLR
+ address _mapped_static_archive_bottom;
+ address _mapped_static_archive_top;
+
+ intx _buffer_to_requested_delta;
+
+ DumpRegion* current_dump_space() const { return _current_dump_space; }
+
+public:
+ enum FollowMode {
+ make_a_copy, point_to_it, set_to_null
+ };
+
+private:
+ class SourceObjInfo {
+ MetaspaceClosure::Ref* _ref;
+ uintx _ptrmap_start; // The bit-offset of the start of this object (inclusive)
+ uintx _ptrmap_end; // The bit-offset of the end of this object (exclusive)
+ bool _read_only;
+ FollowMode _follow_mode;
+ int _size_in_bytes;
+ MetaspaceObj::Type _msotype;
+ address _dumped_addr; // Address of the copy of this->obj(), as used by the dumped archive.
+ address _orig_obj; // The value of the original object (_ref->obj()) when this
+ // SourceObjInfo was created. Note that _ref->obj() may change
+ // later if _ref is relocated.
+
+ public:
+ SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
+ _ref(ref), _ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _follow_mode(follow_mode),
+ _size_in_bytes(ref->size() * BytesPerWord), _msotype(ref->msotype()),
+ _orig_obj(ref->obj()) {
+ if (follow_mode == point_to_it) {
+ _dumped_addr = ref->obj();
+ } else {
+ _dumped_addr = NULL;
+ }
+ }
+
+ bool should_copy() const { return _follow_mode == make_a_copy; }
+ MetaspaceClosure::Ref* ref() const { return _ref; }
+ void set_dumped_addr(address dumped_addr) {
+ assert(should_copy(), "must be");
+ assert(_dumped_addr == NULL, "cannot be copied twice");
+ assert(dumped_addr != NULL, "must be a valid copy");
+ _dumped_addr = dumped_addr;
+ }
+ void set_ptrmap_start(uintx v) { _ptrmap_start = v; }
+ void set_ptrmap_end(uintx v) { _ptrmap_end = v; }
+ uintx ptrmap_start() const { return _ptrmap_start; } // inclusive
+ uintx ptrmap_end() const { return _ptrmap_end; } // exclusive
+ bool read_only() const { return _read_only; }
+ int size_in_bytes() const { return _size_in_bytes; }
+ address orig_obj() const { return _orig_obj; }
+ address dumped_addr() const { return _dumped_addr; }
+ MetaspaceObj::Type msotype() const { return _msotype; }
+
+ // convenience accessor
+ address obj() const { return ref()->obj(); }
+ };
+
+ class SourceObjList {
+ uintx _total_bytes;
+ GrowableArray<SourceObjInfo*>* _objs; // Source objects to be archived
+ BitMap _ptrmap; // Marks the addresses of the pointer fields
+ // in the source objects
+ public:
+ SourceObjList();
+ ~SourceObjList();
+ GrowableArray<SourceObjInfo*>* objs() const { return _objs; }
+
+ void append(MetaspaceClosure::Ref* enclosing_ref, SourceObjInfo* src_info);
+ void remember_embedded_pointer(SourceObjInfo* pointing_obj, MetaspaceClosure::Ref* ref);
+ void relocate(int i, ArchiveBuilder* builder);
+
+ // convenience accessor
+ SourceObjInfo* at(int i) const { return objs()->at(i); }
+ };
+
+ class SrcObjTableCleaner {
+ public:
+ bool do_entry(address key, const SourceObjInfo* value) {
+ delete value->ref();
+ return true;
+ }
+ };
+
+ static const int INITIAL_TABLE_SIZE = 15889;
+ static const int MAX_TABLE_SIZE = 1000000;
+
+ ReservedSpace _shared_rs;
+ VirtualSpace _shared_vs;
+
+ DumpRegion _rw_region;
+ DumpRegion _ro_region;
+ BitMap _ptrmap;
+
+ SourceObjList _rw_src_objs; // objs to put in rw region
+ SourceObjList _ro_src_objs; // objs to put in ro region
+ KVHashtable<address, SourceObjInfo, mtClassShared> _src_obj_table;
+ GrowableArray<Klass*>* _klasses;
+ GrowableArray<Symbol*>* _symbols;
+
+ // statistics
+ int _num_instance_klasses;
+ int _num_obj_array_klasses;
+ int _num_type_array_klasses;
+ DumpAllocStats _alloc_stats;
+
+ // For global access.
+ static ArchiveBuilder* _current;
+
+public:
+ // Use this when you allocate space outside of ArchiveBuilder::dump_{rw,ro}_region.
+ // These are usually for misc tables that are allocated in the RO space.
+ class OtherROAllocMark {
+ char* _oldtop;
+ public:
+ OtherROAllocMark() {
+ _oldtop = _current->_ro_region.top();
+ }
+ ~OtherROAllocMark();
+ };
+
+private:
+ FollowMode get_follow_mode(MetaspaceClosure::Ref *ref);
+
+ void iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers);
+ void sort_klasses();
+ static int compare_symbols_by_address(Symbol** a, Symbol** b);
+ static int compare_klass_by_name(Klass** a, Klass** b);
+
+ bool is_excluded(Klass* k);
+ void clean_up_src_obj_table();
+
+ void make_shallow_copies(DumpRegion *dump_region, const SourceObjList* src_objs);
+ void make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info);
+ void relocate_embedded_pointers(SourceObjList* src_objs);
+
+protected:
+ virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) = 0;
+
+ // Conservative estimate for number of bytes needed for:
+ size_t _estimated_metaspaceobj_bytes; // all archived MetaspaceObj's.
+ size_t _estimated_hashtable_bytes; // symbol table and dictionaries
+
+ size_t estimate_archive_size();
+
+ static const int _total_dump_regions = 2;
+
+ void start_dump_space(DumpRegion* next);
+ void verify_estimate_size(size_t estimate, const char* which);
+
+public:
+ address reserve_buffer();
+
+ address buffer_bottom() const { return _buffer_bottom; }
+ address buffer_top() const { return (address)current_dump_space()->top(); }
+ address requested_static_archive_bottom() const { return _requested_static_archive_bottom; }
+ address mapped_static_archive_bottom() const { return _mapped_static_archive_bottom; }
+ intx buffer_to_requested_delta() const { return _buffer_to_requested_delta; }
+
+ bool is_in_buffer_space(address p) const {
+ return (buffer_bottom() <= p && p < buffer_top());
+ }
+
+ template <typename T> bool is_in_buffer_space(T obj) const {
+ return is_in_buffer_space(address(obj));
+ }
+
+ template <typename T> bool is_in_requested_static_archive(T p) const {
+ return _requested_static_archive_bottom <= (address)p && (address)p < _requested_static_archive_top;
+ }
+
+ template <typename T> bool is_in_mapped_static_archive(T p) const {
+ return _mapped_static_archive_bottom <= (address)p && (address)p < _mapped_static_archive_top;
+ }
+
+ template <typename T> T to_requested(T obj) const {
+ assert(is_in_buffer_space(obj), "must be");
+ return (T)(address(obj) + _buffer_to_requested_delta);
+ }
+
+public:
+ static const uintx MAX_SHARED_DELTA = 0x7FFFFFFF;
+
+ // The address p points to an object inside the output buffer. When the archive is mapped
+ // at the requested address, what's the offset of this object from _requested_static_archive_bottom?
+ uintx buffer_to_offset(address p) const;
+
+ // Same as buffer_to_offset, except that the address p points to either (a) an object
+ // inside the output buffer, or (b) an object in the currently mapped static archive.
+ uintx any_to_offset(address p) const;
+
+ template <typename T>
+ u4 buffer_to_offset_u4(T p) const {
+ uintx offset = buffer_to_offset((address)p);
+ guarantee(offset <= MAX_SHARED_DELTA, "must be 32-bit offset");
+ return (u4)offset;
+ }
+
+ template <typename T>
+ u4 any_to_offset_u4(T p) const {
+ uintx offset = any_to_offset((address)p);
+ guarantee(offset <= MAX_SHARED_DELTA, "must be 32-bit offset");
+ return (u4)offset;
+ }
+
+ static void assert_is_vm_thread() PRODUCT_RETURN;
+
+public:
+ ArchiveBuilder();
+ ~ArchiveBuilder();
+
+ void gather_klasses_and_symbols();
+ void replace_klass_in_constanPool();
+ void gather_source_objs();
+ bool gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only);
+ bool gather_one_source_obj(MetaspaceClosure::Ref* enclosing_ref, MetaspaceClosure::Ref* ref, bool read_only);
+ void remember_embedded_pointer_in_copied_obj(MetaspaceClosure::Ref* enclosing_ref, MetaspaceClosure::Ref* ref);
+
+ DumpRegion* rw_region() { return &_rw_region; }
+ DumpRegion* ro_region() { return &_ro_region; }
+
+ static char* rw_region_alloc(size_t num_bytes) {
+ return current()->rw_region()->allocate(num_bytes);
+ }
+ static char* ro_region_alloc(size_t num_bytes) {
+ return current()->ro_region()->allocate(num_bytes);
+ }
+
+ template <typename T>
+ static Array<T>* new_ro_array(int length) {
+ size_t byte_size = Array<T>::byte_sizeof(length);
+ Array<T>* array = (Array<T>*)ro_region_alloc(byte_size);
+ array->initialize(length);
+ return array;
+ }
+
+ template <typename T>
+ static Array<T>* new_rw_array(int length) {
+ size_t byte_size = Array<T>::byte_sizeof(length);
+ Array<T>* array = (Array<T>*)rw_region_alloc(byte_size);
+ array->initialize(length);
+ return array;
+ }
+
+ template <typename T>
+ static size_t ro_array_bytesize(int length) {
+ size_t byte_size = Array<T>::byte_sizeof(length);
+ return align_up(byte_size, KlassAlignmentInBytes);
+ }
+
+ void dump_rw_metadata();
+ void dump_ro_metadata();
+ void relocate_metaspaceobj_embedded_pointers();
+ void relocate_roots();
+ void make_klasses_shareable();
+ void relocate_to_requested();
+ void write_archive(FileMapInfo* mapinfo);
+ void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region, bool read_only, bool allow_exec);
+ address get_dumped_addr(address src_obj);
+ void patch_shared_obj_vtable();
+
+ // All klasses and symbols that will be copied into the archive
+ GrowableArray<Klass*>* klasses() const { return _klasses; }
+ GrowableArray<Symbol*>* symbols() const { return _symbols; }
+
+ static bool is_active() {
+ return (_current != NULL);
+ }
+
+ static ArchiveBuilder* current() {
+ assert_is_vm_thread();
+ assert(_current != NULL, "ArchiveBuilder must be active");
+ return _current;
+ }
+
+ static DumpAllocStats* alloc_stats() {
+ return &(current()->_alloc_stats);
+ }
+
+ static Symbol* get_relocated_symbol(Symbol* orig_symbol) {
+ return (Symbol*)current()->get_dumped_addr((address)orig_symbol);
+ }
+
+ static CompactHashtableStats* symbol_stats() {
+ return alloc_stats()->symbol_stats();
+ }
+
+ void print_stats();
+};
+
+#endif // SHARE_VM_CDS_ARCHIVEBUILDER_HPP
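
OtherROAllocMark above is a scoped helper: its constructor snapshots _ro_region.top()
and its destructor charges everything allocated in between to the "other" bucket of
DumpAllocStats. A minimal usage sketch (write_misc_table and byte_size are hypothetical
names, for illustration only):

    // Hypothetical helper, sketch only: allocate a misc table in the RO region
    // and have the bytes recorded in DumpAllocStats automatically.
    static char* write_misc_table(size_t byte_size) {
      ArchiveBuilder::OtherROAllocMark mark;               // snapshots _ro_region.top()
      char* p = ArchiveBuilder::ro_region_alloc(byte_size);
      // ... fill in the table at p ...
      return p;
    }                                                      // ~OtherROAllocMark() records newtop - _oldtop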
diff --git a/hotspot/src/share/vm/cds/archiveUtils.cpp b/hotspot/src/share/vm/cds/archiveUtils.cpp
new file mode 100644
index 000000000..88c04241d
--- /dev/null
+++ b/hotspot/src/share/vm/cds/archiveUtils.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/dynamicArchive.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "memory/filemap.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/arguments.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "utilities/align.hpp"
+
+BitMap* ArchivePtrMarker::_ptrmap = NULL;
+VirtualSpace* ArchivePtrMarker::_vs;
+
+bool ArchivePtrMarker::_compacted;
+
+void ArchivePtrMarker::initialize(BitMap* ptrmap, VirtualSpace* vs) {
+ assert(_ptrmap == NULL, "initialize only once");
+ _vs = vs;
+ _compacted = false;
+ _ptrmap = ptrmap;
+
+ // Use this as an initial guesstimate. We should need less space in the
+ // archive, but if we're wrong the bitmap will be expanded automatically.
+ size_t estimated_archive_size = MetaspaceGC::capacity_until_GC();
+ // But set it smaller in debug builds so we always test the expansion code.
+ // (Default archive is about 12MB).
+ DEBUG_ONLY(estimated_archive_size = 6 * M);
+
+ // We need one bit per pointer in the archive.
+ _ptrmap->resize(estimated_archive_size / sizeof(intptr_t), false);
+}
+
+void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
+ assert(_ptrmap != NULL, "not initialized");
+ assert(!_compacted, "cannot mark anymore");
+
+ if (ptr_base() <= ptr_loc && ptr_loc < ptr_end()) {
+ address value = *ptr_loc;
+ // We don't want any pointer that points to the very bottom of the archive; otherwise, when
+ // MetaspaceShared::default_base_address()==0, we can't distinguish between a pointer
+ // to nothing (NULL) and a pointer to an object that happens to be at the very bottom
+ // of the archive.
+ assert(value != (address)ptr_base(), "don't point to the bottom of the archive");
+
+ if (value != NULL) {
+ assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
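+ // address* arithmetic: idx is the pointer-sized slot index from ptr_base(),
+ // matching the one-bit-per-pointer layout chosen in initialize().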
+ size_t idx = ptr_loc - ptr_base();
+ if (_ptrmap->size() <= idx) {
+ _ptrmap->resize((idx + 1) * 2, false);
+ }
+ assert(idx < _ptrmap->size(), "must be");
+ _ptrmap->set_bit(idx);
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Marking pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
+ }
+ }
+ }
+}
+
+void ArchivePtrMarker::clear_pointer(address* ptr_loc) {
+ assert(_ptrmap != NULL, "not initialized");
+ assert(!_compacted, "cannot clear anymore");
+
+ assert(ptr_base() <= ptr_loc && ptr_loc < ptr_end(), "must be");
+ assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
+ size_t idx = ptr_loc - ptr_base();
+ assert(idx < _ptrmap->size(), "cannot clear pointers that have not been marked");
+ _ptrmap->clear_bit(idx);
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Clearing pointer [" PTR_FORMAT "] -> " PTR_FORMAT " @ " SIZE_FORMAT_W(5), p2i(ptr_loc), p2i(*ptr_loc), idx);
+ }
+}
+
+class ArchivePtrBitmapCleaner: public BitMapClosure {
+ BitMap* _ptrmap;
+ address* _ptr_base;
+ address _relocatable_base;
+ address _relocatable_end;
+ size_t _max_non_null_offset;
+
+public:
+ ArchivePtrBitmapCleaner(BitMap* ptrmap, address* ptr_base, address relocatable_base, address relocatable_end) :
+ _ptrmap(ptrmap), _ptr_base(ptr_base),
+ _relocatable_base(relocatable_base), _relocatable_end(relocatable_end), _max_non_null_offset(0) {}
+
+ bool do_bit(size_t offset) {
+ address* ptr_loc = _ptr_base + offset;
+ address ptr_value = *ptr_loc;
+ if (ptr_value != NULL) {
+ assert(_relocatable_base <= ptr_value && ptr_value < _relocatable_end, "do not point to arbitrary locations!");
+ if (_max_non_null_offset < offset) {
+ _max_non_null_offset = offset;
+ }
+ } else {
+ _ptrmap->clear_bit(offset);
+ }
+
+ return true;
+ }
+
+ size_t max_non_null_offset() const { return _max_non_null_offset; }
+};
+
+void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) {
+ assert(!_compacted, "cannot compact again");
+ ArchivePtrBitmapCleaner cleaner(_ptrmap, ptr_base(), relocatable_base, relocatable_end);
+ _ptrmap->iterate(&cleaner);
+ compact(cleaner.max_non_null_offset());
+}
+
+void ArchivePtrMarker::compact(size_t max_non_null_offset) {
+ assert(!_compacted, "cannot compact again");
+ _ptrmap->resize(max_non_null_offset + 1, false);
+ _compacted = true;
+}
+
+char* DumpRegion::expand_top_to(char* newtop) {
+ assert(is_allocatable(), "must be initialized and not packed");
+ assert(newtop >= _top, "must not grow backwards");
+ if (newtop > _end) {
+ vm_exit_during_initialization("Unable to allocate memory",
+ "Please reduce the number of shared classes.");
+ ShouldNotReachHere();
+ }
+
+ commit_to(newtop);
+ _top = newtop;
+
+ if (_max_delta > 0) {
+ uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop-1));
+ if (delta > _max_delta) {
+ // This is just a sanity check and should not trip in any real-world usage: it
+ // happens only if you allocate more than 2GB of shared objects, which would
+ // require millions of shared classes.
+ vm_exit_during_initialization("Out of memory in the CDS archive",
+ "Please reduce the number of shared classes.");
+ }
+ }
+
+ return _top;
+}
+
+void DumpRegion::commit_to(char* newtop) {
+ Arguments::assert_is_dumping_archive();
+ char* base = _rs->base();
+ size_t need_committed_size = newtop - base;
+ size_t has_committed_size = _vs->committed_size();
+ if (need_committed_size < has_committed_size) {
+ return;
+ }
+
+ size_t min_bytes = need_committed_size - has_committed_size;
+ size_t preferred_bytes = 1 * M;
+ size_t uncommitted = _vs->reserved_size() - has_committed_size;
+
+ size_t commit = MAX2(min_bytes, preferred_bytes);
+ commit = MIN2(commit, uncommitted);
+ assert(commit <= uncommitted, "sanity");
+
+ if (!_vs->expand_by(commit, false)) {
+ vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
+ need_committed_size));
+ }
+
+ if (DebugDynamicCDS) {
+ dynamic_cds_log->print_cr("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
+ commit, _vs->actual_committed_size(), _vs->high());
+ }
+}
+
+char* DumpRegion::allocate(size_t num_bytes) {
+ char* p = (char*)align_up(_top, (size_t)KlassAlignmentInBytes);
+ char* newtop = p + align_up(num_bytes, (size_t)KlassAlignmentInBytes);
+ expand_top_to(newtop);
+ memset(p, 0, newtop - p);
+ return p;
+}
+
+void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
+ assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
+ intptr_t *p = (intptr_t*)_top;
+ char* newtop = _top + sizeof(intptr_t);
+ expand_top_to(newtop);
+ *p = n;
+ if (need_to_mark) {
+ ArchivePtrMarker::mark_pointer(p);
+ }
+}
+
+void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
+ _rs = rs;
+ _vs = vs;
+ // Start with 0 committed bytes. The memory will be committed as needed.
+ if (!_vs->initialize(*_rs, 0)) {
+ fatal("Unable to allocate memory for shared space");
+ }
+ _base = _top = _rs->base();
+ _end = _rs->base() + _rs->size();
+}
+
+void DumpRegion::pack(DumpRegion* next) {
+ assert(!is_packed(), "sanity");
+ _end = (char*)align_up(_top, (size_t)os::vm_allocation_granularity());
+ _is_packed = true;
+ if (next != NULL) {
+ next->_rs = _rs;
+ next->_vs = _vs;
+ next->_base = next->_top = this->_end;
+ next->_end = _rs->base() + _rs->size();
+ }
+}
+
+void DynamicWriteClosure::do_region(u_char* start, size_t size) {
+ assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
+ assert(size % sizeof(intptr_t) == 0, "bad size");
+ do_tag((int)size);
+ while (size > 0) {
+ _dump_region->append_intptr_t(*(intptr_t*)start, true);
+ start += sizeof(intptr_t);
+ size -= sizeof(intptr_t);
+ }
+}
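+// Note: the stream written by do_region() is self-checking -- the size is
+// emitted first via do_tag(), so the runtime's reading closure can verify the
+// tag before consuming the raw words and fail fast on any layout drift.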
diff --git a/hotspot/src/share/vm/cds/archiveUtils.hpp b/hotspot/src/share/vm/cds/archiveUtils.hpp
new file mode 100644
index 000000000..55c2431a0
--- /dev/null
+++ b/hotspot/src/share/vm/cds/archiveUtils.hpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CDS_ARCHIVEUTILS_HPP
+#define SHARE_VM_CDS_ARCHIVEUTILS_HPP
+
+#include "memory/iterator.hpp"
+#include "runtime/virtualspace.hpp"
+#include "utilities/bitMap.hpp"
+
+class ArchivePtrMarker : AllStatic {
+ static BitMap* _ptrmap;
+ static VirtualSpace* _vs;
+
+ // Once _ptrmap is compacted, we don't allow bit marking anymore. This is to
+ // avoid unintentional copy operations after the bitmap has been finalized and written.
+ static bool _compacted;
+
+ static address* ptr_base() { return (address*)_vs->low(); } // committed lower bound (inclusive)
+ static address* ptr_end() { return (address*)_vs->high(); } // committed upper bound (exclusive)
+
+public:
+ static void initialize(BitMap* ptrmap, VirtualSpace* vs);
+ static void mark_pointer(address* ptr_loc);
+ static void clear_pointer(address* ptr_loc);
+ static void compact(address relocatable_base, address relocatable_end);
+ static void compact(size_t max_non_null_offset);
+
+ template <typename T>
+ static void mark_pointer(T* ptr_loc) {
+ mark_pointer((address*)ptr_loc);
+ }
+
+ template <typename T>
+ static void set_and_mark_pointer(T* ptr_loc, T ptr_value) {
+ *ptr_loc = ptr_value;
+ mark_pointer(ptr_loc);
+ }
+
+ static BitMap* ptrmap() {
+ return _ptrmap;
+ }
+};
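+// Hypothetical usage sketch: when a pointer is stored into the archive buffer,
+// its slot is recorded in the bitmap so the runtime can relocate it later:
+//
+//   Klass** slot = ...;  // some location inside the dump buffer (assumed)
+//   ArchivePtrMarker::set_and_mark_pointer(slot, relocated_klass);
+//
+// At write-out time, compact() trims the bitmap to the last marked slot.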
+
+class DumpRegion {
+private:
+ const char* _name;
+ char* _base;
+ char* _top;
+ char* _end;
+ uintx _max_delta;
+ bool _is_packed;
+ ReservedSpace* _rs;
+ VirtualSpace* _vs;
+
+ void commit_to(char* newtop);
+
+public:
+ DumpRegion(const char* name, uintx max_delta = 0)
+ : _name(name), _base(NULL), _top(NULL), _end(NULL),
+ _max_delta(max_delta), _is_packed(false) {}
+
+ char* expand_top_to(char* newtop);
+ char* allocate(size_t num_bytes);
+
+ void append_intptr_t(intptr_t n, bool need_to_mark = false);
+
+ char* base() const { return _base; }
+ char* top() const { return _top; }
+ char* end() const { return _end; }
+ size_t reserved() const { return _end - _base; }
+ size_t used() const { return _top - _base; }
+ bool is_packed() const { return _is_packed; }
+ bool is_allocatable() const {
+ return !is_packed() && _base != NULL;
+ }
+
+ void print(size_t total_bytes) const;
+ void print_out_of_space_msg(const char* failing_region, size_t needed_bytes);
+
+ void init(ReservedSpace* rs, VirtualSpace* vs);
+
+ void pack(DumpRegion* next = NULL);
+
+ bool contains(char* p) const {
+ return base() <= p && p < top();
+ }
+};
+
+// Closure for serializing initialization data out to a data area to be
+// written to the shared file.
+
+class DynamicWriteClosure : public SerializeClosure {
+private:
+ DumpRegion* _dump_region;
+
+public:
+ DynamicWriteClosure(DumpRegion* r) {
+ _dump_region = r;
+ }
+
+ void do_ptr(void** p) {
+ _dump_region->append_intptr_t((intptr_t)*p, true);
+ }
+
+ void do_u4(u4* p) {
+ _dump_region->append_intptr_t((intptr_t)(*p));
+ }
+
+ void do_tag(int tag) {
+ _dump_region->append_intptr_t((intptr_t)tag);
+ }
+
+ //void do_oop(oop* o);
+ void do_region(u_char* start, size_t size);
+ bool reading() const { return false; }
+};
+
+#endif // SHARE_VM_CDS_ARCHIVEUTILS_HPP
diff --git a/hotspot/src/share/vm/cds/dumpAllocStats.cpp b/hotspot/src/share/vm/cds/dumpAllocStats.cpp
new file mode 100644
index 000000000..e9146555d
--- /dev/null
+++ b/hotspot/src/share/vm/cds/dumpAllocStats.cpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "cds/dumpAllocStats.hpp"
+
+// Returns numerator/denominator as percentage value from 0 to 100. If denominator
+// is zero, return 0.0.
+static inline double percent_of(int numerator, int denominator) {
+ return denominator != 0 ? (double)numerator / denominator * 100.0 : 0.0;
+}
+
+void DumpAllocStats::print_stats(int ro_all, int rw_all) {
+ if (!DebugDynamicCDS) {
+ return;
+ }
+
+ // symbols
+ _counts[RO][SymbolHashentryType] = _symbol_stats.hashentry_count;
+ _bytes [RO][SymbolHashentryType] = _symbol_stats.hashentry_bytes;
+
+ _counts[RO][SymbolBucketType] = _symbol_stats.bucket_count;
+ _bytes [RO][SymbolBucketType] = _symbol_stats.bucket_bytes;
+
+ // prevent divide-by-zero
+ if (ro_all < 1) {
+ ro_all = 1;
+ }
+ if (rw_all < 1) {
+ rw_all = 1;
+ }
+
+ int all_ro_count = 0;
+ int all_ro_bytes = 0;
+ int all_rw_count = 0;
+ int all_rw_bytes = 0;
+
+// To make fmt_stats be a syntactic constant (for format warnings), use #define.
+#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
+ const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
+ const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %";
+
+ dynamic_cds_log->print_cr("Detailed metadata info (excluding heap regions):");
+ dynamic_cds_log->print_cr("%s", hdr);
+ dynamic_cds_log->print_cr("%s", sep);
+ for (int type = 0; type < int(_number_of_types); type ++) {
+ const char *name = type_name((Type)type);
+ int ro_count = _counts[RO][type];
+ int ro_bytes = _bytes [RO][type];
+ int rw_count = _counts[RW][type];
+ int rw_bytes = _bytes [RW][type];
+ int count = ro_count + rw_count;
+ int bytes = ro_bytes + rw_bytes;
+
+ double ro_perc = percent_of(ro_bytes, ro_all);
+ double rw_perc = percent_of(rw_bytes, rw_all);
+ double perc = percent_of(bytes, ro_all + rw_all);
+
+ dynamic_cds_log->print_cr(fmt_stats, name,
+ ro_count, ro_bytes, ro_perc,
+ rw_count, rw_bytes, rw_perc,
+ count, bytes, perc);
+
+ all_ro_count += ro_count;
+ all_ro_bytes += ro_bytes;
+ all_rw_count += rw_count;
+ all_rw_bytes += rw_bytes;
+ }
+
+ int all_count = all_ro_count + all_rw_count;
+ int all_bytes = all_ro_bytes + all_rw_bytes;
+
+ double all_ro_perc = percent_of(all_ro_bytes, ro_all);
+ double all_rw_perc = percent_of(all_rw_bytes, rw_all);
+ double all_perc = percent_of(all_bytes, ro_all + rw_all);
+
+ dynamic_cds_log->print_cr("%s", sep);
+ dynamic_cds_log->print_cr(fmt_stats, "Total",
+ all_ro_count, all_ro_bytes, all_ro_perc,
+ all_rw_count, all_rw_bytes, all_rw_perc,
+ all_count, all_bytes, all_perc);
+
+ assert(all_ro_bytes == ro_all, "everything should have been counted");
+ assert(all_rw_bytes == rw_all, "everything should have been counted");
+
+#undef fmt_stats
+}
diff --git a/hotspot/src/share/vm/cds/dumpAllocStats.hpp b/hotspot/src/share/vm/cds/dumpAllocStats.hpp
new file mode 100644
index 000000000..2f9247bcb
--- /dev/null
+++ b/hotspot/src/share/vm/cds/dumpAllocStats.hpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CDS_DUMPALLOCSTATS_HPP
+#define SHARE_VM_CDS_DUMPALLOCSTATS_HPP
+
+#include "classfile/compactHashtable.hpp"
+#include "memory/allocation.hpp"
+
+// This is for dumping detailed statistics for the allocations
+// in the shared spaces.
+class DumpAllocStats : public ResourceObj {
+public:
+ // Here's poor man's enum inheritance
+#define SHAREDSPACE_OBJ_TYPES_DO(f) \
+ METASPACE_OBJ_TYPES_DO(f) \
+ f(SymbolHashentry) \
+ f(SymbolBucket) \
+ f(Other)
+
+ enum Type {
+ // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
+ SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
+ _number_of_types
+ };
+
+ static const char* type_name(Type type) {
+ switch(type) {
+ SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
+ default:
+ ShouldNotReachHere();
+ return NULL;
+ }
+ }
+
+ CompactHashtableStats _symbol_stats;
+
+ int _counts[2][_number_of_types];
+ int _bytes [2][_number_of_types];
+
+public:
+ enum { RO = 0, RW = 1 };
+
+ DumpAllocStats() {
+ memset(_counts, 0, sizeof(_counts));
+ memset(_bytes, 0, sizeof(_bytes));
+  }
+
+ CompactHashtableStats* symbol_stats() { return &_symbol_stats; }
+
+ void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
+ assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
+ int which = (read_only) ? RO : RW;
+ _counts[which][type] ++;
+ _bytes [which][type] += byte_size;
+ }
+
+ void record_other_type(int byte_size, bool read_only) {
+ int which = (read_only) ? RO : RW;
+ _bytes [which][OtherType] += byte_size;
+ }
+
+ void print_stats(int ro_all, int rw_all);
+};
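+// Assumed usage sketch (driven by ArchiveBuilder): each copied MetaspaceObj is
+// recorded as it is written, and totals are printed once both regions are done:
+//
+//   DumpAllocStats stats;
+//   stats.record(MetaspaceObj::SymbolType, byte_size, /*read_only=*/true);
+//   ...
+//   stats.print_stats(ro_all_bytes, rw_all_bytes);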
+
+#endif // SHARE_VM_CDS_DUMPALLOCSTATS_HPP
diff --git a/hotspot/src/share/vm/cds/dynamicArchive.cpp b/hotspot/src/share/vm/cds/dynamicArchive.cpp
new file mode 100644
index 000000000..efed275c8
--- /dev/null
+++ b/hotspot/src/share/vm/cds/dynamicArchive.cpp
@@ -0,0 +1,412 @@
+/*
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/archiveUtils.hpp"
+#include "cds/dynamicArchive.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "runtime/vm_operations.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/vmThread.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "memory/filemap.hpp"
+#include "memory/metaspaceClosure.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/align.hpp"
+#include "utilities/bitMap.hpp"
+#include "utilities/exceptions.hpp"
+
+class DynamicArchiveBuilder : public ArchiveBuilder {
+public:
+ static int dynamic_dump_method_comparator(Method* a, Method* b) {
+ Symbol* a_name = a->name();
+ Symbol* b_name = b->name();
+
+ if (a_name == b_name) {
+ return 0;
+ }
+
+ u4 a_offset = ArchiveBuilder::current()->any_to_offset_u4(a_name);
+ u4 b_offset = ArchiveBuilder::current()->any_to_offset_u4(b_name);
+
+ if (a_offset < b_offset) {
+ return -1;
+ } else {
+ assert(a_offset > b_offset, "must be");
+ return 1;
+ }
+ }
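+  // Example of the resulting order: since copied Symbols sit at fixed offsets
+  // in the buffer, methods compare by those offsets -- if "bar" was copied at
+  // offset 0x80 and "foo" at 0x100, every method named "bar" sorts before any
+  // named "foo", regardless of where the archive is mapped at runtime.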
+
+public:
+ FileMapInfo::DynamicArchiveHeader* _header;
+
+ void init_header();
+ void release_header();
+ void sort_methods();
+ void sort_methods(InstanceKlass* ik) const;
+ void remark_pointers_for_instance_klass(InstanceKlass* k, bool should_mark) const;
+ void write_archive(char* serialized_data);
+ virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
+ SystemDictionaryShared::dumptime_classes_do(it);
+ }
+
+ // Do this before and after the archive dump to see if any corruption
+ // is caused by dynamic dumping.
+ void verify_universe(const char* info) {
+ if (VerifyBeforeExit) {
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Verify %s", info);
+ }
+ // Among other things, this ensures that Eden top is correct.
+ Universe::heap()->prepare_for_verify();
+ Universe::verify(info);
+ }
+ }
+
+ void doit() {
+ SystemDictionaryShared::start_dumping();
+
+ verify_universe("Before CDS dynamic dump");
+ DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
+
+    // DumpTimeTable_lock is not strictly needed, since JDK 8 does not support
+    // jcmd-triggered dumps. The lock is kept anyway for consistency.
+ MutexLockerEx ml(DumpTimeTable_lock, Mutex::_no_safepoint_check_flag);
+ SystemDictionaryShared::check_excluded_classes();
+ SystemDictionaryShared::replace_klass_in_constantPool();
+
+ init_header();
+ gather_source_objs();
+ if (klasses()->length() == 0) {
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("No classes gathered, so do not generate Dynamic CDS jsa");
+ }
+ return;
+ }
+ reserve_buffer();
+
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Copying %d klasses and %d symbols",
+ klasses()->length(), symbols()->length());
+ }
+ dump_rw_metadata();
+ dump_ro_metadata();
+ relocate_metaspaceobj_embedded_pointers();
+ relocate_roots();
+
+ verify_estimate_size(_estimated_metaspaceobj_bytes, "MetaspaceObjs");
+
+ char* serialized_data;
+ {
+ // Write the symbol table and system dictionaries to the RO space.
+ // Note that these tables still point to the *original* objects, so
+      // they still need to be translated to the correct relocated addresses.
+ assert(current_dump_space() == ro_region(), "Must be RO space");
+ SymbolTable::write_to_archive(symbols());
+
+ ArchiveBuilder::OtherROAllocMark mark;
+ SystemDictionaryShared::write_to_archive();
+
+ serialized_data = ro_region()->top();
+ DynamicWriteClosure wc(ro_region());
+ SymbolTable::serialize_shared_table_header(&wc);
+ SystemDictionaryShared::serialize_dictionary_headers(&wc);
+ }
+
+ verify_estimate_size(_estimated_hashtable_bytes, "Hashtables");
+
+ sort_methods();
+
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Make classes shareable");
+ }
+ make_klasses_shareable();
+
+ patch_shared_obj_vtable();
+
+ relocate_to_requested();
+
+ write_archive(serialized_data);
+ release_header();
+
+ assert(_num_dump_regions_used == _total_dump_regions, "must be");
+ verify_universe("After CDS dynamic dump");
+ }
+};
+
+void DynamicArchiveBuilder::init_header() {
+ FileMapInfo* mapinfo = new FileMapInfo(false);
+ assert(FileMapInfo::dynamic_info() == mapinfo, "must be");
+ _header = mapinfo->dynamic_header();
+
+ FileMapInfo* base_info = FileMapInfo::current_info();
+ _header->set_base_header_crc(base_info->header()->crc());
+ for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+ _header->set_base_region_crc(i, base_info->header()->space_crc(i));
+ }
+
+ _header->populate(base_info, base_info->alignment());
+}
+
+void DynamicArchiveBuilder::release_header() {
+ // We temporarily allocated a dynamic FileMapInfo for dumping, which makes it appear we
+ // have mapped a dynamic archive, but we actually have not. We are in a safepoint now.
+ // Let's free it so that if class loading happens after we leave the safepoint, nothing
+ // bad will happen.
+ assert(SafepointSynchronize::is_at_safepoint(), "must be");
+ FileMapInfo *mapinfo = FileMapInfo::dynamic_info();
+ assert(mapinfo != NULL && _header == mapinfo->dynamic_header(), "must be");
+ delete mapinfo;
+ assert(!DynamicArchive::is_mapped(), "must be");
+ _header = NULL;
+}
+
+void DynamicArchiveBuilder::sort_methods() {
+  // Newer JDK releases can trigger a dynamic CDS dump via jcmd, so the JVM must
+  // keep running after the dump. JDK 8 has no such support and simply exits
+  // after dumping.
+ InstanceKlass::disable_method_binary_search();
+ for (int i = 0; i < klasses()->length(); i++) {
+ Klass* k = klasses()->at(i);
+ if (k->oop_is_instance()) {
+ sort_methods(InstanceKlass::cast(k));
+ }
+ }
+}
+
+// The address order of the copied Symbols may be different than when the original
+// klasses were created. Re-sort all the tables. See Method::sort_methods().
+void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {
+ assert(ik != NULL, "DynamicArchiveBuilder currently doesn't support dumping the base archive");
+ if (MetaspaceShared::is_in_shared_space(ik)) {
+ // We have reached a supertype that's already in the base archive
+ return;
+ }
+
+ if (ik->java_mirror() == NULL) {
+ // NULL mirror means this class has already been visited and methods are already sorted
+ return;
+ }
+ ik->remove_java_mirror();
+
+ if (DebugDynamicCDS) {
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("sorting methods for " PTR_FORMAT " (" PTR_FORMAT ") %s",
+ p2i(ik), p2i(to_requested(ik)), ik->external_name());
+ }
+ // Method sorting may re-layout the [iv]tables, which would change the offset(s)
+ // of the locations in an InstanceKlass that would contain pointers. Let's clear
+ // all the existing pointer marking bits, and re-mark the pointers after sorting.
+ remark_pointers_for_instance_klass(ik, false);
+
+ // Make sure all supertypes have been sorted
+ sort_methods(ik->java_super());
+ Array<Klass*>* interfaces = ik->local_interfaces();
+ int len = interfaces->length();
+ for (int i = 0; i < len; i++) {
+ sort_methods(InstanceKlass::cast(interfaces->at(i)));
+ }
+
+#ifdef ASSERT
+ if (ik->methods() != NULL) {
+ for (int m = 0; m < ik->methods()->length(); m++) {
+ Symbol* name = ik->methods()->at(m)->name();
+ assert(MetaspaceShared::is_in_shared_space(name) || is_in_buffer_space(name), "must be");
+ }
+ }
+ if (ik->default_methods() != NULL) {
+ for (int m = 0; m < ik->default_methods()->length(); m++) {
+ Symbol* name = ik->default_methods()->at(m)->name();
+ assert(MetaspaceShared::is_in_shared_space(name) || is_in_buffer_space(name), "must be");
+ }
+ }
+#endif
+
+ Method::sort_methods(ik->methods(), /*idempotent=*/false, /*set_idnums=*/true, dynamic_dump_method_comparator);
+ if (ik->default_methods() != NULL) {
+ Method::sort_methods(ik->default_methods(), /*idempotent=*/false, /*set_idnums=*/false, dynamic_dump_method_comparator);
+ }
+
+ EXCEPTION_MARK;
+
+  ik->vtable()->initialize_vtable(false, CATCH); // No need to check constraints
+ CLEAR_PENDING_EXCEPTION;
+ ik->itable()->initialize_itable(false, CATCH);
+ CLEAR_PENDING_EXCEPTION;
+
+ // Set all the pointer marking bits after sorting.
+ remark_pointers_for_instance_klass(ik, true);
+}
+
+template<bool should_mark>
+class PointerRemarker: public MetaspaceClosure {
+public:
+ virtual bool do_ref(Ref* ref, bool read_only) {
+ if (should_mark) {
+ ArchivePtrMarker::mark_pointer(ref->addr());
+ } else {
+ ArchivePtrMarker::clear_pointer(ref->addr());
+ }
+ return false; // don't recurse
+ }
+};
+
+void DynamicArchiveBuilder::remark_pointers_for_instance_klass(InstanceKlass* k, bool should_mark) const {
+ if (should_mark) {
+ PointerRemarker<true> marker;
+ k->metaspace_pointers_do(&marker);
+ marker.finish();
+ } else {
+ PointerRemarker<false> marker;
+ k->metaspace_pointers_do(&marker);
+ marker.finish();
+ }
+}
+
+void DynamicArchiveBuilder::write_archive(char* serialized_data) {
+ _header->set_serialized_data(serialized_data);
+
+ FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
+ assert(dynamic_info != NULL, "Sanity");
+
+ // Update file offset
+ ArchiveBuilder::write_archive(dynamic_info);
+
+ // Write into file
+ dynamic_info->open_for_write();
+ dynamic_info->set_requested_base((char*)MetaspaceShared::requested_base_address());
+ dynamic_info->set_header_base_archive_name_size(strlen(Arguments::GetSharedArchivePath()) + 1);
+ dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
+ ArchiveBuilder::write_archive(dynamic_info);
+
+ address base = _requested_dynamic_archive_bottom;
+ address top = _requested_dynamic_archive_top;
+ size_t file_size = pointer_delta(top, base, sizeof(char));
+
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT
+ " , " SIZE_FORMAT " bytes total]",
+ p2i(base), p2i(top), file_size);
+
+ dynamic_cds_log->print_cr("%d klasses; %d symbols", klasses()->length(), symbols()->length());
+ }
+}
+
+class VM_GC_Sync_Operation : public VM_Operation {
+public:
+
+ VM_GC_Sync_Operation() : VM_Operation() { }
+
+ // Acquires the Heap_lock.
+ virtual bool doit_prologue() {
+ Heap_lock->lock();
+ return true;
+ }
+ // Releases the Heap_lock.
+ virtual void doit_epilogue() {
+ Heap_lock->unlock();
+ }
+};
+
+class VM_PopulateDynamicDumpSharedSpace : public VM_GC_Sync_Operation {
+ DynamicArchiveBuilder builder;
+public:
+ VM_PopulateDynamicDumpSharedSpace() : VM_GC_Sync_Operation() {}
+ VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
+ void doit() {
+    if (!DynamicDumpSharedSpaces) {
+ return;
+ }
+ ResourceMark rm;
+
+ if (SystemDictionaryShared::empty_dumptime_table()) {
+ tty->print_cr("There is no class to be included in the dynamic archive.");
+ return;
+ }
+
+ builder.doit();
+
+ DynamicDumpSharedSpaces = false;
+ exit(0);
+ }
+};
+
+bool DynamicArchive::_has_been_dumped_once = false;
+
+void DynamicArchive::prepare_for_dynamic_dumping_at_exit() {
+ {
+ MutexLockerEx ml(DumpTimeTable_lock, Mutex::_no_safepoint_check_flag);
+ if (DynamicArchive::has_been_dumped_once()) {
+ return;
+ } else {
+ DynamicArchive::set_has_been_dumped_once();
+ }
+ }
+ EXCEPTION_MARK;
+ ResourceMark rm(THREAD);
+ MetaspaceShared::link_and_cleanup_shared_classes(THREAD);
+
+ if (HAS_PENDING_EXCEPTION) {
+ tty->print_cr("ArchiveClassesAtExit has failed");
+ tty->print_cr("%s: %s", PENDING_EXCEPTION->klass()->external_name(),
+ java_lang_String::as_utf8_string(java_lang_Throwable::message(PENDING_EXCEPTION)));
+ // We cannot continue to dump the archive anymore.
+ DynamicDumpSharedSpaces = false;
+ CLEAR_PENDING_EXCEPTION;
+ }
+}
+
+void DynamicArchive::dump() {
+ if (Arguments::GetSharedDynamicArchivePath() == NULL) {
+ tty->print_cr("SharedDynamicArchivePath is not specified");
+ return;
+ }
+
+ VM_PopulateDynamicDumpSharedSpace op;
+ VMThread::execute(&op);
+}
+
+bool DynamicArchive::validate(FileMapInfo* dynamic_info) {
+ assert(!dynamic_info->is_static(), "must be");
+ // Check if the recorded base archive matches with the current one
+ FileMapInfo* base_info = FileMapInfo::current_info();
+ FileMapInfo::DynamicArchiveHeader* dynamic_header = dynamic_info->dynamic_header();
+
+ // Check the header crc
+ if (dynamic_header->base_header_crc() != base_info->crc()) {
+ FileMapInfo::fail_continue("Dynamic archive cannot be used: static archive header checksum verification failed.");
+ return false;
+ }
+
+ // Check each space's crc
+ for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+ if (dynamic_header->base_region_crc(i) != base_info->space_crc(i)) {
+ FileMapInfo::fail_continue("Dynamic archive cannot be used: static archive region #%d checksum verification failed.", i);
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/hotspot/src/share/vm/cds/dynamicArchive.hpp b/hotspot/src/share/vm/cds/dynamicArchive.hpp
new file mode 100644
index 000000000..1d5b71221
--- /dev/null
+++ b/hotspot/src/share/vm/cds/dynamicArchive.hpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CDS_DYNAMICARCHIVE_HPP
+#define SHARE_VM_CDS_DYNAMICARCHIVE_HPP
+
+//#include "classfile/compactHashtable.hpp"
+#include "memory/allocation.hpp"
+#include "memory/filemap.hpp"
+#include "memory/memRegion.hpp"
+#include "runtime/virtualspace.hpp"
+#include "oops/oop.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/resourceHash.hpp"
+
+#if INCLUDE_CDS
+
+// Fixme
+class DynamicArchive : AllStatic {
+ static bool _has_been_dumped_once;
+public:
+ static void prepare_for_dynamic_dumping_at_exit();
+ static void dump();
+ static bool has_been_dumped_once() { return _has_been_dumped_once; }
+ static void set_has_been_dumped_once() { _has_been_dumped_once = true; }
+ static bool is_mapped() { return FileMapInfo::dynamic_info() != NULL; }
+ static bool validate(FileMapInfo* dynamic_info);
+};
+
+#endif // INCLUDE_CDS
+#endif // SHARE_VM_CDS_DYNAMICARCHIVE_HPP
diff --git a/hotspot/src/share/vm/classfile/classFileParser.cpp b/hotspot/src/share/vm/classfile/classFileParser.cpp
index 5c36a9d6f..ae9199525 100644
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp
@@ -4376,6 +4376,13 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
instanceKlassHandle this_klass (THREAD, preserve_this_klass);
debug_only(this_klass->verify();)
+#if INCLUDE_CDS
+ if (DynamicDumpSharedSpaces && !SystemDictionary::is_builtin_loader(class_loader)) {
+ this_klass->set_shared_classpath_index(UNREGISTERED_INDEX);
+ SystemDictionaryShared::set_shared_class_misc_info(this_klass(), cfs);
+ }
+#endif // INCLUDE_CDS
+
// Clear class if no error has occurred so destructor doesn't deallocate it
_klass = NULL;
return this_klass;
diff --git a/hotspot/src/share/vm/classfile/classLoaderExt.hpp b/hotspot/src/share/vm/classfile/classLoaderExt.hpp
index 7b2360af9..3bd4f3bde 100644
--- a/hotspot/src/share/vm/classfile/classLoaderExt.hpp
+++ b/hotspot/src/share/vm/classfile/classLoaderExt.hpp
@@ -48,7 +48,7 @@ public:
instanceKlassHandle record_result(const int classpath_index,
ClassPathEntry* e, instanceKlassHandle result, TRAPS) {
if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
- if (DumpSharedSpaces) {
+ if (DumpSharedSpaces || DynamicDumpSharedSpaces) {
result->set_shared_classpath_index(classpath_index);
}
return result;
diff --git a/hotspot/src/share/vm/classfile/compactHashtable.cpp b/hotspot/src/share/vm/classfile/compactHashtable.cpp
new file mode 100644
index 000000000..232a89fa1
--- /dev/null
+++ b/hotspot/src/share/vm/classfile/compactHashtable.cpp
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "cds/archiveBuilder.hpp"
+#include "classfile/compactHashtable.hpp"
+#include "classfile/javaClasses.hpp"
+#include "memory/metadataFactory.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/align.hpp"
+#include "utilities/numberSeq.hpp"
+
+/////////////////////////////////////////////////////
+//
+// The compact hash table writer implementations
+//
+CompactHashtableWriter::CompactHashtableWriter(int num_entries,
+ CompactHashtableStats* stats) {
+ Arguments::assert_is_dumping_archive();
+ assert(num_entries >= 0, "sanity");
+ _num_buckets = calculate_num_buckets(num_entries);
+ assert(_num_buckets > 0, "no buckets");
+
+ _num_entries_written = 0;
+ _buckets = NEW_C_HEAP_ARRAY(GrowableArray<Entry>*, _num_buckets, mtSymbol);
+ for (int i = 0; i < _num_buckets; i++) {
+ _buckets[i] = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Entry>(0, true, mtSymbol);
+ }
+
+ _stats = stats;
+ _compact_buckets = NULL;
+ _compact_entries = NULL;
+ _num_empty_buckets = 0;
+ _num_value_only_buckets = 0;
+ _num_other_buckets = 0;
+}
+
+CompactHashtableWriter::~CompactHashtableWriter() {
+ for (int index = 0; index < _num_buckets; index++) {
+ GrowableArray<Entry>* bucket = _buckets[index];
+ delete bucket;
+ }
+
+ FREE_C_HEAP_ARRAY(GrowableArray<Entry>*, _buckets, mtSymbol);
+}
+
+size_t CompactHashtableWriter::estimate_size(int num_entries) {
+ int num_buckets = calculate_num_buckets(num_entries);
+ size_t bucket_bytes = ArchiveBuilder::ro_array_bytesize<u4>(num_buckets + 1);
+
+ // In worst case, we have no VALUE_ONLY_BUCKET_TYPE, so each entry takes 2 slots
+ int entries_space = 2 * num_entries;
+ size_t entry_bytes = ArchiveBuilder::ro_array_bytesize<u4>(entries_space);
+
+ return bucket_bytes
+ + entry_bytes
+ + SimpleCompactHashtable::calculate_header_size();
+}
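+// Illustrative arithmetic: 1000 entries with the default bucket size of 4 give
+// 250 buckets, so this reserves 251 u4 bucket slots (one extra end marker),
+// plus a worst case of 2000 u4 entry slots (hash + offset per entry), plus the
+// serialized header. Value-only buckets use a single slot, so the real table
+// is usually smaller.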
+
+// Add an entry to the temporary hash table
+void CompactHashtableWriter::add(unsigned int hash, u4 value) {
+ int index = hash % _num_buckets;
+ _buckets[index]->append_if_missing(Entry(hash, value));
+ _num_entries_written++;
+}
+
+void CompactHashtableWriter::allocate_table() {
+ int entries_space = 0;
+ for (int index = 0; index < _num_buckets; index++) {
+ GrowableArray<Entry>* bucket = _buckets[index];
+ int bucket_size = bucket->length();
+ if (bucket_size == 1) {
+ entries_space++;
+ } else if (bucket_size > 1) {
+ entries_space += 2 * bucket_size;
+ }
+ }
+
+ if (entries_space & ~BUCKET_OFFSET_MASK) {
+ vm_exit_during_initialization("CompactHashtableWriter::allocate_table: Overflow! "
+ "Too many entries.");
+ }
+
+ _compact_buckets = ArchiveBuilder::new_ro_array<u4>(_num_buckets + 1);
+ _compact_entries = ArchiveBuilder::new_ro_array<u4>(entries_space);
+
+ _stats->bucket_count = _num_buckets;
+ _stats->bucket_bytes = align_up(_compact_buckets->size() * BytesPerWord,
+ KlassAlignmentInBytes);
+ _stats->hashentry_count = _num_entries_written;
+ _stats->hashentry_bytes = align_up(_compact_entries->size() * BytesPerWord,
+ KlassAlignmentInBytes);
+}
+
+// Write the compact table's buckets
+void CompactHashtableWriter::dump_table(NumberSeq* summary) {
+ u4 offset = 0;
+ for (int index = 0; index < _num_buckets; index++) {
+ GrowableArray<Entry>* bucket = _buckets[index];
+ int bucket_size = bucket->length();
+ if (bucket_size == 1) {
+      // a bucket with one entry is compacted and stores only the value offset
+ _compact_buckets->at_put(index, BUCKET_INFO(offset, VALUE_ONLY_BUCKET_TYPE));
+
+ Entry ent = bucket->at(0);
+ _compact_entries->at_put(offset++, ent.value());
+ _num_value_only_buckets++;
+ } else {
+      // regular bucket: each entry is a (hash, value) pair
+ _compact_buckets->at_put(index, BUCKET_INFO(offset, REGULAR_BUCKET_TYPE));
+
+ for (int i=0; i<bucket_size; i++) {
+ Entry ent = bucket->at(i);
+ _compact_entries->at_put(offset++, u4(ent.hash())); // write entry hash
+ _compact_entries->at_put(offset++, ent.value());
+ }
+ if (bucket_size == 0) {
+ _num_empty_buckets++;
+ } else {
+ _num_other_buckets++;
+ }
+ }
+ summary->add(bucket_size);
+ }
+
+ // Mark the end of the buckets
+ _compact_buckets->at_put(_num_buckets, BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE));
+ assert(offset == (u4)_compact_entries->length(), "sanity");
+}
+
+// Write the compact table
+void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table_name) {
+ NumberSeq summary;
+ allocate_table();
+ dump_table(&summary);
+
+ int table_bytes = _stats->bucket_bytes + _stats->hashentry_bytes;
+ address base_address = address(SharedBaseAddress);
+ cht->init(base_address, _num_entries_written, _num_buckets,
+ _compact_buckets->data(), _compact_entries->data());
+
+ if (InfoDynamicCDS) {
+ double avg_cost = 0.0;
+ if (_num_entries_written > 0) {
+ avg_cost = double(table_bytes)/double(_num_entries_written);
+ }
+ dynamic_cds_log->print_cr("Shared %s table stats -------- base: " PTR_FORMAT,
+ table_name, (intptr_t)base_address);
+ dynamic_cds_log->print_cr("Number of entries : %9d", _num_entries_written);
+ dynamic_cds_log->print_cr("Total bytes used : %9d", table_bytes);
+ dynamic_cds_log->print_cr("Average bytes per entry : %9.3f", avg_cost);
+ dynamic_cds_log->print_cr("Average bucket size : %9.3f", summary.avg());
+ dynamic_cds_log->print_cr("Variance of bucket size : %9.3f", summary.variance());
+ dynamic_cds_log->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
+ dynamic_cds_log->print_cr("Maximum bucket size : %9d", (int)summary.maximum());
+ dynamic_cds_log->print_cr("Empty buckets : %9d", _num_empty_buckets);
+ dynamic_cds_log->print_cr("Value_Only buckets : %9d", _num_value_only_buckets);
+ dynamic_cds_log->print_cr("Other buckets : %9d", _num_other_buckets);
+ }
+}
+
+/////////////////////////////////////////////////////////////
+//
+// The CompactHashtable implementation
+//
+
+void SimpleCompactHashtable::init(address base_address, u4 entry_count, u4 bucket_count, u4* buckets, u4* entries) {
+ _bucket_count = bucket_count;
+ _entry_count = entry_count;
+ _base_address = base_address;
+ _buckets = buckets;
+ _entries = entries;
+}
+
+size_t SimpleCompactHashtable::calculate_header_size() {
+  // The table has 5 fields, each counted as one sizeof(intptr_t) slot in the
+  // serialized stream (see DynamicWriteClosure::do_u4). Only 4 are serialized;
+  // _base_address is recomputed at runtime, so this is a safe overestimate.
+ size_t bytes = sizeof(intptr_t) * 5;
+ return bytes;
+}
+
+void SimpleCompactHashtable::serialize_header(SerializeClosure* soc) {
+ // NOTE: if you change this function, you MUST change the number 5 in
+ // calculate_header_size() accordingly.
+ soc->do_u4(&_entry_count);
+ soc->do_u4(&_bucket_count);
+ soc->do_ptr((void**)&_buckets);
+ soc->do_ptr((void**)&_entries);
+ if (soc->reading()) {
+ _base_address = (address)SharedBaseAddress;
+ }
+}
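+// Note: serialize_header() is direction-agnostic. At dump time the
+// DynamicWriteClosure appends the four fields (each widened to an intptr_t
+// slot); at run time the matching read closure refills them from the mapped
+// archive and rebases _base_address onto the current SharedBaseAddress.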
diff --git a/hotspot/src/share/vm/classfile/compactHashtable.hpp b/hotspot/src/share/vm/classfile/compactHashtable.hpp
new file mode 100644
index 000000000..727b3ebfb
--- /dev/null
+++ b/hotspot/src/share/vm/classfile/compactHashtable.hpp
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_COMPACTHASHTABLE_HPP
+#define SHARE_VM_CLASSFILE_COMPACTHASHTABLE_HPP
+
+#include "oops/symbol.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/array.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/numberSeq.hpp"
+
+
+template <
+ typename K,
+ typename V,
+ V (*DECODE)(address base_address, u4 offset),
+ bool (*EQUALS)(V value, K key, int len)
+ >
+class CompactHashtable;
+class NumberSeq;
+class SimpleCompactHashtable;
+
+// Stats for symbol tables in the CDS archive
+class CompactHashtableStats {
+public:
+ int hashentry_count;
+ int hashentry_bytes;
+ int bucket_count;
+ int bucket_bytes;
+
+ CompactHashtableStats() :
+ hashentry_count(0), hashentry_bytes(0),
+ bucket_count(0), bucket_bytes(0) {}
+};
+
+/////////////////////////////////////////////////////////////////////////
+//
+// The compact hash table writer. Used at dump time for writing out
+// the compact table to the shared archive.
+//
+// At dump time, the CompactHashtableWriter obtains all entries from the
+// symbol/string table and adds them to a new temporary hash table. The hash
+// table size (number of buckets) is calculated as
+// 'num_entries / bucket_size' (at least 1; see calculate_num_buckets()).
+// The default bucket size is 4 and can be changed with the
+// -XX:SharedSymbolTableBucketSize option. 4 is chosen because it produces
+// smaller buckets on average, for faster lookup. It also yields relatively
+// few empty buckets and a good distribution of the entries.
+//
+// We use a simple hash function (hash % num_buckets) for the table.
+// The new table is compacted when written out. Please see the comments
+// above the CompactHashtable class for the table layout details. The bucket
+// offsets are written to the archive as part of the compact table. Each
+// bucket offset is encoded in the low 30 bits (0-29) and the bucket type
+// (regular or value-only) is encoded in bits [31, 30]. For buckets with more
+// than one entry, both the hash and the entry offset are written to the
+// table. For buckets with only one entry, only the entry offset is written,
+// and the bucket is tagged as value-only in its type bits.
+// Empty buckets store no entries, but their offsets are still written out
+// for faster lookup.
+//
+class CompactHashtableWriter: public StackObj {
+public:
+ class Entry {
+ unsigned int _hash;
+ u4 _value;
+
+ public:
+ Entry() {}
+ Entry(unsigned int hash, u4 val) : _hash(hash), _value(val) {}
+
+ u4 value() {
+ return _value;
+ }
+ unsigned int hash() {
+ return _hash;
+ }
+
+ bool operator==(const CompactHashtableWriter::Entry& other) {
+ return (_value == other._value && _hash == other._hash);
+ }
+ }; // class CompactHashtableWriter::Entry
+
+private:
+ int _num_entries_written;
+ int _num_buckets;
+ int _num_empty_buckets;
+ int _num_value_only_buckets;
+ int _num_other_buckets;
+ GrowableArray<Entry>** _buckets;
+ CompactHashtableStats* _stats;
+ Array<u4>* _compact_buckets;
+ Array<u4>* _compact_entries;
+
+public:
+ // This is called at dump-time only
+ CompactHashtableWriter(int num_entries, CompactHashtableStats* stats);
+ ~CompactHashtableWriter();
+
+ void add(unsigned int hash, u4 value);
+
+private:
+ void allocate_table();
+ void dump_table(NumberSeq* summary);
+
+ static int calculate_num_buckets(int num_entries) {
+ int num_buckets = num_entries / SharedSymbolTableBucketSize;
+    // The calculation can result in zero buckets; we need at least one.
+ return (num_buckets < 1) ? 1 : num_buckets;
+ }
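+  // Example: unlike a ceiling division, this truncates -- 1023 entries with
+  // the default bucket size of 4 give 255 buckets (average load just over 4),
+  // and anything smaller than one bucket's worth still gets a single bucket.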
+
+public:
+ void dump(SimpleCompactHashtable *cht, const char* table_name);
+
+ static size_t estimate_size(int num_entries);
+};
+
+#define REGULAR_BUCKET_TYPE 0
+#define VALUE_ONLY_BUCKET_TYPE 1
+#define TABLEEND_BUCKET_TYPE 3
+#define BUCKET_OFFSET_MASK 0x3FFFFFFF
+#define BUCKET_OFFSET(info) ((info) & BUCKET_OFFSET_MASK)
+#define BUCKET_TYPE_SHIFT 30
+#define BUCKET_TYPE(info) (((info) & ~BUCKET_OFFSET_MASK) >> BUCKET_TYPE_SHIFT)
+#define BUCKET_INFO(offset, type) (((type) << BUCKET_TYPE_SHIFT) | ((offset) & BUCKET_OFFSET_MASK))
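+// Worked encoding example: a value-only bucket whose entries start at offset 17
+// is stored as BUCKET_INFO(17, VALUE_ONLY_BUCKET_TYPE) == (1 << 30) | 17
+// == 0x40000011; BUCKET_OFFSET() recovers 17 and BUCKET_TYPE() recovers 1.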
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// CompactHashtable is used to store the CDS archive's symbol/string tables.
+//
+// Because these tables are read-only (no entries can be added/deleted) at run-time
+// and tend to have large number of entries, we try to minimize the footprint
+// cost per entry.
+//
+// The CompactHashtable is split into two arrays
+//
+// u4 buckets[num_buckets+1]; // bit[31,30]: type; bit[29-0]: offset
+// u4 entries[<variable size>]
+//
+// The size of buckets[] is 'num_buckets + 1'. Each entry of
+// buckets[] is a 32-bit encoding of the bucket type and bucket offset,
+// with the type in the left-most 2 bits and the offset in the remaining
+// 30 bits. The last entry is of a special type; it marks the end of the
+// last bucket.
+//
+// There are two types of buckets: regular buckets and value_only buckets.
+// Value_only buckets have '01' in their highest 2 bits; regular buckets
+// have '00' in their highest 2 bits.
+//
+// For normal buckets, each entry is 8 bytes in the entries[]:
+// u4 hash; /* symbol/string hash */
+// union {
+// u4 offset; /* Symbol* sym = (Symbol*)(base_address + offset) */
+// narrowOop str; /* String narrowOop encoding */
+// }
+//
+//
+// For value_only buckets, each entry has only the 4-byte 'offset' in the entries[].
+//
+// Example -- note that the second bucket is a VALUE_ONLY_BUCKET_TYPE so the hash code
+// is skipped.
+// buckets[0, 4, 5, ....]
+// | | |
+// | | +---+
+// | | |
+// | +----+ |
+// v v v
+// entries[H,O,H,O,O,H,O,H,O.....]
+//
+// See CompactHashtable::lookup() for how the table is searched at runtime.
+// See CompactHashtableWriter::dump() for how the table is written at CDS
+// dump time.
+//
+class SimpleCompactHashtable {
+protected:
+ address _base_address;
+ u4 _bucket_count;
+ u4 _entry_count;
+ u4* _buckets;
+ u4* _entries;
+
+public:
+ SimpleCompactHashtable() {
+ _entry_count = 0;
+ _bucket_count = 0;
+ _buckets = 0;
+ _entries = 0;
+ }
+
+ void reset() {
+ _bucket_count = 0;
+ _entry_count = 0;
+ _buckets = 0;
+ _entries = 0;
+ }
+
+ void init(address base_address, u4 entry_count, u4 bucket_count, u4* buckets, u4* entries);
+
+ // Read/Write the table's header from/to the CDS archive
+ void serialize_header(SerializeClosure* soc) NOT_CDS_RETURN;
+
+ inline bool empty() const {
+ return (_entry_count == 0);
+ }
+
+ inline size_t entry_count() const {
+ return _entry_count;
+ }
+
+ static size_t calculate_header_size();
+};
+
+template <
+ typename K,
+ typename V,
+ V (*DECODE)(address base_address, u4 offset),
+ bool (*EQUALS)(V value, K key, int len)
+ >
+class CompactHashtable : public SimpleCompactHashtable {
+ friend class VMStructs;
+
+ V decode(u4 offset) const {
+ return DECODE(_base_address, offset);
+ }
+
+public:
+ // Lookup a value V from the compact table using key K
+ inline V lookup(K key, unsigned int hash, int len) const {
+ if (_entry_count > 0) {
+ int index = hash % _bucket_count;
+ u4 bucket_info = _buckets[index];
+ u4 bucket_offset = BUCKET_OFFSET(bucket_info);
+ int bucket_type = BUCKET_TYPE(bucket_info);
+ u4* entry = _entries + bucket_offset;
+
+ if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
+ V value = decode(entry[0]);
+ if (EQUALS(value, key, len)) {
+ return value;
+ }
+ } else {
+        // This is a regular bucket, which has more than one
+        // entry. Each entry is a (hash, offset) pair.
+        // Scan until the end of the bucket.
+ u4* entry_max = _entries + BUCKET_OFFSET(_buckets[index + 1]);
+ while (entry < entry_max) {
+ unsigned int h = (unsigned int)(entry[0]);
+ if (h == hash) {
+ V value = decode(entry[1]);
+ if (EQUALS(value, key, len)) {
+ return value;
+ }
+ }
+ entry += 2;
+ }
+ }
+ }
+ return NULL;
+ }
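+  // Lookup walk-through: hash the key, probe buckets[hash % _bucket_count];
+  // a value-only bucket decodes its single offset and compares once, while a
+  // regular bucket scans (hash, offset) pairs up to the offset stored in the
+  // next bucket slot. Only one bucket is ever touched, so misses stay cheap.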
+
+ template <class ITER>
+ inline void iterate(ITER* iter) const {
+ for (u4 i = 0; i < _bucket_count; i++) {
+ u4 bucket_info = _buckets[i];
+ u4 bucket_offset = BUCKET_OFFSET(bucket_info);
+ int bucket_type = BUCKET_TYPE(bucket_info);
+ u4* entry = _entries + bucket_offset;
+
+ if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
+ iter->do_value(decode(entry[0]));
+ } else {
+        u4* entry_max = _entries + BUCKET_OFFSET(_buckets[i + 1]);
+ while (entry < entry_max) {
+ iter->do_value(decode(entry[1]));
+ entry += 2;
+ }
+ }
+ }
+ }
+
+ void print_table_statistics(outputStream* st, const char* name) {
+ st->print_cr("%s statistics:", name);
+ int total_entries = 0;
+ int max_bucket = 0;
+ for (u4 i = 0; i < _bucket_count; i++) {
+ u4 bucket_info = _buckets[i];
+ int bucket_type = BUCKET_TYPE(bucket_info);
+ int bucket_size;
+
+ if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
+ bucket_size = 1;
+ } else {
+ bucket_size = (BUCKET_OFFSET(_buckets[i + 1]) - BUCKET_OFFSET(bucket_info)) / 2;
+ }
+ total_entries += bucket_size;
+ if (max_bucket < bucket_size) {
+ max_bucket = bucket_size;
+ }
+ }
+ st->print_cr("Number of buckets : %9d", _bucket_count);
+ st->print_cr("Number of entries : %9d", total_entries);
+ st->print_cr("Maximum bucket size : %9d", max_bucket);
+ }
+};
+
+////////////////////////////////////////////////////////////////////////
+//
+// OffsetCompactHashtable -- This is used to store many types of objects
+// in the CDS archive. On 64-bit platforms, we save space by using a 32-bit
+// offset from the CDS base address.
+
+template <typename V>
+inline V read_value_from_compact_hashtable(address base_address, u4 offset) {
+ return (V)(base_address + offset);
+}
+
+template <
+ typename K,
+ typename V,
+ bool (*EQUALS)(V value, K key, int len)
+ >
+class OffsetCompactHashtable : public CompactHashtable<
+ K, V, read_value_from_compact_hashtable<V>, EQUALS> {
+};
+
+#endif // SHARE_VM_CLASSFILE_COMPACTHASHTABLE_HPP
diff --git a/hotspot/src/share/vm/classfile/sharedClassUtil.hpp b/hotspot/src/share/vm/classfile/sharedClassUtil.hpp
index 13be2b1b5..b24e84d45 100644
--- a/hotspot/src/share/vm/classfile/sharedClassUtil.hpp
+++ b/hotspot/src/share/vm/classfile/sharedClassUtil.hpp
@@ -43,6 +43,10 @@ public:
return new FileMapInfo::FileMapHeader();
}
+ static FileMapInfo::DynamicArchiveHeader* allocate_dynamic_archive_header() {
+ return new FileMapInfo::DynamicArchiveHeader();
+ }
+
static size_t file_map_header_size() {
return sizeof(FileMapInfo::FileMapHeader);
}
diff --git a/hotspot/src/share/vm/classfile/symbolTable.cpp b/hotspot/src/share/vm/classfile/symbolTable.cpp
index 8dd4e6b21..6a2d8077f 100644
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp
@@ -23,6 +23,8 @@
*/
#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/dynamicArchive.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
@@ -42,6 +44,19 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
+inline bool symbol_equals_compact_hashtable_entry(Symbol* value, const char* key, int len) {
+  return value->equals(key, len);
+}
+
+static OffsetCompactHashtable<
+ const char*, Symbol*,
+ symbol_equals_compact_hashtable_entry
+> _dynamic_shared_table;
+
// --------------------------------------------------------------------------
// the number of buckets a thread claims
@@ -95,6 +110,7 @@ void SymbolTable::symbols_do(SymbolClosure *cl) {
int SymbolTable::_symbols_removed = 0;
int SymbolTable::_symbols_counted = 0;
volatile int SymbolTable::_parallel_claimed_idx = 0;
+volatile bool _lookup_shared_first = false;
void SymbolTable::buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context, size_t* memory_total) {
for (int i = start_idx; i < end_idx; ++i) {
@@ -228,7 +244,22 @@ Symbol* SymbolTable::lookup(int index, const char* name,
java_lang_String::hash_code((const jbyte*)s, len);
}
+#if INCLUDE_CDS
+Symbol* SymbolTable::lookup_shared(const char* name,
+ int len, unsigned int hash) {
+ Symbol* sym = NULL;
+ if (DynamicArchive::is_mapped()) {
+ if (use_alternate_hashcode()) {
+      // The hash parameter may have been computed with the alternate hashing
+      // algorithm, but the shared table always uses the original hash code.
+ hash = java_lang_String::hash_code((const jbyte*)name, len);
+ }
+ sym = _dynamic_shared_table.lookup(name, hash, len);
+ }
+ return sym;
+}
+#endif
// We take care not to be blocking while holding the
// SymbolTable_lock. Otherwise, the system might deadlock, since the
// symboltable is used during compilation (VM_thread) The lock free
@@ -251,13 +282,33 @@ unsigned int SymbolTable::hash_symbol(const char* s, int len) {
return len;
}
-Symbol* SymbolTable::lookup(const char* name, int len, TRAPS) {
+Symbol* SymbolTable::lookup_common(const char* name, int len) {
len = check_length(name, len);
unsigned int hashValue = hash_symbol(name, len);
int index = the_table()->hash_to_index(hashValue);
+ Symbol* s;
+ if (_lookup_shared_first) {
+ s = lookup_shared(name, len, hashValue);
+ if (s == NULL) {
+ _lookup_shared_first = false;
+ s = the_table()->lookup(index, name, len, hashValue);
+ }
+ } else {
+ s = the_table()->lookup(index, name, len, hashValue);
+ if (s == NULL) {
+ s = lookup_shared(name, len, hashValue);
+      if (s != NULL) {
+ _lookup_shared_first = true;
+ }
+ }
+ }
+ return s;
+}
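+// Note on the heuristic above: _lookup_shared_first is a sticky, racy hint.
+// Workloads that keep hitting the read-only shared table probe it first and
+// skip walking the live table; a miss flips the order back. Races on the flag
+// cost at most an extra probe and never produce a wrong result.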
- Symbol* s = the_table()->lookup(index, name, len, hashValue);
-
+Symbol* SymbolTable::lookup(const char* name, int len, TRAPS) {
+ unsigned int hashValue = hash_symbol(name, len);
+ int index = the_table()->hash_to_index(hashValue);
+ Symbol* s = lookup_common(name, len);
// Found
if (s != NULL) return s;
@@ -264,8 +315,7 @@ Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
len = end - begin;
hashValue = hash_symbol(name, len);
index = the_table()->hash_to_index(hashValue);
- Symbol* s = the_table()->lookup(index, name, len, hashValue);
-
+ Symbol* s = lookup_common(name, len);
// Found
if (s != NULL) return s;
}
@@ -294,9 +344,7 @@ Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
Symbol* SymbolTable::lookup_only(const char* name, int len,
unsigned int& hash) {
hash = hash_symbol(name, len);
- int index = the_table()->hash_to_index(hash);
-
- Symbol* s = the_table()->lookup(index, name, len, hash);
+ Symbol* s = lookup_common(name, len);
return s;
}
@@ -501,6 +549,42 @@ void SymbolTable::dump(outputStream* st) {
the_table()->dump_table(st, "SymbolTable");
}
+static uintx hash_shared_symbol(const char* s, int len) {
+ return java_lang_String::hash_code((const jbyte*)s, len);
+}
+
+void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
+ CompactHashtableWriter* writer) {
+ ArchiveBuilder* builder = ArchiveBuilder::current();
+ int len = symbols->length();
+ for (int i = 0; i < len; i++) {
+ Symbol* sym = ArchiveBuilder::get_relocated_symbol(symbols->at(i));
+ unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length());
+ assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length()),
+ "must not rehash during dumping");
+ sym->set_permanent();
+ writer->add(fixed_hash, builder->buffer_to_offset_u4((address)sym));
+ }
+}
+
+size_t SymbolTable::estimate_size_for_archive() {
+ return CompactHashtableWriter::estimate_size(the_table()->number_of_entries());
+}
+
+void SymbolTable::write_to_archive(GrowableArray<Symbol*>* symbols) {
+ CompactHashtableWriter writer(symbols->length(), ArchiveBuilder::symbol_stats());
+ copy_shared_symbol_table(symbols, &writer);
+ _dynamic_shared_table.reset();
+ writer.dump(&_dynamic_shared_table, "symbol");
+}
+
+void SymbolTable::serialize_shared_table_header(SerializeClosure* soc) {
+ _dynamic_shared_table.serialize_header(soc);
+ if (soc->writing()) {
+ // Sanity. Make sure we don't use the shared table at dump time
+ _dynamic_shared_table.reset();
+ }
+}
//---------------------------------------------------------------------------
// Non-product code
diff --git a/hotspot/src/share/vm/classfile/symbolTable.hpp b/hotspot/src/share/vm/classfile/symbolTable.hpp
index 58fd22343..96eb173d1 100644
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
#define SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
+#include "classfile/compactHashtable.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/symbol.hpp"
#include "utilities/hashtable.hpp"
@@ -107,6 +108,10 @@ private:
add(loader_data, cp, names_count, name, lengths, cp_indices, hashValues, THREAD);
}
+ static Symbol* lookup_shared(const char* name, int len, unsigned int hash) NOT_CDS_RETURN_(NULL);
+
+ static Symbol* lookup_common(const char* name, int len);
+
Symbol* lookup(int index, const char* name, int len, unsigned int hash);
SymbolTable()
@@ -237,6 +242,10 @@ public:
static void dump(outputStream* st);
// Sharing
+private:
+ static void copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
+ CompactHashtableWriter* ch_table);
+public:
static void copy_buckets(char** top, char*end) {
the_table()->Hashtable<Symbol*, mtSymbol>::copy_buckets(top, end);
}
@@ -246,6 +255,9 @@ public:
static void reverse(void* boundary = NULL) {
the_table()->Hashtable<Symbol*, mtSymbol>::reverse(boundary);
}
+ static size_t estimate_size_for_archive();
+ static void write_to_archive(GrowableArray<Symbol*>* symbols);
+ static void serialize_shared_table_header(SerializeClosure* soc);
// Rehash the symbol table if it gets out of balance
static void rehash_table();
diff --git a/hotspot/src/share/vm/classfile/systemDictionary.cpp b/hotspot/src/share/vm/classfile/systemDictionary.cpp
index 0d937c3ba..0ea2d9b79 100644
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp
@@ -31,6 +31,7 @@
#include "classfile/resolutionErrors.hpp"
#include "classfile/systemDictionary.hpp"
#if INCLUDE_CDS
+#include "cds/dynamicArchive.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/systemDictionaryShared.hpp"
#endif
@@ -185,6 +186,11 @@ bool SystemDictionary::is_app_class_loader(Handle class_loader) {
return (class_loader->klass()->name() == vmSymbols::sun_misc_Launcher_AppClassLoader());
}
+bool SystemDictionary::is_builtin_loader(Handle class_loader) {
+ return class_loader.is_null() ||
+ class_loader->klass()->name() == vmSymbols::sun_misc_Launcher_AppClassLoader() ||
+ class_loader->klass()->name() == vmSymbols::sun_misc_Launcher_ExtClassLoader();
+}
// ----------------------------------------------------------------------------
// Resolving of classes
@@ -1131,76 +1137,92 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
check_loader_lock_contention(lockObject, THREAD);
ObjectLocker ol(lockObject, THREAD, DoObjectLock);
+ instanceKlassHandle k;
TempNewSymbol parsed_name = NULL;
- // Parse the stream. Note that we do this even though this klass might
- // already be present in the SystemDictionary, otherwise we would not
- // throw potential ClassFormatErrors.
- //
- // Note: "name" is updated.
+#if INCLUDE_CDS
+ if (DynamicArchive::is_mapped()) {
+ k = SystemDictionaryShared::lookup_from_stream(class_name,
+ class_loader,
+ protection_domain,
+ st,
+ CHECK_NULL);
+ }
+#endif
- // Callers are expected to declare a ResourceMark to determine
- // the lifetime of any updated (resource) allocated under
- // this call to parseClassFile
- ResourceMark rm(THREAD);
- ClassFileParser parser(st);
- instanceKlassHandle k = parser.parseClassFile(class_name,
- loader_data,
- protection_domain,
- parsed_name,
- verify,
- THREAD);
-
- const char* pkg = "java/";
- size_t pkglen = strlen(pkg);
- if (!HAS_PENDING_EXCEPTION &&
- !class_loader.is_null() &&
- parsed_name != NULL &&
- parsed_name->utf8_length() >= (int)pkglen) {
- ResourceMark rm(THREAD);
- bool prohibited;
- const jbyte* base = parsed_name->base();
- if ((base[0] | base[1] | base[2] | base[3] | base[4]) & 0x80) {
- prohibited = is_prohibited_package_slow(parsed_name);
- } else {
- char* name = parsed_name->as_C_string();
- prohibited = (strncmp(name, pkg, pkglen) == 0);
- }
- if (prohibited) {
- // It is illegal to define classes in the "java." package from
- // JVM_DefineClass or jni_DefineClass unless you're the bootclassloader
- char* name = parsed_name->as_C_string();
- char* index = strrchr(name, '/');
- assert(index != NULL, "must be");
- *index = '\0'; // chop to just the package name
- while ((index = strchr(name, '/')) != NULL) {
- *index = '.'; // replace '/' with '.' in package name
+ if (k() != NULL) {
+ parsed_name = k->name();
+ } else {
+ // Parse the stream. Note that we do this even though this klass might
+ // already be present in the SystemDictionary, otherwise we would not
+ // throw potential ClassFormatErrors.
+ //
+ // Note: "name" is updated.
+
+ // Callers are expected to declare a ResourceMark to determine
+ // the lifetime of any updated (resource) allocated under
+ // this call to parseClassFile
+ ResourceMark rm(THREAD);
+ ClassFileParser parser(st);
+ k = parser.parseClassFile(class_name,
+ loader_data,
+ protection_domain,
+ parsed_name,
+ verify,
+ THREAD);
+ const char* pkg = "java/";
+ size_t pkglen = strlen(pkg);
+ if (!HAS_PENDING_EXCEPTION &&
+ !class_loader.is_null() &&
+ parsed_name != NULL &&
+ parsed_name->utf8_length() >= (int)pkglen) {
+ ResourceMark rm(THREAD);
+ bool prohibited;
+ const jbyte* base = parsed_name->base();
+ if ((base[0] | base[1] | base[2] | base[3] | base[4]) & 0x80) {
+ prohibited = is_prohibited_package_slow(parsed_name);
+ } else {
+ char* name = parsed_name->as_C_string();
+ prohibited = (strncmp(name, pkg, pkglen) == 0);
}
- const char* fmt = "Prohibited package name: %s";
- size_t len = strlen(fmt) + strlen(name);
- char* message = NEW_RESOURCE_ARRAY(char, len);
- jio_snprintf(message, len, fmt, name);
- Exceptions::_throw_msg(THREAD_AND_LOCATION,
- vmSymbols::java_lang_SecurityException(), message);
- }
- }
+ if (prohibited) {
+ // It is illegal to define classes in the "java." package from
+ // JVM_DefineClass or jni_DefineClass unless you're the bootclassloader
+ char* name = parsed_name->as_C_string();
+ char* index = strrchr(name, '/');
+ assert(index != NULL, "must be");
+ *index = '\0'; // chop to just the package name
+ while ((index = strchr(name, '/')) != NULL) {
+ *index = '.'; // replace '/' with '.' in package name
+ }
+ const char* fmt = "Prohibited package name: %s";
+ size_t len = strlen(fmt) + strlen(name);
+ char* message = NEW_RESOURCE_ARRAY(char, len);
+ jio_snprintf(message, len, fmt, name);
+ Exceptions::_throw_msg(THREAD_AND_LOCATION,
+ vmSymbols::java_lang_SecurityException(), message);
+ }
+ }
- if (!HAS_PENDING_EXCEPTION) {
- assert(parsed_name != NULL, "Sanity");
- assert(class_name == NULL || class_name == parsed_name, "name mismatch");
- // Verification prevents us from creating names with dots in them, this
- // asserts that that's the case.
- assert(is_internal_format(parsed_name),
- "external class name format used internally");
+ if (!HAS_PENDING_EXCEPTION) {
+ assert(parsed_name != NULL, "Sanity");
+ assert(class_name == NULL || class_name == parsed_name, "name mismatch");
+ // Verification prevents us from creating names with dots in them, this
+ // asserts that that's the case.
+ assert(is_internal_format(parsed_name),
+ "external class name format used internally");
#if INCLUDE_JFR
- {
- InstanceKlass* ik = k();
- ON_KLASS_CREATION(ik, parser, THREAD);
- k = instanceKlassHandle(ik);
- }
+ {
+ InstanceKlass* ik = k();
+ ON_KLASS_CREATION(ik, parser, THREAD);
+ k = instanceKlassHandle(ik);
+ }
#endif
+ }
+ }
+ if (!HAS_PENDING_EXCEPTION) {
// Add class just loaded
// If a class loader supports parallel classloading handle parallel define requests
// find_or_define_instance_class may return a different InstanceKlass
@@ -1274,14 +1296,19 @@ Klass* SystemDictionary::find_shared_class(Symbol* class_name) {
instanceKlassHandle SystemDictionary::load_shared_class(
Symbol* class_name, Handle class_loader, TRAPS) {
- if (!(class_loader.is_null() || SystemDictionary::is_app_class_loader(class_loader) ||
+ if (!(class_loader.is_null() || SystemDictionary::is_app_class_loader(class_loader) ||
SystemDictionary::is_ext_class_loader(class_loader))) {
return instanceKlassHandle();
}
- instanceKlassHandle ik (THREAD, find_shared_class(class_name)); // InstanceKlass is find with null class loader.
+ Klass* klass = SystemDictionaryShared::find_dynamic_builtin_class(class_name);
+ if (klass == NULL) {
+ klass = find_shared_class(class_name);
+ }
+
+ instanceKlassHandle ik (THREAD, klass); // InstanceKlass is found with the null class loader.
if (ik.not_null()) {
- if (!UseAppCDS) {
+ if (!(UseAppCDS || DynamicArchive::is_mapped())) {
// CDS logic
if (SharedClassUtil::is_shared_boot_class(ik()) && class_loader.is_null()) {
// CDS record boot class load index.
@@ -1289,7 +1316,7 @@ instanceKlassHandle SystemDictionary::load_shared_class(
return load_shared_class(ik, class_loader, protection_domain, THREAD);
}
} else {
- // AppCDS logic. Only use null loader only to load classes that
+ // AppCDS and dynamic CDS logic. Only use the null loader to load classes that
// have been dumped by null loader. For non-null class loaders,
// either the class loader data is not initialized (but also not
// null) or the same class loader is used to load previously
@@ -1424,7 +1451,7 @@ instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
true /* shared class */);
// register package for this class, if necessary
- if (UseAppCDS && class_loader.not_null()) {
+ if (SystemDictionary::is_app_class_loader(class_loader) || SystemDictionary::is_ext_class_loader(class_loader)) {
ResourceMark rm(THREAD);
char* name = ik->name()->as_C_string();
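
The load_shared_class() change above probes the dynamic archive's builtin dictionary first and only then falls back to the static archive. A stand-alone model of that two-layer probe; all names here are illustrative, not the HotSpot API:

    #include <cassert>

    struct Klass {};                 // opaque stand-in

    static Klass dynamic_obj, static_obj;

    // Stubs for the two dictionaries; return nullptr on a miss.
    static Klass* find_dynamic_builtin(const char* name) {
      return name[0] == 'D' ? &dynamic_obj : nullptr;
    }
    static Klass* find_static_shared(const char* name) {
      return name[0] == 'S' ? &static_obj : nullptr;
    }

    static Klass* find_shared(const char* name) {
      Klass* k = find_dynamic_builtin(name);  // top layer wins
      if (k == nullptr) {
        k = find_static_shared(name);         // base archive as fallback
      }
      return k;
    }

    int main() {
      assert(find_shared("Dyn") == &dynamic_obj);
      assert(find_shared("Sta") == &static_obj);
      assert(find_shared("x") == nullptr);
      return 0;
    }
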
diff --git a/hotspot/src/share/vm/classfile/systemDictionary.hpp b/hotspot/src/share/vm/classfile/systemDictionary.hpp
index 3b9be4430..320f71865 100644
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp
@@ -652,6 +652,7 @@ public:
TRAPS);
static bool is_ext_class_loader(Handle class_loader);
static bool is_app_class_loader(Handle class_loader);
+ static bool is_builtin_loader(Handle class_loader);
protected:
static Klass* find_shared_class(Symbol* class_name);
diff --git a/hotspot/src/share/vm/classfile/systemDictionaryShared.cpp b/hotspot/src/share/vm/classfile/systemDictionaryShared.cpp
new file mode 100644
index 000000000..99354cd4b
--- /dev/null
+++ b/hotspot/src/share/vm/classfile/systemDictionaryShared.cpp
@@ -0,0 +1,911 @@
+/*
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, Huawei Technologies Co., Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/dynamicArchive.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "memory/metaspaceClosure.hpp"
+#include "utilities/resourceHash.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "utilities/ostream.hpp"
+
+DEBUG_ONLY(bool SystemDictionaryShared::_no_class_loading_should_happen = false;)
+bool SystemDictionaryShared::_dump_in_progress = false;
+
+class DumpTimeSharedClassInfo: public CHeapObj<mtClass> {
+ bool _excluded;
+ bool _has_checked_exclusion;
+public:
+ struct DTLoaderConstraint {
+ Symbol* _name;
+ char _loader_type1;
+ char _loader_type2;
+ DTLoaderConstraint(Symbol* name, char l1, char l2) : _name(name), _loader_type1(l1), _loader_type2(l2) {
+ _name->increment_refcount();
+ }
+ DTLoaderConstraint() : _name(NULL), _loader_type1('0'), _loader_type2('0') {}
+ bool equals(const DTLoaderConstraint& t) {
+ return t._name == _name &&
+ ((t._loader_type1 == _loader_type1 && t._loader_type2 == _loader_type2) ||
+ (t._loader_type2 == _loader_type1 && t._loader_type1 == _loader_type2));
+ }
+ };
+
+ struct DTVerifierConstraint {
+ Symbol* _name;
+ Symbol* _from_name;
+ DTVerifierConstraint() : _name(NULL), _from_name(NULL) {}
+ DTVerifierConstraint(Symbol* n, Symbol* fn) : _name(n), _from_name(fn) {
+ _name->increment_refcount();
+ _from_name->increment_refcount();
+ }
+ };
+
+ InstanceKlass* _klass;
+ InstanceKlass* _nest_host;
+ bool _failed_verification;
+ bool _is_archived_lambda_proxy;
+ int _id;
+ int _clsfile_size;
+ int _clsfile_crc32;
+ GrowableArray<DTVerifierConstraint>* _verifier_constraints;
+ GrowableArray<char>* _verifier_constraint_flags;
+ GrowableArray<DTLoaderConstraint>* _loader_constraints;
+
+ DumpTimeSharedClassInfo() {
+ _klass = NULL;
+ _nest_host = NULL;
+ _failed_verification = false;
+ _is_archived_lambda_proxy = false;
+ _has_checked_exclusion = false;
+ _id = -1;
+ _clsfile_size = -1;
+ _clsfile_crc32 = -1;
+ _excluded = false;
+ _verifier_constraints = NULL;
+ _verifier_constraint_flags = NULL;
+ _loader_constraints = NULL;
+ }
+
+ void add_verification_constraint(InstanceKlass* k, Symbol* name,
+ Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object);
+ void record_linking_constraint(Symbol* name, Handle loader1, Handle loader2);
+
+ bool is_builtin() {
+ return SystemDictionaryShared::is_builtin(_klass);
+ }
+
+ int num_verifier_constraints() {
+ if (_verifier_constraint_flags != NULL) {
+ return _verifier_constraint_flags->length();
+ } else {
+ return 0;
+ }
+ }
+
+ int num_loader_constraints() {
+ if (_loader_constraints != NULL) {
+ return _loader_constraints->length();
+ } else {
+ return 0;
+ }
+ }
+
+ void metaspace_pointers_do(MetaspaceClosure* it) {
+ it->push(&_klass);
+ it->push(&_nest_host);
+ if (_verifier_constraints != NULL) {
+ for (int i = 0; i < _verifier_constraints->length(); i++) {
+ DTVerifierConstraint* cons = _verifier_constraints->adr_at(i);
+ it->push(&cons->_name);
+ it->push(&cons->_from_name);
+ }
+ }
+ if (_loader_constraints != NULL) {
+ for (int i = 0; i < _loader_constraints->length(); i++) {
+ DTLoaderConstraint* lc = _loader_constraints->adr_at(i);
+ it->push(&lc->_name);
+ }
+ }
+ }
+
+ bool is_excluded() {
+ // _klass may become NULL due to DynamicArchiveBuilder::set_to_null
+ return _excluded || _failed_verification || _klass == NULL;
+ }
+
+ // simple accessors
+ void set_excluded() { _excluded = true; }
+ bool has_checked_exclusion() const { return _has_checked_exclusion; }
+ void set_has_checked_exclusion() { _has_checked_exclusion = true; }
+ bool failed_verification() const { return _failed_verification; }
+ void set_failed_verification() { _failed_verification = true; }
+ InstanceKlass* nest_host() const { return _nest_host; }
+ void set_nest_host(InstanceKlass* nest_host) { _nest_host = nest_host; }
+};
+
+inline unsigned DumpTimeSharedClassTable_hash(InstanceKlass* const& k) {
+ // Deterministic archive is not possible because classes can be loaded
+ // in multiple threads.
+ return primitive_hash<InstanceKlass*>(k);
+}
+
+class DumpTimeSharedClassTable: public ResourceHashtable<
+ InstanceKlass*,
+ DumpTimeSharedClassInfo,
+ &DumpTimeSharedClassTable_hash,
+ primitive_equals<InstanceKlass*>,
+ 15889, // prime number
+ ResourceObj::C_HEAP>
+{
+ int _builtin_count;
+ int _unregistered_count;
+public:
+ DumpTimeSharedClassInfo* find_or_allocate_info_for(InstanceKlass* k, bool dump_in_progress) {
+ bool created = false;
+ DumpTimeSharedClassInfo* p;
+ if (!dump_in_progress) {
+ p = put_if_absent(k, &created);
+ } else {
+ p = get(k);
+ }
+ if (created) {
+ assert(!SystemDictionaryShared::no_class_loading_should_happen(),
+ "no new classes can be loaded while dumping archive");
+ p->_klass = k;
+ } else {
+ if (!dump_in_progress) {
+ assert(p->_klass == k, "Sanity");
+ }
+ }
+ return p;
+ }
+
+ class CountClassByCategory : StackObj {
+ DumpTimeSharedClassTable* _table;
+ public:
+ CountClassByCategory(DumpTimeSharedClassTable* table) : _table(table) {}
+ bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
+ if (!info.is_excluded()) {
+ if (info.is_builtin()) {
+ ++ _table->_builtin_count;
+ } else {
+ ++ _table->_unregistered_count;
+ }
+ }
+ return true; // keep on iterating
+ }
+ };
+
+ void update_counts() {
+ _builtin_count = 0;
+ _unregistered_count = 0;
+ CountClassByCategory counter(this);
+ iterate(&counter);
+ }
+
+ int count_of(bool is_builtin) const {
+ if (is_builtin) {
+ return _builtin_count;
+ } else {
+ return _unregistered_count;
+ }
+ }
+};
+
+class RunTimeSharedClassInfo {
+public:
+ struct CrcInfo {
+ int _clsfile_size;
+ int _clsfile_crc32;
+ };
+
+ // This is different from DumpTimeSharedClassInfo::DTVerifierConstraint. We use
+ // u4 instead of Symbol* to save space on 64-bit CPUs.
+ struct RTVerifierConstraint {
+ u4 _name;
+ u4 _from_name;
+ Symbol* name() { return (Symbol*)(SharedBaseAddress + _name);}
+ Symbol* from_name() { return (Symbol*)(SharedBaseAddress + _from_name); }
+ };
+
+ struct RTLoaderConstraint {
+ u4 _name;
+ char _loader_type1;
+ char _loader_type2;
+ Symbol* constraint_name() {
+ return (Symbol*)(SharedBaseAddress + _name);
+ }
+ };
+
+ InstanceKlass* _klass;
+ int _num_verifier_constraints;
+ int _num_loader_constraints;
+
+ // optional CrcInfo _crc; (only for UNREGISTERED classes)
+ // optional InstanceKlass* _nest_host
+ // optional RTLoaderConstraint _loader_constraint_types[_num_loader_constraints]
+ // optional RTVerifierConstraint _verifier_constraints[_num_verifier_constraints]
+ // optional char _verifier_constraint_flags[_num_verifier_constraints]
+
+private:
+ static size_t header_size_size() {
+ return sizeof(RunTimeSharedClassInfo);
+ }
+ static size_t crc_size(InstanceKlass* klass) {
+ if (!SystemDictionaryShared::is_builtin(klass)) {
+ return sizeof(CrcInfo);
+ } else {
+ return 0;
+ }
+ }
+ static size_t verifier_constraints_size(int num_verifier_constraints) {
+ return sizeof(RTVerifierConstraint) * num_verifier_constraints;
+ }
+ static size_t verifier_constraint_flags_size(int num_verifier_constraints) {
+ return sizeof(char) * num_verifier_constraints;
+ }
+ static size_t loader_constraints_size(int num_loader_constraints) {
+ return sizeof(RTLoaderConstraint) * num_loader_constraints;
+ }
+ static size_t nest_host_size(InstanceKlass* klass) {
+ assert(!klass->is_anonymous(), "klass should not be hidden right now.");
+ if (klass->is_anonymous()) {
+ return sizeof(InstanceKlass*);
+ } else {
+ return 0;
+ }
+ }
+
+public:
+ static size_t byte_size(InstanceKlass* klass, int num_verifier_constraints, int num_loader_constraints) {
+ return header_size_size() +
+ crc_size(klass) +
+ nest_host_size(klass) +
+ loader_constraints_size(num_loader_constraints) +
+ verifier_constraints_size(num_verifier_constraints) +
+ verifier_constraint_flags_size(num_verifier_constraints);
+ }
+
+private:
+ size_t crc_offset() const {
+ return header_size_size();
+ }
+
+ size_t nest_host_offset() const {
+ return crc_offset() + crc_size(_klass);
+ }
+
+ size_t loader_constraints_offset() const {
+ return nest_host_offset() + nest_host_size(_klass);
+ }
+ size_t verifier_constraints_offset() const {
+ return loader_constraints_offset() + loader_constraints_size(_num_loader_constraints);
+ }
+ size_t verifier_constraint_flags_offset() const {
+ return verifier_constraints_offset() + verifier_constraints_size(_num_verifier_constraints);
+ }
+
+ void check_verifier_constraint_offset(int i) const {
+ assert(0 <= i && i < _num_verifier_constraints, "sanity");
+ }
+
+ void check_loader_constraint_offset(int i) const {
+ assert(0 <= i && i < _num_loader_constraints, "sanity");
+ }
+
+public:
+ CrcInfo* crc() const {
+ assert(crc_size(_klass) > 0, "must be");
+ return (CrcInfo*)(address(this) + crc_offset());
+ }
+ RTVerifierConstraint* verifier_constraints() {
+ assert(_num_verifier_constraints > 0, "sanity");
+ return (RTVerifierConstraint*)(address(this) + verifier_constraints_offset());
+ }
+ RTVerifierConstraint* verifier_constraint_at(int i) {
+ check_verifier_constraint_offset(i);
+ return verifier_constraints() + i;
+ }
+
+ char* verifier_constraint_flags() {
+ assert(_num_verifier_constraints > 0, "sanity");
+ return (char*)(address(this) + verifier_constraint_flags_offset());
+ }
+
+ RTLoaderConstraint* loader_constraints() {
+ assert(_num_loader_constraints > 0, "sanity");
+ return (RTLoaderConstraint*)(address(this) + loader_constraints_offset());
+ }
+
+ RTLoaderConstraint* loader_constraint_at(int i) {
+ check_loader_constraint_offset(i);
+ return loader_constraints() + i;
+ }
+
+ void init(DumpTimeSharedClassInfo& info) {
+ ArchiveBuilder* builder = ArchiveBuilder::current();
+ assert(builder->is_in_buffer_space(info._klass), "must be");
+ _klass = info._klass;
+ if (!SystemDictionaryShared::is_builtin(_klass)) {
+ CrcInfo* c = crc();
+ c->_clsfile_size = info._clsfile_size;
+ c->_clsfile_crc32 = info._clsfile_crc32;
+ }
+ _num_verifier_constraints = info.num_verifier_constraints();
+ _num_loader_constraints = info.num_loader_constraints();
+ int i;
+ if (_num_verifier_constraints > 0) {
+ RTVerifierConstraint* vf_constraints = verifier_constraints();
+ char* flags = verifier_constraint_flags();
+ for (i = 0; i < _num_verifier_constraints; i++) {
+ vf_constraints[i]._name = builder->any_to_offset_u4(info._verifier_constraints->at(i)._name);
+ vf_constraints[i]._from_name = builder->any_to_offset_u4(info._verifier_constraints->at(i)._from_name);
+ }
+ for (i = 0; i < _num_verifier_constraints; i++) {
+ flags[i] = info._verifier_constraint_flags->at(i);
+ }
+ }
+
+ if (_num_loader_constraints > 0) {
+ RTLoaderConstraint* ld_constraints = loader_constraints();
+ for (i = 0; i < _num_loader_constraints; i++) {
+ ld_constraints[i]._name = builder->any_to_offset_u4(info._loader_constraints->at(i)._name);
+ ld_constraints[i]._loader_type1 = info._loader_constraints->at(i)._loader_type1;
+ ld_constraints[i]._loader_type2 = info._loader_constraints->at(i)._loader_type2;
+ }
+ }
+
+ ArchivePtrMarker::mark_pointer(&_klass);
+ }
+
+ bool matches(int clsfile_size, int clsfile_crc32) const {
+ return crc()->_clsfile_size == clsfile_size &&
+ crc()->_clsfile_crc32 == clsfile_crc32;
+ }
+
+ char verifier_constraint_flag(int i) {
+ check_verifier_constraint_offset(i);
+ return verifier_constraint_flags()[i];
+ }
+
+private:
+ // ArchiveBuilder::make_shallow_copy() has reserved a pointer immediately
+ // before archived InstanceKlasses. We can use this slot to do a quick
+ // lookup of InstanceKlass* -> RunTimeSharedClassInfo* without
+ // building a new hashtable.
+ //
+ // info_pointer_addr(klass) --> 0x0100 RunTimeSharedClassInfo*
+ // InstanceKlass* klass --> 0x0108 <C++ vtbl>
+ // 0x0110 fields from Klass ...
+ static RunTimeSharedClassInfo** info_pointer_addr(InstanceKlass* klass) {
+ return &((RunTimeSharedClassInfo**)klass)[-1];
+ }
+
+public:
+ static RunTimeSharedClassInfo* get_for(InstanceKlass* klass) {
+ assert(klass->is_shared(), "don't call for non-shared class");
+ return *info_pointer_addr(klass);
+ }
+ static void set_for(InstanceKlass* klass, RunTimeSharedClassInfo* record) {
+ assert(ArchiveBuilder::current()->is_in_buffer_space(klass), "must be");
+ assert(ArchiveBuilder::current()->is_in_buffer_space(record), "must be");
+ *info_pointer_addr(klass) = record;
+ ArchivePtrMarker::mark_pointer(info_pointer_addr(klass));
+ }
+
+ // Used by RunTimeSharedDictionary to implement OffsetCompactHashtable::EQUALS
+ static inline bool EQUALS(
+ const RunTimeSharedClassInfo* value, Symbol* key, int len_unused) {
+ return (value->_klass->name() == key);
+ }
+};
+
+class RunTimeSharedDictionary : public OffsetCompactHashtable<
+ Symbol*,
+ const RunTimeSharedClassInfo*,
+ RunTimeSharedClassInfo::EQUALS> {};
+
+static DumpTimeSharedClassTable* _dumptime_table = NULL;
+// Shared class dictionaries in the top-layer (dynamic) archive
+static RunTimeSharedDictionary _dynamic_builtin_dictionary;
+static RunTimeSharedDictionary _dynamic_unregistered_dictionary;
+
+void SystemDictionaryShared::set_class_has_failed_verification(InstanceKlass* ik) {
+ Arguments::assert_is_dumping_archive();
+ DumpTimeSharedClassInfo* p = find_or_allocate_info_for(ik);
+ if (p != NULL) {
+ p->set_failed_verification();
+ }
+}
+
+void SystemDictionaryShared::start_dumping() {
+ MutexLockerEx ml(DumpTimeTable_lock, Mutex::_no_safepoint_check_flag);
+ _dump_in_progress = true;
+}
+
+void SystemDictionaryShared::init_dumptime_info(InstanceKlass* k) {
+ (void)find_or_allocate_info_for(k);
+}
+
+void SystemDictionaryShared::remove_dumptime_info(InstanceKlass* k) {
+ MutexLockerEx ml(DumpTimeTable_lock, Mutex::_no_safepoint_check_flag);
+ DumpTimeSharedClassInfo* p = _dumptime_table->get(k);
+ if (p == NULL) {
+ return;
+ }
+ _dumptime_table->remove(k);
+}
+
+DumpTimeSharedClassInfo* SystemDictionaryShared::find_or_allocate_info_for(InstanceKlass* k) {
+ MutexLockerEx ml(DumpTimeTable_lock, Mutex::_no_safepoint_check_flag);
+ return find_or_allocate_info_for_locked(k);
+}
+
+DumpTimeSharedClassInfo* SystemDictionaryShared::find_or_allocate_info_for_locked(InstanceKlass* k) {
+ assert_lock_strong(DumpTimeTable_lock);
+ if (_dumptime_table == NULL) {
+ _dumptime_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeSharedClassTable();
+ }
+ return _dumptime_table->find_or_allocate_info_for(k, _dump_in_progress);
+}
+
+bool SystemDictionaryShared::empty_dumptime_table() {
+ if (_dumptime_table == NULL) {
+ return true;
+ }
+ _dumptime_table->update_counts();
+ if (_dumptime_table->count_of(true) == 0 && _dumptime_table->count_of(false) == 0) {
+ return true;
+ }
+ return false;
+}
+
+class ExcludeDumpTimeSharedClasses : StackObj {
+public:
+ bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
+ SystemDictionaryShared::check_for_exclusion(k, &info);
+ return true; // keep on iterating
+ }
+};
+
+class IterateDumpTimeSharedClassTable : StackObj {
+ MetaspaceClosure *_it;
+public:
+ IterateDumpTimeSharedClassTable(MetaspaceClosure* it) : _it(it) {}
+
+ bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
+ assert_lock_strong(DumpTimeTable_lock);
+ if (!info.is_excluded()) {
+ info.metaspace_pointers_do(_it);
+ }
+ return true; // keep on iterating
+ }
+};
+
+class IterateDumpTimeTableReplaceKlass : StackObj {
+public:
+ IterateDumpTimeTableReplaceKlass() { }
+
+ bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
+ if (k->oop_is_instance() && !info.is_excluded()) {
+ k->constants()->symbol_replace_excluded_klass();
+ }
+ return true;
+ }
+};
+
+void SystemDictionaryShared::check_excluded_classes() {
+ assert(no_class_loading_should_happen(), "sanity");
+ assert_lock_strong(DumpTimeTable_lock);
+ ExcludeDumpTimeSharedClasses excl;
+ _dumptime_table->iterate(&excl);
+ _dumptime_table->update_counts();
+}
+
+bool SystemDictionaryShared::check_for_exclusion(InstanceKlass* k, DumpTimeSharedClassInfo* info) {
+ if (MetaspaceShared::is_in_shared_space(k)) {
+ // We have reached a super type that's already in the base archive. Treat it
+ // as "not excluded".
+ assert(DynamicDumpSharedSpaces, "must be");
+ return false;
+ }
+
+ if (info == NULL) {
+ info = _dumptime_table->get(k);
+ assert(info != NULL, "supertypes of any classes in _dumptime_table must either be shared, or must also be in _dumptime_table");
+ }
+
+ if (!info->has_checked_exclusion()) {
+ if (check_for_exclusion_impl(k)) {
+ info->set_excluded();
+ }
+ info->set_has_checked_exclusion();
+ }
+
+ return info->is_excluded();
+}
+
+// Check if a class or any of its supertypes has been redefined.
+bool SystemDictionaryShared::has_been_redefined(InstanceKlass* k) {
+ if (k->has_been_redefined()) {
+ return true;
+ }
+ if (k->java_super() != NULL && has_been_redefined(k->java_super())) {
+ return true;
+ }
+ Array<Klass*>* interfaces = k->local_interfaces();
+ int len = interfaces->length();
+ for (int i = 0; i < len; i++) {
+ if (has_been_redefined((InstanceKlass*)interfaces->at(i))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool SystemDictionaryShared::check_for_exclusion_impl(InstanceKlass* k) {
+ if (k->is_in_error_state()) {
+ return warn_excluded(k, "In error state");
+ }
+ if (k->init_state() < InstanceKlass::loaded) {
+ return warn_excluded(k, "not loaded klass");
+ }
+ if (has_been_redefined(k)) {
+ return warn_excluded(k, "Has been redefined");
+ }
+ if (k->signers() != NULL) {
+ // We cannot include signed classes in the archive because the certificates
+ // used during dump time may be different from those used during
+ // runtime (due to expiration, etc.).
+ return warn_excluded(k, "Signed JAR");
+ }
+ if (is_jfr_event_class(k)) {
+ // We cannot include JFR event classes because they need runtime-specific
+ // instrumentation in order to work with -XX:FlightRecorderOptions:retransform=false.
+ // There are only a small number of these classes, so it's not worthwhile to
+ // support them and make CDS more complicated.
+ return warn_excluded(k, "JFR event class");
+ }
+ if (k->init_state() < InstanceKlass::linked) {
+ // In CDS dumping, we will attempt to link all classes. Those that fail to link will
+ // be recorded in DumpTimeSharedClassInfo.
+ Arguments::assert_is_dumping_archive();
+
+ // TODO -- rethink how this can be handled.
+ // We should try to link ik, however, we can't do it here because
+ // 1. We are at VM exit
+ // 2. linking a class may cause other classes to be loaded, which means
+ // a custom ClassLoader.loadClass() may be called, at a point where the
+ // class loader doesn't expect it.
+ if (has_class_failed_verification(k)) {
+ return warn_excluded(k, "Failed verification");
+ } else {
+ if (k->can_be_verified_at_dumptime()) {
+ return warn_excluded(k, "Not linked");
+ }
+ }
+ }
+ if (DynamicDumpSharedSpaces && k->major_version() < 50 /*JAVA_6_VERSION*/) {
+ // In order to support old classes during dynamic dump, class rewriting needs to
+ // be reverted. This would result in more complex code and testing but not much gain.
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("Pre JDK 6 class not supported by CDS: %u.%u %s",
+ k->major_version(), k->minor_version(), k->name()->as_C_string());
+ return true;
+ }
+
+ if (!k->can_be_verified_at_dumptime() && k->is_linked()) {
+ return warn_excluded(k, "Old class has been linked");
+ }
+
+ if (k->is_anonymous() /* && !is_registered_lambda_proxy_class(k) */) {
+ return warn_excluded(k, "Hidden class");
+ }
+
+ InstanceKlass* super = k->java_super();
+ if (super != NULL && check_for_exclusion(super, NULL)) {
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("Skipping %s: super class %s is excluded", k->name()->as_C_string(), super->name()->as_C_string());
+ return true;
+ }
+
+ Array<Klass*>* interfaces = k->local_interfaces();
+ int len = interfaces->length();
+ for (int i = 0; i < len; i++) {
+ InstanceKlass* intf = (InstanceKlass*)interfaces->at(i);
+ if (check_for_exclusion(intf, NULL)) {
+ dynamic_cds_log->print_cr("Skipping %s: interface %s is excluded", k->name()->as_C_string(), intf->name()->as_C_string());
+ return true;
+ }
+ }
+
+ return false; // false == k should NOT be excluded
+}
+
+// Returns true so the caller can do: return warn_excluded(".....");
+bool SystemDictionaryShared::warn_excluded(InstanceKlass* k, const char* reason) {
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("Skipping %s: %s", k->name()->as_C_string(), reason);
+ return true;
+}
+
+bool SystemDictionaryShared::is_jfr_event_class(InstanceKlass *k) {
+ while (k) {
+ if (k->name()->equals("jdk/jfr/Event")) {
+ return true;
+ }
+ k = k->java_super();
+ }
+ return false;
+}
+
+bool SystemDictionaryShared::has_class_failed_verification(InstanceKlass* ik) {
+ if (_dumptime_table == NULL) {
+ assert(DynamicDumpSharedSpaces, "sanity");
+ assert(ik->is_shared(), "must be a shared class in the static archive");
+ return false;
+ }
+ DumpTimeSharedClassInfo* p = _dumptime_table->get(ik);
+ return (p == NULL) ? false : p->failed_verification();
+}
+
+void SystemDictionaryShared::dumptime_classes_do(class MetaspaceClosure* it) {
+ assert_lock_strong(DumpTimeTable_lock);
+ IterateDumpTimeSharedClassTable iter(it);
+ _dumptime_table->iterate(&iter);
+}
+
+void SystemDictionaryShared::replace_klass_in_constantPool() {
+ IterateDumpTimeTableReplaceKlass iter;
+ _dumptime_table->iterate(&iter);
+}
+
+bool SystemDictionaryShared::is_excluded_class(InstanceKlass* k) {
+ assert(_no_class_loading_should_happen, "sanity");
+ assert_lock_strong(DumpTimeTable_lock);
+ Arguments::assert_is_dumping_archive();
+ DumpTimeSharedClassInfo* p = find_or_allocate_info_for_locked(k);
+ return (p == NULL) ? true : p->is_excluded();
+}
+
+class EstimateSizeForArchive : StackObj {
+ size_t _shared_class_info_size;
+ int _num_builtin_klasses;
+ int _num_unregistered_klasses;
+
+public:
+ EstimateSizeForArchive() {
+ _shared_class_info_size = 0;
+ _num_builtin_klasses = 0;
+ _num_unregistered_klasses = 0;
+ }
+
+ bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
+ if (!info.is_excluded()) {
+ size_t byte_size = RunTimeSharedClassInfo::byte_size(info._klass, info.num_verifier_constraints(), info.num_loader_constraints());
+ _shared_class_info_size += align_up(byte_size, KlassAlignmentInBytes);
+ }
+ return true; // keep on iterating
+ }
+
+ size_t total() {
+ return _shared_class_info_size;
+ }
+};
+
+size_t SystemDictionaryShared::estimate_size_for_archive() {
+ EstimateSizeForArchive est;
+ _dumptime_table->iterate(&est);
+ size_t total_size = est.total() +
+ CompactHashtableWriter::estimate_size(_dumptime_table->count_of(true)) +
+ CompactHashtableWriter::estimate_size(_dumptime_table->count_of(false));
+ total_size += CompactHashtableWriter::estimate_size(0);
+ return total_size;
+}
+
+unsigned int SystemDictionaryShared::hash_for_shared_dictionary(address ptr) {
+ if (ArchiveBuilder::is_active()) {
+ uintx offset = ArchiveBuilder::current()->any_to_offset(ptr);
+ unsigned int hash = primitive_hash<uintx>(offset);
+ DEBUG_ONLY({
+ if (((const MetaspaceObj*)ptr)->is_shared()) {
+ assert(hash == SystemDictionaryShared::hash_for_shared_dictionary_quick(ptr), "must be");
+ }
+ });
+ return hash;
+ } else {
+ return SystemDictionaryShared::hash_for_shared_dictionary_quick(ptr);
+ }
+}
+
+class CopySharedClassInfoToArchive : StackObj {
+ CompactHashtableWriter* _writer;
+ bool _is_builtin;
+ ArchiveBuilder *_builder;
+public:
+ CopySharedClassInfoToArchive(CompactHashtableWriter* writer,
+ bool is_builtin)
+ : _writer(writer), _is_builtin(is_builtin), _builder(ArchiveBuilder::current()) {}
+
+ bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
+ if (!info.is_excluded() && info.is_builtin() == _is_builtin) {
+ size_t byte_size = RunTimeSharedClassInfo::byte_size(info._klass, info.num_verifier_constraints(), info.num_loader_constraints());
+ RunTimeSharedClassInfo* record;
+ record = (RunTimeSharedClassInfo*)ArchiveBuilder::ro_region_alloc(byte_size);
+ record->init(info);
+
+ unsigned int hash;
+ Symbol* name = info._klass->name();
+ hash = SystemDictionaryShared::hash_for_shared_dictionary((address)name);
+ u4 delta = _builder->buffer_to_offset_u4((address)record);
+ if (_is_builtin && info._klass->is_anonymous()) {
+ // skip
+ } else {
+ _writer->add(hash, delta);
+ }
+ if (TraceDynamicCDS) {
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("%s dictionary: %s", (_is_builtin ? "builtin" : "unregistered"), info._klass->external_name());
+ }
+
+ // Save this for quick runtime lookup of InstanceKlass* -> RunTimeSharedClassInfo*
+ RunTimeSharedClassInfo::set_for(info._klass, record);
+ }
+ return true; // keep on iterating
+ }
+};
+
+void SystemDictionaryShared::write_dictionary(RunTimeSharedDictionary* dictionary,
+ bool is_builtin) {
+ CompactHashtableStats stats;
+ dictionary->reset();
+ CompactHashtableWriter writer(_dumptime_table->count_of(is_builtin), &stats);
+ CopySharedClassInfoToArchive copy(&writer, is_builtin);
+ assert_lock_strong(DumpTimeTable_lock);
+ _dumptime_table->iterate(&copy);
+ writer.dump(dictionary, is_builtin ? "builtin dictionary" : "unregistered dictionary");
+}
+
+void SystemDictionaryShared::write_to_archive() {
+ write_dictionary(&_dynamic_builtin_dictionary, true);
+ write_dictionary(&_dynamic_unregistered_dictionary, false);
+}
+
+void SystemDictionaryShared::serialize_dictionary_headers(SerializeClosure* soc) {
+ _dynamic_builtin_dictionary.serialize_header(soc);
+ _dynamic_unregistered_dictionary.serialize_header(soc);
+}
+
+void SystemDictionaryShared::set_shared_class_misc_info(InstanceKlass* k, ClassFileStream* cfs) {
+ Arguments::assert_is_dumping_archive();
+ assert(!is_builtin(k), "must be unregistered class");
+ DumpTimeSharedClassInfo* info = find_or_allocate_info_for(k);
+ if (info != NULL) {
+ info->_clsfile_size = cfs->length();
+ info->_clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
+ }
+}
+
+// This function is called only for loading UNREGISTERED classes
+InstanceKlass* SystemDictionaryShared::lookup_from_stream(Symbol* class_name,
+ Handle class_loader,
+ Handle protection_domain,
+ const ClassFileStream* cfs,
+ TRAPS) {
+ if (!UseSharedSpaces) {
+ return NULL;
+ }
+ if (class_name == NULL) { // don't do this for hidden classes
+ return NULL;
+ }
+ if (SystemDictionary::is_builtin_loader(class_loader)) {
+ // Do nothing for the BUILTIN loaders.
+ return NULL;
+ }
+
+ const RunTimeSharedClassInfo* record = find_record(&_dynamic_unregistered_dictionary, class_name);
+ if (record == NULL) {
+ return NULL;
+ }
+
+ int clsfile_size = cfs->length();
+ int clsfile_crc32 = ClassLoader::crc32(0, (const char*)cfs->buffer(), cfs->length());
+
+ if (!record->matches(clsfile_size, clsfile_crc32)) {
+ return NULL;
+ }
+
+ return acquire_class_for_current_thread(record->_klass, class_loader,
+ protection_domain, cfs,
+ THREAD);
+}
+
+const RunTimeSharedClassInfo*
+SystemDictionaryShared::find_record(RunTimeSharedDictionary* dynamic_dict, Symbol* name) {
+ if (!UseSharedSpaces || !name->is_shared()) {
+ // The names of all shared classes must also be shared Symbols.
+ return NULL;
+ }
+
+ unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(name);
+ const RunTimeSharedClassInfo* record = NULL;
+ // AppCDS supports only the builtin class loaders; custom class loaders are handled only by the dynamic archive.
+ if (DynamicArchive::is_mapped()) {
+ record = dynamic_dict->lookup(name, hash, 0);
+ }
+
+ return record;
+}
+
+InstanceKlass* SystemDictionaryShared::acquire_class_for_current_thread(
+ InstanceKlass *ik,
+ Handle class_loader,
+ Handle protection_domain,
+ const ClassFileStream *cfs,
+ TRAPS) {
+ ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
+
+ {
+ MutexLocker mu(SharedDictionary_lock, THREAD);
+ if (ik->class_loader_data() != NULL) {
+ // ik is already loaded (by this loader or by a different loader)
+ // or ik is being loaded by a different thread (by this loader or by a different loader)
+ return NULL;
+ }
+
+ // No other thread has acquired this yet, so give it to *this thread*
+ ik->set_class_loader_data(loader_data);
+ }
+
+ // No longer holding SharedDictionary_lock
+ // No need to lock, as <ik> can be held only by a single thread.
+ loader_data->add_class(ik);
+
+ // Load and check super/interfaces, restore unsharable info
+ instanceKlassHandle shared_klass = SystemDictionary::load_shared_class(ik, class_loader, protection_domain, THREAD);
+ if (shared_klass() == NULL || HAS_PENDING_EXCEPTION) {
+ // TODO: clean up <ik> so it can be used again
+ return NULL;
+ }
+
+ return shared_klass();
+}
+
+InstanceKlass* SystemDictionaryShared::find_dynamic_builtin_class(Symbol* name) {
+ const RunTimeSharedClassInfo* record = find_record(&_dynamic_builtin_dictionary, name);
+ if (record != NULL) {
+ assert(!record->_klass->is_anonymous(), "hidden class cannot be looked up by name");
+ assert(check_klass_alignment(record->_klass), "Address not aligned");
+ return record->_klass;
+ } else {
+ return NULL;
+ }
+}
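
RunTimeSharedClassInfo above is a variable-length record: a fixed header followed by optional sections, with each accessor computing its offset as the sum of the sizes of everything laid out before it. The same technique in a self-contained miniature:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Record {
      int32_t _num_items;                  // fixed header
      // memory layout: Record, then int32_t items[_num_items]

      static constexpr size_t byte_size(int n) {
        return sizeof(Record) + n * sizeof(int32_t);
      }
      size_t items_offset() const { return sizeof(Record); }
      int32_t* items() {
        return (int32_t*)((char*)this + items_offset());
      }
    };

    int main() {
      alignas(Record) char buf[Record::byte_size(3)];
      Record* r = (Record*)buf;
      r->_num_items = 3;
      r->items()[2] = 42;                  // writes into the trailing array
      assert(r->items()[2] == 42);
      return 0;
    }
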
diff --git a/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp b/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp
index 1bd61b02..36423bee 100644
--- a/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp
+++ b/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp
@@ -22,7 +22,6 @@
*
*/
-
#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
@@ -30,13 +29,91 @@
#include "classfile/systemDictionary.hpp"
#include "verifier.hpp"
+/*===============================================================================
+
+ Handling of the classes in the AppCDS archive
+
+ To ensure safety and to simplify the implementation, archived classes are
+ "segregated" into 2 types. The following rules describe how they
+ are stored and looked up.
+
+[1] Category of archived classes
+
+ There are 2 disjoint groups of classes stored in the AppCDS archive:
+
+ BUILTIN: These classes may be defined ONLY by the BOOT/PLATFORM/APP
+ loaders.
+
+ UNREGISTERED: These classes may be defined ONLY by a ClassLoader
+ instance that's not listed above (using fingerprint matching)
+
+[2] How classes from different categories are specified in the classlist:
+
+ Starting from JDK9, each class in the classlist may be specified with
+ these keywords: "id", "super", "interfaces", "loader" and "source".
+
+
+ BUILTIN: Only the "id" keyword may be (optionally) specified. All other
+ keywords are forbidden.
+
+ The named class is looked up from the jimage and from
+ Xbootclasspath/a and CLASSPATH.
+
+ UNREGISTERED: The "id", "super", and "source" keywords must all be
+ specified.
+
+ The "interfaces" keyword must be specified if the class implements
+ one or more local interfaces. The "interfaces" keyword must not be
+ specified if the class does not implement local interfaces.
+
+ The named class is looked up from the location specified in the
+ "source" keyword.
+
+ Example classlist:
+
+ # BUILTIN
+ java/lang/Object id: 0
+ java/lang/Cloneable id: 1
+ java/lang/String
+
+ # UNREGISTERED
+ Bar id: 3 super: 0 interfaces: 1 source: /foo.jar
+
+
+[3] Identifying the category of archived classes
+
+ BUILTIN: (C->shared_classpath_index() >= 0)
+ UNREGISTERED: (C->shared_classpath_index() == UNREGISTERED_INDEX (-9999))
+
+[4] Lookup of archived classes at run time:
+
+ (a) BUILTIN loaders:
+
+ search _builtin_dictionary
+
+ (b) UNREGISTERED loaders:
+
+ search _unregistered_dictionary for an entry that matches the
+ (name, clsfile_len, clsfile_crc32).
+
+===============================================================================*/
+#define UNREGISTERED_INDEX -9999
+
+class DumpTimeSharedClassInfo;
+class RunTimeSharedClassInfo;
+class RunTimeSharedDictionary;
+
class SystemDictionaryShared: public SystemDictionary {
+private:
+ static bool _dump_in_progress;
+ DEBUG_ONLY(static bool _no_class_loading_should_happen;)
+
public:
static void initialize(TRAPS) {}
static instanceKlassHandle find_or_load_shared_class(Symbol* class_name,
Handle class_loader,
TRAPS) {
- if (UseAppCDS) {
+ if (UseSharedSpaces) {
instanceKlassHandle ik = load_shared_class(class_name, class_loader, CHECK_NULL);
if (!ik.is_null()) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
@@ -48,7 +125,7 @@ public:
}
static void roots_oops_do(OopClosure* blk) {}
static void oops_do(OopClosure* f) {}
-
+
static bool is_sharing_possible(ClassLoaderData* loader_data) {
oop class_loader = loader_data->class_loader();
return (class_loader == NULL ||
@@ -60,8 +137,43 @@ public:
static size_t dictionary_entry_size() {
return sizeof(DictionaryEntry);
}
+
static void init_shared_dictionary_entry(Klass* k, DictionaryEntry* entry) {}
+ static void init_dumptime_info(InstanceKlass* k) NOT_CDS_RETURN;
+ static void remove_dumptime_info(InstanceKlass* k) NOT_CDS_RETURN;
+
+ static void start_dumping();
+
+ static DumpTimeSharedClassInfo* find_or_allocate_info_for(InstanceKlass* k);
+
+ static DumpTimeSharedClassInfo* find_or_allocate_info_for_locked(InstanceKlass* k);
+
+ static bool empty_dumptime_table();
+
+ static void check_excluded_classes();
+
+ static bool check_for_exclusion(InstanceKlass* k, DumpTimeSharedClassInfo* info);
+
+ static bool has_been_redefined(InstanceKlass* k);
+
+ static bool check_for_exclusion_impl(InstanceKlass* k);
+
+ static bool warn_excluded(InstanceKlass* k, const char* reason);
+
+ static bool is_jfr_event_class(InstanceKlass *k);
+
+ static bool has_class_failed_verification(InstanceKlass* ik);
+
+ static bool is_builtin(InstanceKlass* k) {
+ return (k->shared_classpath_index() != UNREGISTERED_INDEX);
+ }
+
+ static void dumptime_classes_do(class MetaspaceClosure* it);
+
+ static void replace_klass_in_constantPool();
+
+ static bool is_excluded_class(InstanceKlass* k);
// The (non-application) CDS implementation supports only classes in the boot
// class loader, which ensures that the verification dependencies are the same
// during archive creation time and runtime. Thus we can do the dependency checks
@@ -69,6 +181,7 @@ public:
static void add_verification_dependency(Klass* k, Symbol* accessor_clsname,
Symbol* target_clsname) {}
static void finalize_verification_dependencies() {}
+ static void set_class_has_failed_verification(InstanceKlass* ik);
static bool check_verification_dependencies(Klass* k, Handle class_loader,
Handle protection_domain,
char** message_buffer, TRAPS) {
@@ -81,6 +194,49 @@ public:
}
return true;
}
+ static size_t estimate_size_for_archive();
+ static void write_to_archive();
+ static void write_dictionary(RunTimeSharedDictionary* dictionary, bool is_builtin);
+ static void serialize_dictionary_headers(class SerializeClosure* soc);
+ static unsigned int hash_for_shared_dictionary(address ptr);
+ static void set_shared_class_misc_info(InstanceKlass* k, ClassFileStream* cfs);
+ static InstanceKlass* lookup_from_stream(Symbol* class_name,
+ Handle class_loader,
+ Handle protection_domain,
+ const ClassFileStream* cfs,
+ TRAPS);
+
+ DEBUG_ONLY(static bool no_class_loading_should_happen() {return _no_class_loading_should_happen;})
+
+#ifdef ASSERT
+ class NoClassLoadingMark: public StackObj {
+ public:
+ NoClassLoadingMark() {
+ assert(!_no_class_loading_should_happen, "must not be nested");
+ _no_class_loading_should_happen = true;
+ }
+ ~NoClassLoadingMark() {
+ _no_class_loading_should_happen = false;
+ }
+ };
+#endif
+
+ template <typename T>
+ static unsigned int hash_for_shared_dictionary_quick(T* ptr) {
+ assert(((MetaspaceObj*)ptr)->is_shared(), "must be");
+ assert(ptr > (T*)SharedBaseAddress, "must be");
+ uintx offset = uintx(ptr) - uintx(SharedBaseAddress);
+ return primitive_hash<uintx>(offset);
+ }
+
+ static const RunTimeSharedClassInfo* find_record(RunTimeSharedDictionary* dynamic_dict, Symbol* name);
+ static InstanceKlass* acquire_class_for_current_thread(InstanceKlass *ik,
+ Handle class_loader,
+ Handle protection_domain,
+ const ClassFileStream *cfs,
+ TRAPS);
+
+ static InstanceKlass* find_dynamic_builtin_class(Symbol* name);
};
#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
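
hash_for_shared_dictionary_quick() above hashes the offset from SharedBaseAddress instead of the raw pointer, so dump time and run time agree even though the archive maps at different addresses. A minimal demonstration; the mixing function below is a stand-in, not HotSpot's primitive_hash:

    #include <cassert>
    #include <cstdint>

    // Any stable mixer works; stability across mappings is what matters.
    static uint32_t mix(uintptr_t v) {
      return (uint32_t)(v ^ (v >> 7) ^ (v >> 17));
    }

    static uint32_t hash_quick(const void* obj, const void* base) {
      uintptr_t offset = (uintptr_t)obj - (uintptr_t)base;
      return mix(offset);                 // depends only on the offset
    }

    int main() {
      char dump_map[64], run_map[64];     // two different mapping addresses
      // The "same" object sits at offset 24 in both mappings.
      assert(hash_quick(dump_map + 24, dump_map) ==
             hash_quick(run_map + 24, run_map));
      return 0;
    }
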
diff --git a/hotspot/src/share/vm/memory/allocation.hpp b/hotspot/src/share/vm/memory/allocation.hpp
index aa8f02d09..4d324b442 100644
--- a/hotspot/src/share/vm/memory/allocation.hpp
+++ b/hotspot/src/share/vm/memory/allocation.hpp
@@ -302,6 +302,11 @@ class MetaspaceObj {
Type type, Thread* thread) throw();
// can't use TRAPS from this header file.
void operator delete(void* p) { ShouldNotCallThis(); }
+
+ // Declare a *static* method with the same signature in any subclass of MetaspaceObj
+ // that should be read-only by default. See symbol.hpp for an example. This function
+ // is used by the templates in metaspaceClosure.hpp
+ static bool is_read_only_by_default() { return false; }
};
// Base class for classes that constitute name spaces.
@@ -728,6 +733,12 @@ class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
bool _use_malloc;
size_t _size;
bool _free_in_destructor;
+
+ static bool should_use_malloc(size_t size) {
+ return size < ArrayAllocatorMallocLimit;
+ }
+
+ static char* allocate_inner(size_t& size, bool& use_malloc);
public:
ArrayAllocator(bool free_in_destructor = true) :
_addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }
@@ -739,6 +750,7 @@ class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
}
E* allocate(size_t length);
+ E* reallocate(size_t new_length);
void free();
};
diff --git a/hotspot/src/share/vm/memory/allocation.inline.hpp b/hotspot/src/share/vm/memory/allocation.inline.hpp
index 9f2e1655a..2e794a8b6 100644
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp
@@ -151,35 +151,58 @@ template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
}
template <class E, MEMFLAGS F>
-E* ArrayAllocator<E, F>::allocate(size_t length) {
- assert(_addr == NULL, "Already in use");
+char* ArrayAllocator<E, F>::allocate_inner(size_t &size, bool &use_malloc) {
+ char* addr = NULL;
- _size = sizeof(E) * length;
- _use_malloc = _size < ArrayAllocatorMallocLimit;
-
- if (_use_malloc) {
- _addr = AllocateHeap(_size, F);
- if (_addr == NULL && _size >= (size_t)os::vm_allocation_granularity()) {
+ if (use_malloc) {
+ addr = AllocateHeap(size, F);
+ if (addr == NULL && size >= (size_t)os::vm_allocation_granularity()) {
// malloc failed let's try with mmap instead
- _use_malloc = false;
+ use_malloc = false;
} else {
- return (E*)_addr;
+ return addr;
}
}
int alignment = os::vm_allocation_granularity();
- _size = align_size_up(_size, alignment);
+ size = align_size_up(size, alignment);
- _addr = os::reserve_memory(_size, NULL, alignment, F);
- if (_addr == NULL) {
- vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
+ addr = os::reserve_memory(size, NULL, alignment, F);
+ if (addr == NULL) {
+ vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
}
- os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
+ os::commit_memory_or_exit(addr, size, !ExecMem, "Allocator (commit)");
+ return addr;
+}
+
+template <class E, MEMFLAGS F>
+E* ArrayAllocator<E, F>::allocate(size_t length) {
+ assert(_addr == NULL, "Already in use");
+ _size = sizeof(E) * length;
+
+ _use_malloc = should_use_malloc(_size);
+ _addr = allocate_inner(_size, _use_malloc);
return (E*)_addr;
}
+template <class E, MEMFLAGS F>
+E* ArrayAllocator<E, F>::reallocate(size_t new_length) {
+ size_t new_size = sizeof(E) * new_length;
+ bool use_malloc = should_use_malloc(new_size);
+ char* new_addr = allocate_inner(new_size, use_malloc);
+
+ memcpy(new_addr, _addr, MIN2(new_size, _size));
+
+ free();
+ _size = new_size;
+ _use_malloc = use_malloc;
+ _addr = new_addr;
+ return (E*)new_addr;
+}
+
+
template<class E, MEMFLAGS F>
void ArrayAllocator<E, F>::free() {
if (_addr != NULL) {
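
reallocate() above grows an array by allocating a fresh block, copying MIN2(new_size, _size) bytes, and freeing the old block, which lets a buffer migrate between malloc and mmap backing. The same allocate-copy-free pattern, stand-alone:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    // Grow (or shrink) p from old_size to new_size without realloc(),
    // mirroring ArrayAllocator::reallocate; allocator selection elided.
    static char* grow(char* p, size_t old_size, size_t new_size) {
      char* q = (char*)std::malloc(new_size);
      if (q == nullptr) return nullptr;                 // caller keeps old block
      std::memcpy(q, p, std::min(old_size, new_size));  // keep the common prefix
      std::free(p);
      return q;
    }

    int main() {
      char* p = (char*)std::malloc(4);
      std::memcpy(p, "abc", 4);
      p = grow(p, 4, 16);
      assert(p != nullptr && std::strcmp(p, "abc") == 0);
      std::free(p);
      return 0;
    }
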
diff --git a/hotspot/src/share/vm/memory/filemap.cpp b/hotspot/src/share/vm/memory/filemap.cpp
index 99b1f58d0..3f4106476 100644
--- a/hotspot/src/share/vm/memory/filemap.cpp
+++ b/hotspot/src/share/vm/memory/filemap.cpp
@@ -24,6 +24,8 @@
#include "jvm.h"
#include "precompiled.hpp"
+#include "cds/archiveBuilder.hpp"
+#include "cds/dynamicArchive.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp"
@@ -140,19 +142,33 @@ template <int N> static void get_header_version(char (&header_version) [N]) {
}
}
-FileMapInfo::FileMapInfo() {
- assert(_current_info == NULL, "must be singleton"); // not thread safe
- _current_info = this;
+FileMapInfo::FileMapInfo(bool is_static) {
memset(this, 0, sizeof(FileMapInfo));
+ _is_static = is_static;
+
+ if (is_static) {
+ assert(_current_info == NULL, "must be singleton"); // not thread safe
+ _current_info = this;
+ _header = SharedClassUtil::allocate_file_map_header();
+ } else {
+ assert(_dynamic_archive_info == NULL, "must be singleton"); // not thread safe
+ _dynamic_archive_info = this;
+ _header = SharedClassUtil::allocate_dynamic_archive_header();
+ }
+
+ _header->_version = _invalid_version;
_file_offset = 0;
_file_open = false;
- _header = SharedClassUtil::allocate_file_map_header();
- _header->_version = _invalid_version;
}
FileMapInfo::~FileMapInfo() {
- assert(_current_info == this, "must be singleton"); // not thread safe
- _current_info = NULL;
+ if (_is_static) {
+ assert(_current_info == this, "must be singleton"); // not thread safe
+ _current_info = NULL;
+ } else {
+ assert(_dynamic_archive_info == this, "must be singleton"); // not thread safe
+ _dynamic_archive_info = NULL;
+ }
}
void FileMapInfo::populate_header(size_t alignment) {
@@ -163,14 +179,66 @@ size_t FileMapInfo::FileMapHeader::data_size() {
return SharedClassUtil::file_map_header_size() - sizeof(FileMapInfo::FileMapHeaderBase);
}
+size_t FileMapInfo::DynamicArchiveHeader::data_size() {
+ return sizeof(FileMapInfo::DynamicArchiveHeader) - sizeof(FileMapInfo::FileMapHeaderBase);
+}
+
+bool FileMapInfo::DynamicArchiveHeader::validate() {
+ if (_magic != CDS_DYNAMIC_ARCHIVE_MAGIC) {
+ FileMapInfo::fail_continue("The shared archive file has a bad magic number.");
+ return false;
+ }
+ if (VerifySharedSpaces && compute_crc() != _crc) {
+ fail_continue("Header checksum verification failed.");
+ return false;
+ }
+ if (_version != current_version()) {
+ FileMapInfo::fail_continue("The shared archive file is the wrong version.");
+ return false;
+ }
+ char header_version[JVM_IDENT_MAX];
+ get_header_version(header_version);
+ if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
+ if (TraceClassPaths) {
+ tty->print_cr("Expected: %s", header_version);
+ tty->print_cr("Actual: %s", _jvm_ident);
+ }
+ FileMapInfo::fail_continue("The shared archive file was created by a different"
+ " version or build of HotSpot");
+ return false;
+ }
+ if (_obj_alignment != ObjectAlignmentInBytes) {
+ FileMapInfo::fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
+ " does not equal the current ObjectAlignmentInBytes of %d.",
+ _obj_alignment, ObjectAlignmentInBytes);
+ return false;
+ }
+
+ // TODO: add more validation checks
+
+ return true;
+}
+
void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment) {
- _magic = 0xf00baba2;
- _version = _current_version;
+ if (DynamicDumpSharedSpaces) {
+ _magic = CDS_DYNAMIC_ARCHIVE_MAGIC;
+ } else {
+ _magic = CDS_ARCHIVE_MAGIC;
+ }
+ _version = current_version();
_alignment = alignment;
_obj_alignment = ObjectAlignmentInBytes;
- _classpath_entry_table_size = mapinfo->_classpath_entry_table_size;
- _classpath_entry_table = mapinfo->_classpath_entry_table;
- _classpath_entry_size = mapinfo->_classpath_entry_size;
+ /* TODO
+ _compressed_oops = UseCompressedOops;
+ _compressed_class_ptrs = UseCompressedClassPointers;
+ _max_heap_size = MaxHeapSize;
+ _narrow_klass_shift = CompressedKlassPointers::shift();
+ */
+ if (!DynamicDumpSharedSpaces) {
+ _classpath_entry_table_size = mapinfo->_classpath_entry_table_size;
+ _classpath_entry_table = mapinfo->_classpath_entry_table;
+ _classpath_entry_size = mapinfo->_classpath_entry_size;
+ }
// The following fields are for sanity checks for whether this archive
// will function correctly with this JVM and the bootclasspath it's
@@ -303,62 +371,174 @@ bool FileMapInfo::validate_classpath_entry_table() {
return true;
}
+bool FileMapInfo::get_base_archive_name_from_header(const char* archive_name,
+ int* size, char** base_archive_name) {
+ int fd = os::open(archive_name, O_RDONLY | O_BINARY, 0);
+ if (fd < 0) {
+ *size = 0;
+ return false;
+ }
-// Read the FileMapInfo information from the file.
-
-bool FileMapInfo::init_from_file(int fd) {
- size_t sz = _header->data_size();
- char* addr = _header->data();
+ // read the header as a dynamic archive header
+ DynamicArchiveHeader* dynamic_header = SharedClassUtil::allocate_dynamic_archive_header();
+ size_t sz = dynamic_header->data_size();
+ char* addr = dynamic_header->data();
size_t n = os::read(fd, addr, (unsigned int)sz);
if (n != sz) {
fail_continue("Unable to read the file header.");
+ delete dynamic_header;
+ os::close(fd);
return false;
}
- if (_header->_version != current_version()) {
- fail_continue("The shared archive file has the wrong version.");
+ if (dynamic_header->magic() != CDS_DYNAMIC_ARCHIVE_MAGIC) {
+ // Not a dynamic header, no need to proceed further.
+ *size = 0;
+ delete dynamic_header;
+ os::close(fd);
return false;
}
- size_t info_size = _header->_paths_misc_info_size;
- _paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass);
- if (_paths_misc_info == NULL) {
- fail_continue("Unable to read the file header.");
+ // read the base archive name
+ size_t name_size = dynamic_header->base_archive_name_size();
+ if (name_size == 0) {
+ delete dynamic_header;
+ os::close(fd);
return false;
}
- n = os::read(fd, _paths_misc_info, (unsigned int)info_size);
- if (n != info_size) {
- fail_continue("Unable to read the shared path info header.");
- FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
- _paths_misc_info = NULL;
+ *base_archive_name = NEW_C_HEAP_ARRAY(char, name_size, mtInternal);
+ n = os::read(fd, *base_archive_name, (unsigned int)name_size);
+ if (n != name_size) {
+ fail_continue("Unable to read the base archive name from the header.");
+ FREE_C_HEAP_ARRAY(char, *base_archive_name, mtInternal);
+ *base_archive_name = NULL;
+ delete dynamic_header;
+ os::close(fd);
return false;
}
- size_t len = lseek(fd, 0, SEEK_END);
- struct FileMapInfo::FileMapHeader::space_info* si =
- &_header->_space[MetaspaceShared::mc];
- if (si->_file_offset >= len || len - si->_file_offset < si->_used) {
- fail_continue("The shared archive file has been truncated.");
+ delete dynamic_header;
+ os::close(fd);
+ return true;
+}
+
+bool FileMapInfo::check_archive(const char* archive_name, bool is_static) {
+ int fd = os::open(archive_name, O_RDONLY | O_BINARY, 0);
+ if (fd < 0) {
+ // do not vm_exit_during_initialization here because Arguments::init_shared_archive_paths()
+ // requires a shared archive name. The open_for_read() function will log a message regarding
+ // failure in opening a shared archive.
return false;
}
- _file_offset += (long)n;
+ FileMapHeader* header = NULL;
+ if (is_static) {
+ header = SharedClassUtil::allocate_file_map_header();
+ } else {
+ header = SharedClassUtil::allocate_dynamic_archive_header();
+ }
+
+ size_t sz = header->data_size();
+ size_t n = os::read(fd, header->data(), (unsigned int)sz);
+ if (n != sz) {
+ delete header;
+ os::close(fd);
+ vm_exit_during_initialization("Unable to read header from shared archive", archive_name);
+ return false;
+ }
+ if (is_static) {
+ FileMapHeader* static_header = (FileMapHeader*)header;
+ if (static_header->magic() != CDS_ARCHIVE_MAGIC) {
+ delete header;
+ os::close(fd);
+ vm_exit_during_initialization("Not a base shared archive", archive_name);
+ return false;
+ }
+ } else {
+ DynamicArchiveHeader* dynamic_header = (DynamicArchiveHeader*)header;
+ if (dynamic_header->magic() != CDS_DYNAMIC_ARCHIVE_MAGIC) {
+ delete header;
+ os::close(fd);
+ vm_exit_during_initialization("Not a top shared archive", archive_name);
+ return false;
+ }
+ }
+ delete header;
+ os::close(fd);
+ return true;
+}
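
[Editor's note, not part of the patch] The two functions above identify an archive purely by the 32-bit magic number at the start of the header: the header data is written from file offset 0 with _magic as its first field, and a dynamic archive additionally carries the base archive's path immediately after the header. A minimal standalone sketch of the same check, assuming a same-endian machine and using only the two magic constants this patch defines in filemap.hpp:

    // sketch.cpp -- illustrative only; peek at an archive's magic number
    #include <cstdio>
    #include <cstdint>

    static const uint32_t CDS_ARCHIVE_MAGIC         = 0xf00baba2;  // static (base) archive
    static const uint32_t CDS_DYNAMIC_ARCHIVE_MAGIC = 0xf00baba8;  // dynamic (top) archive

    int main(int argc, char** argv) {
      if (argc != 2) { std::fprintf(stderr, "usage: %s <archive.jsa>\n", argv[0]); return 2; }
      std::FILE* f = std::fopen(argv[1], "rb");
      if (f == NULL) { std::perror("open"); return 1; }
      uint32_t magic = 0;
      size_t n = std::fread(&magic, sizeof(magic), 1, f);
      std::fclose(f);
      if (n != 1)                                  std::printf("unreadable\n");
      else if (magic == CDS_ARCHIVE_MAGIC)         std::printf("static (base) archive\n");
      else if (magic == CDS_DYNAMIC_ARCHIVE_MAGIC) std::printf("dynamic (top) archive\n");
      else                                         std::printf("not a CDS archive\n");
      return 0;
    }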
+
+// Read the FileMapInfo information from the file.
+
+bool FileMapInfo::init_from_file(int fd) {
+ size_t sz = header()->data_size();
+ char* addr = header()->data();
+ size_t n = os::read(fd, addr, (unsigned int)sz);
+ if (n != sz) {
+ fail_continue("Unable to read the file header.");
+ return false;
+ }
+
+ _file_offset += n;
+
+ if (is_static()) {
+ size_t info_size = _header->_paths_misc_info_size;
+ _paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass);
+ if (_paths_misc_info == NULL) {
+ fail_continue("Unable to read the file header.");
+ return false;
+ }
+ n = os::read(fd, _paths_misc_info, (unsigned int)info_size);
+ if (n != info_size) {
+ fail_continue("Unable to read the shared path info header.");
+ FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
+ _paths_misc_info = NULL;
+ return false;
+ }
+
+ // just checking the last region is sufficient since the archive is written
+ // in sequential order
+ size_t len = lseek(fd, 0, SEEK_END);
+ struct FileMapInfo::FileMapHeader::space_info* si =
+ &_header->_space[MetaspaceShared::mc];
+ if (si->_file_offset >= len || len - si->_file_offset < si->_used) {
+ fail_continue("The shared archive file has been truncated.");
+ return false;
+ }
+
+ _file_offset += n;
+ } else {
+ _file_offset += dynamic_header()->base_archive_name_size(); // accounts for the size of _base_archive_name
+ }
+
return true;
}
// Read the FileMapInfo information from the file.
bool FileMapInfo::open_for_read() {
- _full_path = make_log_name(Arguments::GetSharedArchivePath(), NULL);
- int fd = open(_full_path, O_RDONLY | O_BINARY, 0);
+ if (_file_open) {
+ return true;
+ }
+ if (is_static()) {
+ _full_path = Arguments::GetSharedArchivePath();
+ } else {
+ _full_path = Arguments::GetSharedDynamicArchivePath();
+ }
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("trying to map %s", _full_path);
+ }
+ int fd = os::open(_full_path, O_RDONLY | O_BINARY, 0);
if (fd < 0) {
if (errno == ENOENT) {
- // Not locating the shared archive is ok.
- fail_continue("Specified shared archive not found. archive file path:%s", _full_path);
+ fail_continue("Specified shared archive not found (%s).", _full_path);
} else {
- fail_continue("Failed to open shared archive file (%s).",
- strerror(errno));
+ fail_continue("Failed to open shared archive file (%s).", strerror(errno));
}
return false;
+ } else {
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Opened archive %s.", _full_path);
+ }
}
_fd = fd;
@@ -368,7 +548,7 @@ bool FileMapInfo::open_for_read() {
// Write the FileMapInfo information to the file.
void FileMapInfo::open_for_write() {
- if (UseAppCDS && AppCDSLockFile != NULL) {
+ if ((DynamicDumpSharedSpaces || UseAppCDS) && AppCDSLockFile != NULL) {
char* pos = strrchr(const_cast<char*>(AppCDSLockFile), '/');
#ifdef __linux__
if (pos != NULL && pos != AppCDSLockFile) { // No directory path specified
@@ -391,14 +571,18 @@ void FileMapInfo::open_for_write() {
int lock_fd = open(_appcds_file_lock_path, O_CREAT | O_WRONLY | O_EXCL, S_IRUSR | S_IWUSR);
if (lock_fd < 0) {
tty->print_cr("Failed to create jsa file !\n Please check: \n 1. The directory exists.\n "
- "2. You have the permission.\n 3. Make sure no other process using the same lock file.\n");
+                  "2. You have the permission.\n 3. Make sure no other process is using the same lock file.\n");
fail_stop("Failed to create appcds lock file, the lock path is: %s.", _appcds_file_lock_path);
}
tty->print_cr("You are using file lock %s in concurrent mode", AppCDSLockFile);
}
#endif
}
- _full_path = make_log_name(Arguments::GetSharedArchivePath(), NULL);
+ if (is_static()) {
+ _full_path = make_log_name(Arguments::GetSharedArchivePath(), NULL);
+ } else {
+ _full_path = make_log_name(Arguments::GetSharedDynamicArchivePath(), NULL);
+ }
if (PrintSharedSpaces) {
tty->print_cr("Dumping shared data to file: ");
tty->print_cr(" %s", _full_path);
@@ -436,6 +620,18 @@ void FileMapInfo::write_header() {
align_file_position();
}
+void FileMapInfo::write_dynamic_header() {
+ align_file_position();
+ size_t sz = _header->data_size();
+ char* addr = _header->data();
+ write_bytes(addr, (int)sz); // skip the C++ vtable
+
+ char* base_archive_name = (char*)Arguments::GetSharedArchivePath();
+ if (base_archive_name != NULL) {
+ write_bytes(base_archive_name, dynamic_header()->base_archive_name_size());
+ }
+ align_file_position();
+}
// Dump shared spaces to file.
@@ -464,7 +660,15 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
} else {
si->_file_offset = _file_offset;
}
- si->_base = base;
+ if (is_static()) {
+ si->_base = base;
+ } else {
+ if (region == MetaspaceShared::d_bm) {
+ si->_base = NULL; // always NULL for bm region
+ } else {
+ si->_base = ArchiveBuilder::current()->to_requested(base);
+ }
+ }
si->_used = size;
si->_capacity = capacity;
si->_read_only = read_only;
@@ -473,7 +677,16 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
write_bytes_aligned(base, (int)size);
}
+char* FileMapInfo::write_bitmap_region(const BitMap* ptrmap) {
+ size_t size_in_bits = ptrmap->size();
+ size_t size_in_bytes = ptrmap->size_in_words() * BytesPerWord;
+ char* buffer = NEW_C_HEAP_ARRAY(char, size_in_bytes, mtClassShared);
+ ptrmap->write_to((BitMap::bm_word_t*)buffer, size_in_bytes);
+ dynamic_header()->set_ptrmap_size_in_bits(size_in_bits);
+ write_region(MetaspaceShared::d_bm, (char*)buffer, size_in_bytes, size_in_bytes, /*read_only=*/true, /*allow_exec=*/false);
+ return buffer;
+}
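
[Editor's note, not part of the patch] write_bitmap_region() serializes a BitMap with one bit per pointer-sized slot of the dump buffer; a set bit marks a slot holding a pointer that must be patched if the archive maps at an address other than the requested base. A self-contained sketch of how such a map can be built (SimpleBitMap and mark_embedded_pointers are made-up names, not HotSpot APIs):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    typedef uintptr_t bm_word_t;
    static const size_t BITS_PER_WORD = 8 * sizeof(bm_word_t);

    struct SimpleBitMap {
      std::vector<bm_word_t> words;
      explicit SimpleBitMap(size_t bits)
        : words((bits + BITS_PER_WORD - 1) / BITS_PER_WORD, 0) {}
      void set(size_t i) { words[i / BITS_PER_WORD] |= (bm_word_t)1 << (i % BITS_PER_WORD); }
    };

    // Mark every pointer-sized slot of 'buffer' that points back into the
    // buffer itself: exactly those slots need relocation after mapping.
    static void mark_embedded_pointers(char* buffer, size_t size, SimpleBitMap* bm) {
      size_t nslots = size / sizeof(char*);
      for (size_t i = 0; i < nslots; i++) {
        char* p = *((char**)buffer + i);
        if (p >= buffer && p < buffer + size) {
          bm->set(i);
        }
      }
    }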
// Dump bytes to file -- at the current file position.
void FileMapInfo::write_bytes(const void* buffer, int nbytes) {
@@ -542,7 +755,7 @@ void FileMapInfo::close() {
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private.
bool FileMapInfo::remap_shared_readonly_as_readwrite() {
- struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
+ struct FileMapInfo::FileMapHeader::space_info* si = is_static() ? &_header->_space[0] : &_header->_space[1];
if (!si->_read_only) {
// the space is already readwrite so we are done
return true;
@@ -570,10 +783,14 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
// Map the whole region at once, assumed to be allocated contiguously.
ReservedSpace FileMapInfo::reserve_shared_memory() {
- struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
- char* requested_addr = si->_base;
+ char* requested_addr = region_base(0);
+ size_t size = 0;
- size_t size = FileMapInfo::shared_spaces_size();
+ if (is_static()) {
+ size = FileMapInfo::shared_spaces_size();
+ } else {
+ size = align_up((uintptr_t)region_end(1) - (uintptr_t)region_base(0), (size_t)os::vm_allocation_granularity());
+ }
// Reserve the space first, then map otherwise map will go right over some
// other reserved memory (like the code cache).
@@ -648,6 +865,7 @@ void FileMapInfo::assert_mark(bool check) {
FileMapInfo* FileMapInfo::_current_info = NULL;
+FileMapInfo* FileMapInfo::_dynamic_archive_info = NULL;
SharedClassPathEntry* FileMapInfo::_classpath_entry_table = NULL;
int FileMapInfo::_classpath_entry_table_size = 0;
size_t FileMapInfo::_classpath_entry_size = 0x1234baad;
@@ -674,19 +892,26 @@ bool FileMapInfo::initialize() {
if (!open_for_read()) {
return false;
}
-
- init_from_file(_fd);
+ if (!init_from_file(_fd)) {
+ return false;
+ }
if (!validate_header()) {
return false;
}
- SharedReadOnlySize = _header->_space[0]._capacity;
- SharedReadWriteSize = _header->_space[1]._capacity;
- SharedMiscDataSize = _header->_space[2]._capacity;
- SharedMiscCodeSize = _header->_space[3]._capacity;
+ if (is_static()) {
+ SharedReadOnlySize = _header->_space[0]._capacity;
+ SharedReadWriteSize = _header->_space[1]._capacity;
+ SharedMiscDataSize = _header->_space[2]._capacity;
+ SharedMiscCodeSize = _header->_space[3]._capacity;
+ }
return true;
}
+void FileMapInfo::DynamicArchiveHeader::set_as_offset(char* p, size_t *offset) {
+ *offset = ArchiveBuilder::current()->any_to_offset((address)p);
+}
+
int FileMapInfo::FileMapHeader::compute_crc() {
char* header = data();
// start computing from the field after _crc
@@ -701,7 +926,7 @@ int FileMapInfo::compute_header_crc() {
}
bool FileMapInfo::FileMapHeader::validate() {
- if (_magic != (int)0xf00baba2) {
+ if (_magic != CDS_ARCHIVE_MAGIC) {
FileMapInfo::fail_continue("The shared archive file has a bad magic number.");
return false;
}
@@ -738,6 +963,10 @@ bool FileMapInfo::FileMapHeader::validate() {
bool FileMapInfo::validate_header() {
bool status = _header->validate();
+ if (status && !is_static()) {
+ return DynamicArchive::validate(this);
+ }
+
if (status) {
if (!ClassLoader::check_shared_paths_misc_info(_paths_misc_info, _header->_paths_misc_info_size)) {
if (!PrintSharedArchiveAndExit) {
@@ -761,7 +990,13 @@ bool FileMapInfo::validate_header() {
// Return:
// True if the p is within the mapped shared space, otherwise, false.
bool FileMapInfo::is_in_shared_space(const void* p) {
- for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+ int count = 0;
+ if (is_static()) {
+ count = MetaspaceShared::n_regions;
+ } else {
+ count = MetaspaceShared::d_n_regions;
+ }
+ for (int i = 0; i < count; i++) {
if (p >= _header->_space[i]._base &&
p < _header->_space[i]._base + _header->_space[i]._used) {
return true;
@@ -772,6 +1007,11 @@ bool FileMapInfo::is_in_shared_space(const void* p) {
}
void FileMapInfo::print_shared_spaces() {
+ // TODO: support dynamic archive
+ if (!is_static()) {
+ return;
+ }
+
gclog_or_tty->print_cr("Shared Spaces:");
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
diff --git a/hotspot/src/share/vm/memory/filemap.hpp b/hotspot/src/share/vm/memory/filemap.hpp
index 0eee1c7ea..eab9ebcfc 100644
--- a/hotspot/src/share/vm/memory/filemap.hpp
+++ b/hotspot/src/share/vm/memory/filemap.hpp
@@ -27,6 +27,8 @@
#include "memory/metaspaceShared.hpp"
#include "memory/metaspace.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
// Layout of the file:
// header: dump of archive instance plus versioning info, datestamp, etc.
@@ -37,8 +39,12 @@
// misc data (block offset table, string table, symbols, dictionary, etc.)
// tag(666)
+#define CDS_ARCHIVE_MAGIC 0xf00baba2
+#define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8
+
static const int JVM_IDENT_MAX = 256;
+class BitMap;
class Metaspace;
class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
@@ -56,11 +62,13 @@ private:
friend class ManifestStream;
enum {
_invalid_version = -1,
- _current_version = 2
+    _current_version = 3
};
- bool _file_open;
- int _fd;
+ bool _is_static;
+ bool _file_open;
+ bool _is_mapped;
+ int _fd;
size_t _file_offset;
private:
@@ -77,20 +85,21 @@ public:
struct FileMapHeaderBase : public CHeapObj<mtClass> {
virtual bool validate() = 0;
virtual void populate(FileMapInfo* info, size_t alignment) = 0;
- };
- struct FileMapHeader : FileMapHeaderBase {
// Use data() and data_size() to memcopy to/from the FileMapHeader. We need to
// avoid read/writing the C++ vtable pointer.
- static size_t data_size();
+ virtual size_t data_size() = 0;
+ };
+ struct FileMapHeader : FileMapHeaderBase {
+ size_t data_size();
char* data() {
return ((char*)this) + sizeof(FileMapHeaderBase);
}
- int _magic; // identify file type.
- int _crc; // header crc checksum.
- int _version; // (from enum, above.)
- size_t _alignment; // how shared archive should be aligned
- int _obj_alignment; // value of ObjectAlignmentInBytes
+ unsigned int _magic; // identify file type.
+ int _crc; // header crc checksum.
+ int _version; // (from enum, above.)
+ size_t _alignment; // how shared archive should be aligned
+ int _obj_alignment; // value of ObjectAlignmentInBytes
struct space_info {
int _crc; // crc checksum of the current space
@@ -137,7 +146,48 @@ public:
virtual bool validate();
virtual void populate(FileMapInfo* info, size_t alignment);
+ int crc() { return _crc; }
+ int space_crc(int i) { return _space[i]._crc; }
int compute_crc();
+ unsigned int magic() const { return _magic; }
+ const char* jvm_ident() const { return _jvm_ident; }
+ };
+
+ // Fixme
+ struct DynamicArchiveHeader : FileMapHeader {
+ private:
+ int _base_header_crc;
+ int _base_region_crc[MetaspaceShared::n_regions];
+ char* _requested_base_address; // Archive relocation is not necessary if we map with this base address.
+ size_t _ptrmap_size_in_bits; // Size of pointer relocation bitmap
+ size_t _base_archive_name_size;
+ size_t _serialized_data_offset; // Data accessed using {ReadClosure,WriteClosure}::serialize()
+
+ public:
+ size_t data_size();
+ int base_header_crc() const { return _base_header_crc; }
+ int base_region_crc(int i) const {
+ return _base_region_crc[i];
+ }
+
+ void set_base_header_crc(int c) { _base_header_crc = c; }
+ void set_base_region_crc(int i, int c) {
+ _base_region_crc[i] = c;
+ }
+
+ void set_requested_base(char* b) {
+ _requested_base_address = b;
+ }
+ size_t ptrmap_size_in_bits() const { return _ptrmap_size_in_bits; }
+ void set_ptrmap_size_in_bits(size_t s) { _ptrmap_size_in_bits = s; }
+ void set_base_archive_name_size(size_t s) { _base_archive_name_size = s; }
+ size_t base_archive_name_size() { return _base_archive_name_size; }
+ void set_as_offset(char* p, size_t *offset);
+ char* from_mapped_offset(size_t offset) const { return _requested_base_address + offset; }
+ void set_serialized_data(char* p) { set_as_offset(p, &_serialized_data_offset); }
+ char* serialized_data() const { return from_mapped_offset(_serialized_data_offset); }
+
+ virtual bool validate();
};
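
[Editor's note, not part of the patch] The offset fields above (e.g. _serialized_data_offset) implement a simple position-independence scheme: pointers into the archive are stored relative to _requested_base_address, so the header stays meaningful no matter where the file ends up mapped. A two-method sketch of the conversion pair set_as_offset()/from_mapped_offset() (OffsetCodec is a hypothetical name):

    struct OffsetCodec {
      char* requested_base;  // e.g. the dump-time SharedBaseAddress
      size_t to_offset(char* p)     const { return (size_t)(p - requested_base); }
      char*  to_pointer(size_t off) const { return requested_base + off; }
    };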
FileMapHeader * _header;
@@ -147,32 +197,52 @@ public:
char* _paths_misc_info;
static FileMapInfo* _current_info;
+ static FileMapInfo* _dynamic_archive_info;
+ static bool get_base_archive_name_from_header(const char* archive_name,
+ int* size, char** base_archive_name);
+ static bool check_archive(const char* archive_name, bool is_static);
bool init_from_file(int fd);
void align_file_position();
bool validate_header_impl();
public:
- FileMapInfo();
+ FileMapInfo(bool is_static = true);
~FileMapInfo();
static int current_version() { return _current_version; }
int compute_header_crc();
void set_header_crc(int crc) { _header->_crc = crc; }
+ int space_crc(int i) { return _header->_space[i]._crc; }
void populate_header(size_t alignment);
bool validate_header();
void invalidate();
+ int crc() { return _header->_crc; }
int version() { return _header->_version; }
size_t alignment() { return _header->_alignment; }
size_t space_capacity(int i) { return _header->_space[i]._capacity; }
+ size_t used(int i) { return _header->_space[i]._used; }
+ size_t used_aligned(int i) { return align_up(used(i), (size_t)os::vm_allocation_granularity()); }
char* region_base(int i) { return _header->_space[i]._base; }
+ char* region_end(int i) { return region_base(i) + used_aligned(i); }
struct FileMapHeader* header() { return _header; }
+ struct DynamicArchiveHeader* dynamic_header() {
+ // assert(!is_static(), "must be");
+ return (struct DynamicArchiveHeader*)header();
+ }
+
+ void set_header_base_archive_name_size(size_t size) { dynamic_header()->set_base_archive_name_size(size); }
static FileMapInfo* current_info() {
CDS_ONLY(return _current_info;)
NOT_CDS(return NULL;)
}
+ static FileMapInfo* dynamic_info() {
+ CDS_ONLY(return _dynamic_archive_info;)
+ NOT_CDS(return NULL;)
+ }
+
static void assert_mark(bool check);
// File manipulation.
@@ -180,18 +250,24 @@ public:
bool open_for_read();
void open_for_write();
void write_header();
+ void write_dynamic_header();
void write_space(int i, Metaspace* space, bool read_only);
void write_region(int region, char* base, size_t size,
size_t capacity, bool read_only, bool allow_exec);
+ char* write_bitmap_region(const BitMap* ptrmap);
void write_bytes(const void* buffer, int count);
void write_bytes_aligned(const void* buffer, int count);
char* map_region(int i);
void unmap_region(int i);
bool verify_region_checksum(int i);
void close();
- bool is_open() { return _file_open; }
+ bool is_open() { return _file_open; }
+ bool is_static() const { return _is_static; }
+ bool is_mapped() const { return _is_mapped; }
+ void set_is_mapped(bool v) { _is_mapped = v; }
ReservedSpace reserve_shared_memory();
-
+ void set_requested_base(char* b) { dynamic_header()->set_requested_base(b); }
+ char* serialized_data() { return dynamic_header()->serialized_data(); }
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private.
bool remap_shared_readonly_as_readwrite();
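
[Editor's note, not part of the patch] Worked example for used_aligned()/region_end() in the hunk above: with used(i) = 0x5432 and a 4 KB allocation granularity, align_up(0x5432, 0x1000) = 0x6000, so region_end(i) = region_base(i) + 0x6000. The dynamic archive code sizes its mapping from region_base(0) to region_end(1) using exactly this rounding.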
diff --git a/hotspot/src/share/vm/memory/iterator.hpp b/hotspot/src/share/vm/memory/iterator.hpp
index 62204eea7..dc01186a2 100644
--- a/hotspot/src/share/vm/memory/iterator.hpp
+++ b/hotspot/src/share/vm/memory/iterator.hpp
@@ -378,6 +378,13 @@ public:
// for verification that sections of the serialized data are of the
// correct length.
virtual void do_tag(int tag) = 0;
+
+ // Read/write the 32-bit unsigned integer pointed to by p.
+ virtual void do_u4(u4* p) { }
+
+ bool writing() {
+ return !reading();
+ }
};
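
[Editor's note, not part of the patch] The new do_u4() hook, together with the existing do_tag(), lets one function describe a data layout for both directions: a write closure stores the values at dump time and a read closure restores them at runtime. A sketch of the pattern, assuming iterator.hpp is included (Header and serialize_header are illustrative names, not part of the patch):

    struct Header { u4 entry_count; u4 bucket_count; };

    static void serialize_header(SerializeClosure* soc, Header* h) {
      soc->do_u4(&h->entry_count);   // written during dump, read back at runtime
      soc->do_u4(&h->bucket_count);
      soc->do_tag(666);              // catches layout drift between the two phases
    }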
class SymbolClosure : public StackObj {
diff --git a/hotspot/src/share/vm/memory/metaspace.cpp b/hotspot/src/share/vm/memory/metaspace.cpp
index 2912f41b6..7e95b5c0b 100644
--- a/hotspot/src/share/vm/memory/metaspace.cpp
+++ b/hotspot/src/share/vm/memory/metaspace.cpp
@@ -37,6 +37,7 @@
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
@@ -426,8 +427,16 @@ VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(
assert(shared_base == 0 || _rs.base() == shared_base, "should match");
} else {
// Get a mmap region anywhere if the SharedBaseAddress fails.
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Could not allocate static space at request address: " INTPTR_FORMAT, p2i(shared_base));
+ }
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
}
+ // ...failing that, give up.
+ if (!_rs.is_reserved()) {
+ vm_exit_during_initialization(
+ err_msg("Could not allocate static shared space: " SIZE_FORMAT " bytes", bytes));
+ }
MetaspaceShared::set_shared_rs(&_rs);
} else
#endif
@@ -3322,21 +3331,80 @@ void Metaspace::global_initialize() {
// the addresses don't conflict)
address cds_address = NULL;
if (UseSharedSpaces) {
- FileMapInfo* mapinfo = new FileMapInfo();
+ FileMapInfo* static_mapinfo = new FileMapInfo();
+ FileMapInfo* dynamic_mapinfo = new FileMapInfo(false);
// Open the shared archive file, read and validate the header. If
// initialization fails, shared spaces [UseSharedSpaces] are
// disabled and the file is closed.
- // Map in spaces now also
- if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
+ //
+ // This will reserve two address spaces suitable to house Klass structures, one
+ // for the cds archives (static archive and optionally dynamic archive) and
+  // optionally one more for ccs.
+ //
+ // Since both spaces must fall within the compressed class pointer encoding
+ // range, they are allocated close to each other.
+ //
+ // Space for archives will be reserved first, followed by a potential gap,
+ // followed by the space for ccs:
+ //
+ // +-- Base address End
+ // | |
+ // v v
+ // +------------+ +-------------+ +--------------------+
+ // | static arc | [align] | [dyn. arch] | [align] | compr. class space |
+ // +------------+ +-------------+ +--------------------+
+ //
+ // (The gap may result from different alignment requirements between metaspace
+ // and CDS)
+ //
+ // If UseCompressedClassPointers is disabled, only one address space will be
+ // reserved:
+ //
+ // +-- Base address End
+ // | |
+ // v v
+ // +------------+ +-------------+
+ // | static arc | [align] | [dyn. arch] |
+ // +------------+ +-------------+
+ //
+  // If UseCompressedClassPointers is enabled, the range encompassing both spaces
+  // will be suitable to en/decode narrow Klass pointers: the base will be valid
+  // for encoding, and the range [Base, End) will not surpass KlassEncodingMetaspaceMax.
+ if (static_mapinfo->initialize() && MetaspaceShared::map_shared_spaces(static_mapinfo)) {
cds_total = FileMapInfo::shared_spaces_size();
- cds_address = (address)mapinfo->region_base(0);
+ cds_address = (address)static_mapinfo->region_base(0);
+ MetaspaceShared::set_shared_metaspace_static_bottom(cds_address);
+ // Update SharedBaseAddress to the same value as the dump phase.
+ SharedBaseAddress = (size_t)cds_address;
+ if (!DynamicDumpSharedSpaces &&
+ (Arguments::GetSharedDynamicArchivePath() != NULL) &&
+ dynamic_mapinfo->initialize() &&
+ MetaspaceShared::map_shared_spaces(dynamic_mapinfo)) {
+ cds_total += align_up(dynamic_mapinfo->region_end(1) - dynamic_mapinfo->region_base(0),
+ (size_t)os::vm_allocation_granularity());
+ } else {
+ assert(!dynamic_mapinfo->is_open(),
+ "dynamic archive file not closed or shared spaces not disabled.");
+ }
} else {
- assert(!mapinfo->is_open() && !UseSharedSpaces,
- "archive file not closed or shared spaces not disabled.");
+ assert(!static_mapinfo->is_open() && !UseSharedSpaces,
+ "static archive file not closed or shared spaces not disabled.");
+ }
+
+ if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
+ delete static_mapinfo;
+ }
+ if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
+ delete dynamic_mapinfo;
}
}
+
+ if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
+ vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
+ }
#endif // INCLUDE_CDS
+
#ifdef _LP64
// If UseCompressedClassPointers is set then allocate the metaspace area
// above the heap and above the CDS area (if it exists).
diff --git a/hotspot/src/share/vm/memory/metaspace.hpp b/hotspot/src/share/vm/memory/metaspace.hpp
index 3920004a8..2b06cb620 100644
--- a/hotspot/src/share/vm/memory/metaspace.hpp
+++ b/hotspot/src/share/vm/memory/metaspace.hpp
@@ -82,6 +82,7 @@ class VirtualSpaceList;
// quantum of metadata.
class Metaspace : public CHeapObj<mtClass> {
+ friend class ArchiveBuilder;
friend class VMStructs;
friend class SpaceManager;
friend class VM_CollectForMetadataAllocation;
diff --git a/hotspot/src/share/vm/memory/metaspaceClosure.cpp b/hotspot/src/share/vm/memory/metaspaceClosure.cpp
new file mode 100644
index 000000000..00ec8fced
--- /dev/null
+++ b/hotspot/src/share/vm/memory/metaspaceClosure.cpp
@@ -0,0 +1,87 @@
+#include "precompiled.hpp"
+#include "memory/metaspaceClosure.hpp"
+
+// Update the reference to point to new_loc.
+void MetaspaceClosure::Ref::update(address new_loc) const {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
+ p2i(mpp()), p2i(obj()), p2i(new_loc));
+ }
+ uintx p = (uintx)new_loc;
+ p |= flag_bits(); // Make sure the flag bits are copied to the new pointer.
+ *(address*)mpp() = (address)p;
+}
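
[Editor's note, not part of the patch] A self-contained check of the flag-preserving arithmetic above (the addresses are made up; FLAG_MASK mirrors the 2-bit mask declared in metaspaceClosure.hpp):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t FLAG_MASK = 0x03;
      uintptr_t slot    = 0x1000 | 0x1;              // tagged pointer, as CPSlot stores one
      uintptr_t new_loc = 0x2000;                    // relocated object address
      uintptr_t updated = new_loc | (slot & FLAG_MASK);
      assert(updated == 0x2001);                     // new location, flag bit preserved
      return 0;
    }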
+
+void MetaspaceClosure::push_impl(MetaspaceClosure::Ref* ref) {
+ if (_nest_level < MAX_NEST_LEVEL) {
+ do_push(ref);
+ if (!ref->keep_after_pushing()) {
+ delete ref;
+ }
+ } else {
+ do_pending_ref(ref);
+ ref->set_next(_pending_refs);
+ _pending_refs = ref;
+ }
+}
+
+void MetaspaceClosure::do_push(MetaspaceClosure::Ref* ref) {
+ if (ref->not_null()) {
+ bool read_only;
+ Writability w = ref->writability();
+ switch (w) {
+ case _writable:
+ read_only = false;
+ break;
+ case _not_writable:
+ read_only = true;
+ break;
+ default:
+ assert(w == _default, "must be");
+ read_only = ref->is_read_only_by_default();
+ }
+ if (_nest_level == 0) {
+ assert(_enclosing_ref == NULL, "must be");
+ }
+ _nest_level ++;
+ if (do_ref(ref, read_only)) { // true means we want to iterate the embedded pointer in <ref>
+ Ref* saved = _enclosing_ref;
+ _enclosing_ref = ref;
+ ref->metaspace_pointers_do(this);
+ _enclosing_ref = saved;
+ }
+ _nest_level --;
+ }
+}
+
+void MetaspaceClosure::finish() {
+ assert(_nest_level == 0, "must be");
+ while (_pending_refs != NULL) {
+ Ref* ref = _pending_refs;
+ _pending_refs = _pending_refs->next();
+ do_push(ref);
+ if (!ref->keep_after_pushing()) {
+ delete ref;
+ }
+ }
+}
+
+MetaspaceClosure::~MetaspaceClosure() {
+ assert(_pending_refs == NULL,
+ "you must explicitly call MetaspaceClosure::finish() to process all refs!");
+}
+
+bool UniqueMetaspaceClosure::do_ref(MetaspaceClosure::Ref* ref, bool read_only) {
+ bool created;
+ _has_been_visited.add_if_absent(ref->obj(), read_only, &created);
+ if (!created) {
+ return false; // Already visited: no need to iterate embedded pointers.
+ } else {
+ if (_has_been_visited.maybe_grow(MAX_TABLE_SIZE)) {
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Expanded _has_been_visited table to %d", _has_been_visited.table_size());
+ }
+ }
+ return do_unique_ref(ref, read_only);
+ }
+}
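
[Editor's note, not part of the patch] A hypothetical client of the API above, assuming metaspaceClosure.hpp is included: a closure that counts every unique MetaspaceObj reachable from one root. ArchiveBuilder in this patch follows the same push()/finish() protocol.

    class CountingClosure : public UniqueMetaspaceClosure {
    public:
      int count;
      CountingClosure() : count(0) {}
      virtual bool do_unique_ref(Ref* ref, bool read_only) {
        count++;
        return true;  // keep walking this object's embedded pointers
      }
    };

    // Usage sketch:
    //   InstanceKlass* ik = ...;
    //   CountingClosure cc;
    //   cc.push(&ik);   // dispatches to MSORef<InstanceKlass>
    //   cc.finish();    // drains refs deferred by the MAX_NEST_LEVEL cutoff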
diff --git a/hotspot/src/share/vm/memory/metaspaceClosure.hpp b/hotspot/src/share/vm/memory/metaspaceClosure.hpp
new file mode 100644
index 000000000..f67d8d6fd
--- /dev/null
+++ b/hotspot/src/share/vm/memory/metaspaceClosure.hpp
@@ -0,0 +1,381 @@
+
+
+#ifndef SHARE_VM_MEMORY_METASPACECLOSURE_HPP
+#define SHARE_VM_MEMORY_METASPACECLOSURE_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/array.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/hashtable.hpp"
+
+// The metadata hierarchy is separate from the oop hierarchy
+ class MetaspaceObj; // no C++ vtable
+//class Array; // no C++ vtable
+ class Annotations; // no C++ vtable
+ class ConstantPoolCache; // no C++ vtable
+ class ConstMethod; // no C++ vtable
+ class MethodCounters; // no C++ vtable
+ class Symbol; // no C++ vtable
+ class Metadata; // has C++ vtable (so do all subclasses)
+ class ConstantPool;
+ class MethodData;
+ class Method;
+ class Klass;
+ class InstanceKlass;
+ class InstanceMirrorKlass;
+ class InstanceClassLoaderKlass;
+ class InstanceRefKlass;
+ class ArrayKlass;
+ class ObjArrayKlass;
+ class TypeArrayKlass;
+
+// class MetaspaceClosure --
+//
+// This class is used for iterating the objects in the HotSpot Metaspaces. It
+// provides an API to walk all the reachable objects starting from a set of
+// root references (such as all Klass'es in the SystemDictionary).
+//
+// Currently it is used for compacting the CDS archive by eliminating temporary
+// objects allocated during archive creation time. See ArchiveBuilder for an example.
+//
+// To support MetaspaceClosure, each subclass of MetaspaceObj must provide
+// a method of the type void metaspace_pointers_do(MetaspaceClosure*). This method
+// should call MetaspaceClosure::push() on every pointer field of this
+// class that points to a MetaspaceObj. See Annotations::metaspace_pointers_do()
+// for an example.
+
+
+class MetaspaceClosure : public StackObj {
+public:
+ enum Writability {
+ _writable,
+ _not_writable,
+ _default
+ };
+
+ enum SpecialRef {
+ _method_entry_ref
+ };
+
+ // class MetaspaceClosure::Ref --
+ //
+ // MetaspaceClosure can be viewed as a very simple type of copying garbage
+ // collector. For it to function properly, it requires each subclass of
+ // MetaspaceObj to provide two methods:
+ //
+ // size_t size(); -- to determine how much data to copy
+ // void metaspace_pointers_do(MetaspaceClosure*); -- to locate all the embedded pointers
+ //
+ // Calling these methods would be trivial if these two were virtual methods.
+ // However, to save space, MetaspaceObj has NO vtable. The vtable is introduced
+ // only in the Metadata class.
+ //
+ // To work around the lack of a vtable, we use the Ref class with templates
+ // (see MSORef, OtherArrayRef, MSOArrayRef, and MSOPointerArrayRef)
+  // so that we can statically discover the type of an object. The use of Ref
+ // depends on the fact that:
+ //
+ // [1] We don't use polymorphic pointers for MetaspaceObj's that are not subclasses
+ // of Metadata. I.e., we don't do this:
+ // class Klass {
+ // MetaspaceObj *_obj;
+ // Array<int>* foo() { return (Array<int>*)_obj; }
+ // Symbol* bar() { return (Symbol*) _obj; }
+ //
+ // [2] All Array<T> dimensions are statically declared.
+ class Ref : public CHeapObj<mtClassShared> {
+ Writability _writability;
+ bool _keep_after_pushing;
+ Ref* _next;
+ void* _user_data;
+
+ protected:
+ virtual void** mpp() const = 0;
+ Ref(Writability w) : _writability(w), _keep_after_pushing(false), _next(NULL), _user_data(NULL) {}
+ public:
+ virtual bool not_null() const = 0;
+ virtual int size() const = 0;
+ virtual void metaspace_pointers_do(MetaspaceClosure *it) const = 0;
+ virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const = 0;
+ virtual MetaspaceObj::Type msotype() const = 0;
+ virtual bool is_read_only_by_default() const = 0;
+ virtual ~Ref() {}
+
+ address obj() const {
+ // In some rare cases (see CPSlot in constantPool.hpp) we store some flags in the lowest
+ // 2 bits of a MetaspaceObj pointer. Unmask these when manipulating the pointer.
+ uintx p = (uintx)*mpp();
+ return (address)(p & (~FLAG_MASK));
+ }
+
+ address* addr() const {
+ return (address*)mpp();
+ }
+
+ void update(address new_loc) const;
+
+    Writability writability() const { return _writability; }
+ void set_keep_after_pushing() { _keep_after_pushing = true; }
+ bool keep_after_pushing() { return _keep_after_pushing; }
+ void set_user_data(void* data) { _user_data = data; }
+ void* user_data() { return _user_data; }
+ void set_next(Ref* n) { _next = n; }
+ Ref* next() const { return _next; }
+
+ private:
+ static const uintx FLAG_MASK = 0x03;
+
+ int flag_bits() const {
+ uintx p = (uintx)*mpp();
+ return (int)(p & FLAG_MASK);
+ }
+ };
+
+private:
+ // MSORef -- iterate an instance of MetaspaceObj
+ template <class T> class MSORef : public Ref {
+ T** _mpp;
+ T* dereference() const {
+ return *_mpp;
+ }
+ protected:
+ virtual void** mpp() const {
+ return (void**)_mpp;
+ }
+
+ public:
+ MSORef(T** mpp, Writability w) : Ref(w), _mpp(mpp) {}
+
+ virtual bool is_read_only_by_default() const { return T::is_read_only_by_default(); }
+ virtual bool not_null() const { return dereference() != NULL; }
+ virtual int size() const { return dereference()->size(); }
+ virtual MetaspaceObj::Type msotype() const { return dereference()->type(); }
+
+ virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
+ dereference()->metaspace_pointers_do(it);
+ }
+ virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
+ ((T*)new_loc)->metaspace_pointers_do(it);
+ }
+ };
+
+ // abstract base class for MSOArrayRef, MSOPointerArrayRef and OtherArrayRef
+ template <class T> class ArrayRef : public Ref {
+ Array<T>** _mpp;
+ protected:
+ Array<T>* dereference() const {
+ return *_mpp;
+ }
+ virtual void** mpp() const {
+ return (void**)_mpp;
+ }
+
+ ArrayRef(Array<T>** mpp, Writability w) : Ref(w), _mpp(mpp) {}
+
+ // all Arrays are read-only by default
+ virtual bool is_read_only_by_default() const { return true; }
+ virtual bool not_null() const { return dereference() != NULL; }
+ virtual int size() const { return dereference()->size(); }
+ virtual MetaspaceObj::Type msotype() const { return MetaspaceObj::array_type(sizeof(T)); }
+ };
+
+ // OtherArrayRef -- iterate an instance of Array<T>, where T is NOT a subtype of MetaspaceObj.
+ // T can be a primitive type, such as int, or a structure. However, we do not scan
+ // the fields inside T, so you should not embed any pointers inside T.
+ template <class T> class OtherArrayRef : public ArrayRef<T> {
+ public:
+ OtherArrayRef(Array<T>** mpp, Writability w) : ArrayRef<T>(mpp, w) {}
+
+ virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
+ Array<T>* array = ArrayRef<T>::dereference();
+ if (TraceDynamicCDS)
+ dynamic_cds_log->print_cr("Iter(OtherArray): %p [%d]", array, array->length());
+ }
+ virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
+ Array<T>* array = (Array<T>*)new_loc;
+ if (TraceDynamicCDS)
+ dynamic_cds_log->print_cr("Iter(OtherArray): %p [%d]", array, array->length());
+ }
+ };
+
+ // MSOArrayRef -- iterate an instance of Array<T>, where T is a subtype of MetaspaceObj.
+ // We recursively call T::metaspace_pointers_do() for each element in this array.
+ template <class T> class MSOArrayRef : public ArrayRef<T> {
+ public:
+ MSOArrayRef(Array<T>** mpp, Writability w) : ArrayRef<T>(mpp, w) {}
+
+ virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
+ metaspace_pointers_do_at_impl(it, ArrayRef<T>::dereference());
+ }
+ virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
+ metaspace_pointers_do_at_impl(it, (Array<T>*)new_loc);
+ }
+ private:
+ void metaspace_pointers_do_at_impl(MetaspaceClosure *it, Array<T>* array) const {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(MSOArray): %p [%d]", array, array->length());
+ }
+ for (int i = 0; i < array->length(); i++) {
+ T* elm = array->adr_at(i);
+ elm->metaspace_pointers_do(it);
+ }
+ }
+ };
+
+ // MSOPointerArrayRef -- iterate an instance of Array<T*>, where T is a subtype of MetaspaceObj.
+ // We recursively call MetaspaceClosure::push() for each pointer in this array.
+ template <class T> class MSOPointerArrayRef : public ArrayRef<T*> {
+ public:
+ MSOPointerArrayRef(Array<T*>** mpp, Writability w) : ArrayRef<T*>(mpp, w) {}
+
+ virtual void metaspace_pointers_do(MetaspaceClosure *it) const {
+ metaspace_pointers_do_at_impl(it, ArrayRef<T*>::dereference());
+ }
+ virtual void metaspace_pointers_do_at(MetaspaceClosure *it, address new_loc) const {
+ metaspace_pointers_do_at_impl(it, (Array<T*>*)new_loc);
+ }
+ private:
+ void metaspace_pointers_do_at_impl(MetaspaceClosure *it, Array<T*>* array) const {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(MSOPointerArray): %p [%d]", array, array->length());
+ }
+ for (int i = 0; i < array->length(); i++) {
+ T** mpp = array->adr_at(i);
+ it->push(mpp);
+ }
+ }
+ };
+
+ // Normally, chains of references like a->b->c->d are iterated recursively. However,
+ // if recursion is too deep, we save the Refs in _pending_refs, and push them later in
+ // MetaspaceClosure::finish(). This avoids overflowing the C stack.
+ static const int MAX_NEST_LEVEL = 5;
+ Ref* _pending_refs;
+ int _nest_level;
+ Ref* _enclosing_ref;
+
+ void push_impl(Ref* ref);
+ void do_push(Ref* ref);
+
+public:
+ MetaspaceClosure(): _pending_refs(NULL), _nest_level(0), _enclosing_ref(NULL) {}
+ ~MetaspaceClosure();
+
+ void finish();
+
+ // enclosing_ref() is used to compute the offset of a field in a C++ class. For example
+ // class Foo { intx scala; Bar* ptr; }
+ // Foo *f = 0x100;
+ // when the f->ptr field is iterated with do_ref() on 64-bit platforms, we will have
+ // do_ref(Ref* r) {
+ // r->addr() == 0x108; // == &f->ptr;
+ // enclosing_ref()->obj() == 0x100; // == foo
+ // So we know that we are iterating upon a field at offset 8 of the object at 0x100.
+ //
+ // Note that if we have stack overflow, do_pending_ref(r) will be called first and
+ // do_ref(r) will be called later, for the same r. In this case, enclosing_ref() is valid only
+ // when do_pending_ref(r) is called, and will return NULL when do_ref(r) is called.
+ Ref* enclosing_ref() const {
+ return _enclosing_ref;
+ }
+
+ // This is called when a reference is placed in _pending_refs. Override this
+ // function if you're using enclosing_ref(). See notes above.
+ virtual void do_pending_ref(Ref* ref) {}
+
+ // returns true if we want to keep iterating the pointers embedded inside <ref>
+ virtual bool do_ref(Ref* ref, bool read_only) = 0;
+
+private:
+ template <class REF_TYPE, typename T>
+ void push_with_ref(T** mpp, Writability w) {
+ push_impl(new REF_TYPE(mpp, w));
+ }
+
+public:
+ // When MetaspaceClosure::push(...) is called, pick the correct Ref subtype to handle it:
+ //
+ // MetaspaceClosure* it = ...;
+ // Klass* o = ...; it->push(&o); => MSORef
+ // Array<int>* a1 = ...; it->push(&a1); => OtherArrayRef
+ // Array<Annotation>* a2 = ...; it->push(&a2); => MSOArrayRef
+ // Array<Klass*>* a3 = ...; it->push(&a3); => MSOPointerArrayRef
+ // Array<Array<Klass*>*>* a4 = ...; it->push(&a4); => MSOPointerArrayRef
+ // Array<Annotation*>* a5 = ...; it->push(&a5); => MSOPointerArrayRef
+ //
+ // Note that the following will fail to compile (to prevent you from adding new fields
+ // into the MetaspaceObj subtypes that cannot be properly copied by CDS):
+ //
+ // Hashtable* h = ...; it->push(&h); => Hashtable is not a subclass of MetaspaceObj
+ // Array<Hashtable*>* a6 = ...; it->push(&a6); => Hashtable is not a subclass of MetaspaceObj
+ // Array<int*>* a7 = ...; it->push(&a7); => int is not a subclass of MetaspaceObj
+
+ template <typename T>
+ void push(T** mpp, Writability w = _default) {
+ push_with_ref<MSORef<T> >(mpp, w);
+ }
+
+ void push(Array<u1>** mpp, Writability w = _default) {
+ push_with_ref<OtherArrayRef<u1> >(mpp, w);
+ }
+
+ void push(Array<u2>** mpp, Writability w = _default) {
+ push_with_ref<OtherArrayRef<u2> >(mpp, w);
+ }
+
+ void push(Array<u4>** mpp, Writability w = _default) {
+ push_with_ref<OtherArrayRef<u4> >(mpp, w);
+ }
+
+ void push(Array<u8>** mpp, Writability w = _default) {
+ push_with_ref<OtherArrayRef<u8> >(mpp, w);
+ }
+
+ void push(Array<int>** mpp, Writability w = _default) {
+ push_with_ref<OtherArrayRef<int> >(mpp, w);
+ }
+
+ template <typename T>
+ void push(Array<T>** mpp, Writability w = _default) {
+ push_with_ref<MSOArrayRef<T> >(mpp, w);
+ }
+
+ template <typename T>
+ void push(Array<T*>** mpp, Writability w = _default) {
+ push_with_ref<MSOPointerArrayRef<T> >(mpp, w);
+ }
+
+#if 0
+ // Enable this block if you're changing the push(...) methods, to test for types that should be
+ // disallowed. Each of the following "push" calls should result in a compile-time error.
+ void test_disallowed_types(MetaspaceClosure* it) {
+ Hashtable<bool, mtInternal>* h = NULL;
+ it->push(&h);
+
+ Array<Hashtable<bool, mtInternal>*>* a6 = NULL;
+ it->push(&a6);
+
+ Array<int*>* a7 = NULL;
+ it->push(&a7);
+ }
+#endif
+};
+
+// This is a special MetaspaceClosure that visits each unique MetaspaceObj once.
+class UniqueMetaspaceClosure : public MetaspaceClosure {
+ static const int INITIAL_TABLE_SIZE = 15889;
+ static const int MAX_TABLE_SIZE = 1000000;
+
+ // Do not override. Returns true if we are discovering ref->obj() for the first time.
+ virtual bool do_ref(Ref* ref, bool read_only);
+
+public:
+ // Gets called the first time we discover an object.
+ virtual bool do_unique_ref(Ref* ref, bool read_only) = 0;
+ UniqueMetaspaceClosure() : _has_been_visited(INITIAL_TABLE_SIZE) {}
+
+private:
+ KVHashtable<address, bool, mtInternal> _has_been_visited;
+};
+
+#endif // SHARE_VM_MEMORY_METASPACECLOSURE_HPP
diff --git a/hotspot/src/share/vm/memory/metaspaceShared.cpp b/hotspot/src/share/vm/memory/metaspaceShared.cpp
index 9857b7577..00fb9fe91 100644
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp
@@ -38,6 +38,7 @@
#include "memory/metaspaceShared.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/signature.hpp"
#include "runtime/vm_operations.hpp"
#include "runtime/vmThread.hpp"
@@ -47,14 +48,17 @@
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
int MetaspaceShared::_max_alignment = 0;
-
ReservedSpace* MetaspaceShared::_shared_rs = NULL;
+char* MetaspaceShared::_requested_base_address;
bool MetaspaceShared::_link_classes_made_progress;
bool MetaspaceShared::_check_classes_made_progress;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
+void* MetaspaceShared::_shared_metaspace_static_bottom = NULL;
+void* MetaspaceShared::_shared_metaspace_dynamic_base = NULL;
+void* MetaspaceShared::_shared_metaspace_dynamic_top = NULL;
// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.
@@ -843,7 +847,7 @@ int MetaspaceShared::preload_and_dump(const char * class_list_path,
// Returns true if the class's status has changed
bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
- assert(DumpSharedSpaces, "should only be called during dumping");
+// assert(DumpSharedSpaces, "should only be called during dumping");
if (ik->init_state() < InstanceKlass::linked) {
bool saved = BytecodeVerificationLocal;
if (!SharedClassUtil::is_shared_boot_class(ik)) {
@@ -862,6 +866,7 @@ bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
tty->print_cr("Preload Warning: Verification failed for %s",
ik->external_name());
CLEAR_PENDING_EXCEPTION;
+ SystemDictionaryShared::set_class_has_failed_verification(ik);
ik->set_in_error_state();
_has_error_classes = true;
}
@@ -902,6 +907,11 @@ public:
FileMapInfo::assert_mark(tag == old_tag);
}
+ void do_u4(u4* p) {
+ intptr_t obj = nextPtr();
+ *p = (u4)(uintx(obj));
+ }
+
void do_region(u_char* start, size_t size) {
assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
assert(size % sizeof(intptr_t) == 0, "bad size");
@@ -918,7 +928,10 @@ public:
// Return true if given address is in the mapped shared space.
bool MetaspaceShared::is_in_shared_space(const void* p) {
- return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
+ return UseSharedSpaces && ((FileMapInfo::current_info() != NULL &&
+ FileMapInfo::current_info()->is_mapped() &&
+ FileMapInfo::current_info()->is_in_shared_space(p)) ||
+ is_shared_dynamic(p));
}
void MetaspaceShared::print_shared_spaces() {
@@ -927,19 +940,34 @@ void MetaspaceShared::print_shared_spaces() {
}
}
-
// Map shared spaces at requested addresses and return if succeeded.
// Need to keep the bounds of the ro and rw space for the Metaspace::contains
// call, or is_in_shared_space.
bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
size_t image_alignment = mapinfo->alignment();
+ mapinfo->set_is_mapped(false);
+
#ifndef _WINDOWS
// Map in the shared memory and then map the regions on top of it.
// On Windows, don't map the memory here because it will cause the
// mappings of the regions to fail.
ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
- if (!shared_rs.is_reserved()) return false;
+ if (!shared_rs.is_reserved()) {
+ FileMapInfo::fail_continue("Unable to reserve shared memory");
+ FLAG_SET_DEFAULT(UseSharedSpaces, false);
+ return false;
+ }
+ if (InfoDynamicCDS) {
+ dynamic_cds_log->print_cr("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
+ p2i(shared_rs.base()), p2i(shared_rs.base() + shared_rs.size()), shared_rs.size());
+ }
+ if (mapinfo->is_static()) {
+ _requested_base_address = shared_rs.base();
+ } else {
+ _shared_metaspace_dynamic_base = shared_rs.base();
+ _shared_metaspace_dynamic_top = shared_rs.base() + shared_rs.size();
+ }
#endif
assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
@@ -950,40 +978,79 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
char* _mc_base = NULL;
// Map each shared region
- if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
- mapinfo->verify_region_checksum(ro) &&
- (_rw_base = mapinfo->map_region(rw)) != NULL &&
- mapinfo->verify_region_checksum(rw) &&
- (_md_base = mapinfo->map_region(md)) != NULL &&
- mapinfo->verify_region_checksum(md) &&
- (_mc_base = mapinfo->map_region(mc)) != NULL &&
- mapinfo->verify_region_checksum(mc) &&
- (image_alignment == (size_t)max_alignment()) &&
- mapinfo->validate_classpath_entry_table()) {
- // Success (no need to do anything)
- return true;
+ if (mapinfo->is_static()) {
+ if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
+ mapinfo->verify_region_checksum(ro) &&
+ (_rw_base = mapinfo->map_region(rw)) != NULL &&
+ mapinfo->verify_region_checksum(rw) &&
+ (_md_base = mapinfo->map_region(md)) != NULL &&
+ mapinfo->verify_region_checksum(md) &&
+ (_mc_base = mapinfo->map_region(mc)) != NULL &&
+ mapinfo->verify_region_checksum(mc) &&
+ (image_alignment == (size_t)max_alignment()) &&
+ mapinfo->validate_classpath_entry_table()) {
+ mapinfo->set_is_mapped(true);
+ return true;
+ }
} else {
- // If there was a failure in mapping any of the spaces, unmap the ones
- // that succeeded
- if (_ro_base != NULL) mapinfo->unmap_region(ro);
- if (_rw_base != NULL) mapinfo->unmap_region(rw);
- if (_md_base != NULL) mapinfo->unmap_region(md);
- if (_mc_base != NULL) mapinfo->unmap_region(mc);
+ if ((_rw_base = mapinfo->map_region(d_rw)) != NULL &&
+ mapinfo->verify_region_checksum(d_rw) &&
+ (_ro_base = mapinfo->map_region(d_ro)) != NULL &&
+ mapinfo->verify_region_checksum(d_ro) &&
+ (image_alignment == (size_t)max_alignment())) {
+ mapinfo->set_is_mapped(true);
+ return true;
+ }
+ }
+
+ // If there was a failure in mapping any of the spaces, unmap the ones
+ // that succeeded
+ if (_ro_base != NULL) mapinfo->unmap_region(ro);
+ if (_rw_base != NULL) mapinfo->unmap_region(rw);
+ if (_md_base != NULL) mapinfo->unmap_region(md);
+ if (_mc_base != NULL) mapinfo->unmap_region(mc);
#ifndef _WINDOWS
- // Release the entire mapped region
- shared_rs.release();
+ // Release the entire mapped region
+ shared_rs.release();
#endif
- // If -Xshare:on is specified, print out the error message and exit VM,
- // otherwise, set UseSharedSpaces to false and continue.
- if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
- vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
- } else {
- FLAG_SET_DEFAULT(UseSharedSpaces, false);
- }
- return false;
+ // If -Xshare:on is specified, print out the error message and exit VM,
+ // otherwise, set UseSharedSpaces to false and continue.
+ if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
+ vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
+ } else {
+ FLAG_SET_DEFAULT(UseSharedSpaces, false);
}
+ return false;
}
+void** MetaspaceShared::_vtbl_list = NULL;
+
+intptr_t* MetaspaceShared::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
+ Arguments::assert_is_dumping_archive();
+ switch (msotype) {
+ case MetaspaceObj::SymbolType:
+ case MetaspaceObj::TypeArrayU1Type:
+ case MetaspaceObj::TypeArrayU2Type:
+ case MetaspaceObj::TypeArrayU4Type:
+ case MetaspaceObj::TypeArrayU8Type:
+ case MetaspaceObj::TypeArrayOtherType:
+ case MetaspaceObj::ConstMethodType:
+ case MetaspaceObj::ConstantPoolCacheType:
+ case MetaspaceObj::AnnotationType:
+ case MetaspaceObj::MethodCountersType:
+ // These have no vtables.
+ break;
+ case MetaspaceObj::MethodDataType:
+      // We don't archive MethodData <-- it should have been removed in remove_unshareable_info
+ ShouldNotReachHere();
+ break;
+ default:
+ int vtable_offset = MetaspaceShared::vtbl_list_size * sizeof(void*) + sizeof(intptr_t);
+ char* vtable_start = (char*)_vtbl_list + vtable_offset;
+ return (intptr_t*)find_matching_vtbl_ptr(_vtbl_list, (void*)vtable_start, obj);
+ }
+ return NULL;
+}
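
[Editor's note, not part of the patch] Worked example for the offset arithmetic above, on LP64: vtbl_list_size = 17 and sizeof(void*) = sizeof(intptr_t) = 8, so vtable_offset = 17 * 8 + 8 = 144; the cloned vtables searched by find_matching_vtbl_ptr() therefore start 144 bytes past _vtbl_list.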
// Read the miscellaneous data from the shared file, and
// serialize it out to its various destinations.
@@ -996,6 +1063,7 @@ void MetaspaceShared::initialize_shared_spaces() {
// for Klass objects. They get filled in later.
void** vtbl_list = (void**)buffer;
+ _vtbl_list = vtbl_list;
buffer += MetaspaceShared::vtbl_list_size * sizeof(void*);
Universe::init_self_patching_vtbl_list(vtbl_list, vtbl_list_size);
@@ -1079,6 +1147,15 @@ void MetaspaceShared::initialize_shared_spaces() {
// Close the mapinfo file
mapinfo->close();
+ FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
+ if (dynamic_mapinfo != NULL) {
+ intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data();
+ ReadClosure rc(&buffer);
+ SymbolTable::serialize_shared_table_header(&rc);
+ SystemDictionaryShared::serialize_dictionary_headers(&rc);
+ dynamic_mapinfo->close();
+ }
+
if (PrintSharedArchiveAndExit) {
if (PrintSharedDictionary) {
tty->print_cr("\nShared classes:\n");
@@ -1104,6 +1181,11 @@ bool MetaspaceShared::remap_shared_readonly_as_readwrite() {
if (!mapinfo->remap_shared_readonly_as_readwrite()) {
return false;
}
+
+ mapinfo = FileMapInfo::dynamic_info();
+ if (mapinfo != NULL && !mapinfo->remap_shared_readonly_as_readwrite()) {
+ return false;
+ }
_remapped_readwrite = true;
}
return true;
diff --git a/hotspot/src/share/vm/memory/metaspaceShared.hpp b/hotspot/src/share/vm/memory/metaspaceShared.hpp
index d58ebecb2..a9dadfbb9 100644
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp
@@ -28,6 +28,7 @@
#include "memory/memRegion.hpp"
#include "runtime/virtualspace.hpp"
#include "utilities/exceptions.hpp"
+#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#define LargeSharedArchiveSize (300*M)
@@ -44,6 +45,7 @@
(uintx)(type ## SharedArchiveSize * region ## RegionPercentage) : Shared ## region ## Size
class FileMapInfo;
+class SerializeClosure;
// Class Data Sharing Support
class MetaspaceShared : AllStatic {
@@ -56,6 +58,11 @@ class MetaspaceShared : AllStatic {
static bool _has_error_classes;
static bool _archive_loading_failed;
static bool _remapped_readwrite;
+ static void* _shared_metaspace_static_bottom;
+ static void** _vtbl_list; // Remember the vtable start address for dynamic dump metadata
+ static char* _requested_base_address;
+ static void* _shared_metaspace_dynamic_base;
+ static void* _shared_metaspace_dynamic_top;
public:
enum {
vtbl_list_size = 17, // number of entries in the shared space vtable list.
@@ -71,11 +78,20 @@ class MetaspaceShared : AllStatic {
};
enum {
- ro = 0, // read-only shared space in the heap
- rw = 1, // read-write shared space in the heap
- md = 2, // miscellaneous data for initializing tables, etc.
- mc = 3, // miscellaneous code - vtable replacement.
- n_regions = 4
+ // core archive spaces
+ ro = 0, // read-only shared space in the heap
+ rw = 1, // read-write shared space in the heap
+ md = 2, // miscellaneous data for initializing tables, etc. (static only)
+ mc = 3, // miscellaneous code - vtable replacement. (static only)
+ n_regions = 4 // total number of static regions
+ };
+
+ enum {
+ // core dynamic archive spaces
+ d_rw = 0, // read-write shared space in the heap
+ d_ro = 1, // read-only shared space in the heap
+ d_bm = 2, // relocation bitmaps (freed after file mapping is finished)
+ d_n_regions = 2 // d_rw and d_ro
};
// Accessor functions to save shared space created for metadata, which has
@@ -108,6 +124,28 @@ class MetaspaceShared : AllStatic {
_archive_loading_failed = true;
}
static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
+
+ static bool is_shared_dynamic(const void* p) {
+ return p < _shared_metaspace_dynamic_top && p >= _shared_metaspace_dynamic_base;
+ }
+
+ // This is the base address as specified by -XX:SharedBaseAddress during -Xshare:dump.
+ // Both the base/top archives are written using this as their base address.
+ //
+ // During static dump: _requested_base_address == SharedBaseAddress.
+ //
+ // During dynamic dump: _requested_base_address is not always the same as SharedBaseAddress:
+ // - SharedBaseAddress is used for *reading the base archive*. I.e., CompactHashtable uses
+ // it to convert offsets to pointers to Symbols in the base archive.
+ // The base archive may be mapped to an OS-selected address due to ASLR. E.g.,
+ // you may have SharedBaseAddress == 0x00ff123400000000.
+ // - _requested_base_address is used for *writing the output archive*. It's usually
+ // 0x800000000 (unless it was set by -XX:SharedBaseAddress during -Xshare:dump).
+ static char* requested_base_address() {
+ return _requested_base_address;
+ }
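
[Editor's note, not part of the patch] Illustrative numbers for the comment above: with ASLR the base archive might map at SharedBaseAddress = 0x00ff123400000000 while requested_base_address() stays at the dump-time default 0x800000000. A metadata object found at runtime address 0x00ff123400001000 is then written into the dynamic archive as 0x800001000: the same offset, rebased onto the dump-time base, which is what the ArchiveBuilder::to_requested() call in filemap.cpp's write_region() performs.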
+
+ static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
static void initialize_shared_spaces() NOT_CDS_RETURN;
// Return true if given address is in the mapped shared space.
@@ -138,5 +176,8 @@ class MetaspaceShared : AllStatic {
static int count_class(const char* classlist_file);
static void estimate_regions_size() NOT_CDS_RETURN;
+
+ static void set_shared_metaspace_static_bottom(void* bottom) { _shared_metaspace_static_bottom = bottom; }
+ static void* shared_metaspace_static_bottom() { return _shared_metaspace_static_bottom; }
};
#endif // SHARE_VM_MEMORY_METASPACE_SHARED_HPP
diff --git a/hotspot/src/share/vm/oops/annotations.cpp b/hotspot/src/share/vm/oops/annotations.cpp
index 776b8606b..6b3080f17 100644
--- a/hotspot/src/share/vm/oops/annotations.cpp
+++ b/hotspot/src/share/vm/oops/annotations.cpp
@@ -27,6 +27,7 @@
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "oops/annotations.hpp"
#include "oops/instanceKlass.hpp"
#include "utilities/ostream.hpp"
@@ -36,6 +37,17 @@ Annotations* Annotations::allocate(ClassLoaderData* loader_data, TRAPS) {
return new (loader_data, size(), true, MetaspaceObj::AnnotationType, THREAD) Annotations();
}
+void Annotations::metaspace_pointers_do(MetaspaceClosure* it) {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(Annotations): %p", this);
+ }
+
+ it->push(&_class_annotations);
+ it->push(&_fields_annotations);
+ it->push(&_class_type_annotations);
+ it->push(&_fields_type_annotations); // FIXME: need a test case where _fields_type_annotations != NULL
+}
+
// helper
void Annotations::free_contents(ClassLoaderData* loader_data, Array<AnnotationArray*>* p) {
if (p != NULL) {
diff --git a/hotspot/src/share/vm/oops/annotations.hpp b/hotspot/src/share/vm/oops/annotations.hpp
index ad405a8db..d1f7bc71b 100644
--- a/hotspot/src/share/vm/oops/annotations.hpp
+++ b/hotspot/src/share/vm/oops/annotations.hpp
@@ -35,6 +35,7 @@
class ClassLoaderData;
class outputStream;
class KlassSizeStats;
+class MetaspaceClosure;
typedef Array<u1> AnnotationArray;
@@ -54,6 +55,8 @@ class Annotations: public MetaspaceObj {
Array<AnnotationArray*>* _fields_type_annotations;
public:
+ void metaspace_pointers_do(MetaspaceClosure* it);
+
// Allocate instance of this class
static Annotations* allocate(ClassLoaderData* loader_data, TRAPS);
@@ -61,8 +64,14 @@ class Annotations: public MetaspaceObj {
void deallocate_contents(ClassLoaderData* loader_data);
DEBUG_ONLY(bool on_stack() { return false; }) // for template
+ // Annotations should be stored in the read-only region of CDS archive.
+ static bool is_read_only_by_default() { return true; }
+
+ MetaspaceObj::Type type() const { return AnnotationType; }
+
// Sizing (in words)
static int size() { return sizeof(Annotations) / wordSize; }
+
#if INCLUDE_SERVICES
void collect_statistics(KlassSizeStats *sz) const;
#endif
diff --git a/hotspot/src/share/vm/oops/arrayKlass.cpp b/hotspot/src/share/vm/oops/arrayKlass.cpp
index 129bce63d..9009d6972 100644
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp
@@ -30,6 +30,7 @@
#include "jvmtifiles/jvmti.h"
#include "memory/gcLocker.hpp"
#include "memory/universe.inline.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/instanceKlass.hpp"
@@ -64,6 +65,19 @@ oop ArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
return NULL;
}
+void ArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) {
+ Klass::metaspace_pointers_do(it);
+
+ if (TraceDynamicCDS) {
+ ResourceMark rm;
+    dynamic_cds_log->print_cr("Iter(ArrayKlass): %p (%s)", this, external_name());
+ }
+
+ // need to cast away volatile
+ it->push((Klass**)&_higher_dimension);
+ it->push((Klass**)&_lower_dimension);
+}
+
// find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined
Klass* ArrayKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
// There are no fields in an array klass but look to the super class (Object)
@@ -203,6 +217,14 @@ void ArrayKlass::remove_unshareable_info() {
_higher_dimension = NULL;
}
+void ArrayKlass::remove_java_mirror() {
+ Klass::remove_java_mirror();
+ if (_higher_dimension != NULL) {
+ ArrayKlass *ak = ArrayKlass::cast(higher_dimension());
+ ak->remove_java_mirror();
+ }
+}
+
void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
assert(loader_data == ClassLoaderData::the_null_class_loader_data(), "array classes belong to null loader");
Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
diff --git a/hotspot/src/share/vm/oops/arrayKlass.hpp b/hotspot/src/share/vm/oops/arrayKlass.hpp
index d28ece376..9b6fd9e0b 100644
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp
@@ -100,7 +100,7 @@ class ArrayKlass: public Klass {
GrowableArray<Klass*>* compute_secondary_supers(int num_extra_slots);
bool compute_is_subtype_of(Klass* k);
-
+ virtual void metaspace_pointers_do(MetaspaceClosure* it);
// Sizing
static int header_size() { return sizeof(ArrayKlass)/HeapWordSize; }
static int static_size(int header_size);
@@ -141,6 +141,7 @@ class ArrayKlass: public Klass {
// CDS support - remove and restore oops from metadata. Oops are not shared.
virtual void remove_unshareable_info();
+ virtual void remove_java_mirror();
virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
// Printing
diff --git a/hotspot/src/share/vm/oops/constMethod.cpp b/hotspot/src/share/vm/oops/constMethod.cpp
index a496149df..fc7d74512 100644
--- a/hotspot/src/share/vm/oops/constMethod.cpp
+++ b/hotspot/src/share/vm/oops/constMethod.cpp
@@ -26,6 +26,7 @@
#include "interpreter/interpreter.hpp"
#include "memory/gcLocker.hpp"
#include "memory/heapInspection.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "memory/metadataFactory.hpp"
#include "oops/constMethod.hpp"
#include "oops/method.hpp"
@@ -148,6 +149,31 @@ Method* ConstMethod::method() const {
return _constants->pool_holder()->method_with_idnum(_method_idnum);
}
+void ConstMethod::metaspace_pointers_do(MetaspaceClosure* it) {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(ConstMethod): %p", this);
+ }
+
+ if (!method()->method_holder()->is_rewritten()) {
+ it->push(&_constants, MetaspaceClosure::_writable);
+ } else {
+ it->push(&_constants);
+ }
+ it->push(&_stackmap_data);
+ if (has_method_annotations()) {
+ it->push(method_annotations_addr());
+ }
+ if (has_parameter_annotations()) {
+ it->push(parameter_annotations_addr());
+ }
+ if (has_type_annotations()) {
+ it->push(type_annotations_addr());
+ }
+ if (has_default_annotations()) {
+ it->push(default_annotations_addr());
+ }
+}
+
// linenumber table - note that length is unknown until decompression,
// see class CompressedLineNumberReadStream.
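
The conditional push above is the visitor pattern this whole patch builds on: every metadata object hands the address of each embedded metadata pointer to a MetaspaceClosure, tagging the target writable when the holder has not been rewritten yet. A minimal standalone sketch of the idea (illustrative only; all names are invented stand-ins, not HotSpot API):

#include <cstdio>
#include <vector>

struct Slot { void** addr; bool read_only; };

struct PointerCollector {
  std::vector<Slot> slots;
  // rw = true models MetaspaceClosure::_writable: the target must land in
  // the read-write region of the archive.
  void push(void** p, bool rw = false) {
    if (*p != nullptr) slots.push_back(Slot{p, !rw});
  }
};

struct FakeConstMethod {
  void* constants;
  void* stackmap_data;
  bool  holder_rewritten;
  void pointers_do(PointerCollector* it) {
    // An unrewritten holder may still mutate its constant pool, so the
    // pool must stay writable in the archive.
    it->push(&constants, /*rw=*/!holder_rewritten);
    it->push(&stackmap_data);
  }
};

int main() {
  FakeConstMethod cm;
  cm.constants        = &cm;      // dummy non-NULL target
  cm.stackmap_data    = nullptr;  // NULL slots are skipped
  cm.holder_rewritten = false;
  PointerCollector it;
  cm.pointers_do(&it);
  std::printf("collected %zu slot(s)\n", it.slots.size());
  return 0;
}
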
diff --git a/hotspot/src/share/vm/oops/constMethod.hpp b/hotspot/src/share/vm/oops/constMethod.hpp
index 0caa3a26f..20cff631e 100644
--- a/hotspot/src/share/vm/oops/constMethod.hpp
+++ b/hotspot/src/share/vm/oops/constMethod.hpp
@@ -129,7 +129,7 @@ class MethodParametersElement VALUE_OBJ_CLASS_SPEC {
};
class KlassSizeStats;
-
+class MetaspaceClosure;
// Class to collect the sizes of ConstMethod inline tables
#define INLINE_TABLES_DO(do_element) \
do_element(localvariable_table_length) \
@@ -344,6 +344,12 @@ public:
// Size needed
static int size(int code_size, InlineTableSizes* sizes);
+  // ConstMethods should be stored in the read-only region of the CDS archive.
+ static bool is_read_only_by_default() { return true; }
+
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ MetaspaceObj::Type type() const { return ConstMethodType; }
+
int size() const { return _constMethod_size;}
void set_constMethod_size(int size) { _constMethod_size = size; }
#if INCLUDE_SERVICES
diff --git a/hotspot/src/share/vm/oops/constantPool.cpp b/hotspot/src/share/vm/oops/constantPool.cpp
index b6158e4e9..f8078bffa 100644
--- a/hotspot/src/share/vm/oops/constantPool.cpp
+++ b/hotspot/src/share/vm/oops/constantPool.cpp
@@ -23,16 +23,19 @@
*/
#include "precompiled.hpp"
+#include "cds/archiveUtils.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayKlass.hpp"
@@ -153,6 +156,52 @@ void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
}
}
+void ConstantPool::metaspace_pointers_do(MetaspaceClosure* it) {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(ConstantPool): %p", this);
+ }
+
+ it->push(&_tags, MetaspaceClosure::_writable);
+ it->push(&_cache);
+ it->push(&_pool_holder);
+ it->push(&_operands);
+ it->push(&_reference_map, MetaspaceClosure::_writable);
+
+ for (int i = 0; i < length(); i++) {
+    // Be careful with resolved klasses: the constant pool data structures
+    // differ between jdk8 and jdk17.
+ constantTag ctag = tag_at(i);
+ if (ctag.is_string() || ctag.is_utf8() || ctag.is_replaced_symbol()) {
+ it->push(symbol_at_addr(i));
+ } else if (ctag.is_klass()) {
+ it->push((Klass**)obj_at_addr_raw(i));
+ }
+ }
+}
+
+// We replace the raw data in base() by a normal Symbol* in two cases:
+// 1. resolved klass
+//    The slot holds a Klass*. remove_unshareable_info needs to replace the
+//    Klass* by the klass's name, but an excluded klass has a NULL Klass* by
+//    then and its name would be lost, so excluded klasses get their names here.
+// 2. unresolved klass
+//    The slot holds (Symbol* | 1), which MetaspaceClosure cannot parse, so we
+//    store a plain Symbol* first and restore the (Symbol* | 1) encoding last.
+void ConstantPool::symbol_replace_excluded_klass() {
+ for (int i = 0; i < length(); i++) {
+ constantTag ctag = tag_at(i);
+ if (ctag.is_klass()) {
+ Klass* klass = resolved_klass_at(i);
+ if (SystemDictionaryShared::is_excluded_class((InstanceKlass*)klass)) {
+ replaced_symbol_at_put(i, klass->name());
+ }
+ } else if (ctag.is_unresolved_klass()) {
+ CPSlot entry = slot_at(i);
+ replaced_symbol_at_put(i, entry.get_symbol());
+ }
+ }
+}
+
// CDS support. Create a new resolved_references array.
void ConstantPool::restore_unshareable_info(TRAPS) {
@@ -180,18 +229,30 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
}
void ConstantPool::remove_unshareable_info() {
- if (UseAppCDS) {
- if (cache() != NULL) {
- cache()->reset();
+ if (cache() != NULL) {
+ cache()->remove_unshareable_info();
+ }
+
+ // Shared ConstantPools are in the RO region, so the _flags cannot be modified.
+  // The _on_stack flag is used to prevent ConstantPools from being deallocated during
+ // class redefinition. Since shared ConstantPools cannot be deallocated anyway,
+ // we always set _on_stack to true to avoid having to change _flags during runtime.
+ _flags |= _on_stack;
+ int num_klasses = 0;
+ for (int index = 1; index < length(); index++) { // Index 0 is unused
+ if (tag_at(index).is_unresolved_klass_in_error()) {
+ tag_at_put(index, JVM_CONSTANT_UnresolvedClass);
+ } else if (tag_at(index).is_method_handle_in_error()) {
+ tag_at_put(index, JVM_CONSTANT_MethodHandle);
+ } else if (tag_at(index).is_method_type_in_error()) {
+ tag_at_put(index, JVM_CONSTANT_MethodType);
}
- for (int i = 0; i < _length; i++) {
- if (tag_at(i).is_klass()) {
- Klass* resolvedKlass = resolved_klass_at(i);
- ResourceMark rm;
- char* name = resolvedKlass->name()->as_C_string();
- int len = strlen(name);
- unresolved_klass_at_put(i, resolvedKlass->name());
- }
+
+ if (tag_at(index).is_klass()) {
+ Klass* resolved_Klass = resolved_klass_at(index);
+ unresolved_klass_at_put(index, resolved_Klass->name());
+ } else if (tag_at(index).is_replaced_symbol()) {
+ unresolved_klass_at_put(index, *symbol_at_addr(index));
}
}
// Resolved references are not in the shared archive.
@@ -519,8 +580,14 @@ Klass* ConstantPool::klass_ref_at(int which, TRAPS) {
Symbol* ConstantPool::klass_name_at(int which) const {
- assert(tag_at(which).is_unresolved_klass() || tag_at(which).is_klass(),
- "Corrupted constant pool");
+  // The Dynamic CDS dump reaches here during verification; release builds do not need this.
+#ifndef PRODUCT
+ assert(tag_at(which).is_unresolved_klass() || tag_at(which).is_klass() ||
+ tag_at(which).is_replaced_symbol(), "Corrupted constant pool");
+ if (tag_at(which).is_replaced_symbol()) {
+ return *symbol_at_addr(which);
+ }
+#endif
// A resolved constantPool entry will contain a Klass*, otherwise a Symbol*.
// It is not safe to rely on the tag bit's here, since we don't have a lock, and the entry and
// tag is not updated atomicly.
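
A standalone sketch of the tag-driven replacement that symbol_replace_excluded_klass() performs (illustrative only; the types here are invented stand-ins for the constant pool, its tags, and Klass):

#include <cstdio>
#include <string>
#include <vector>

enum Tag { KLASS, UNRESOLVED_KLASS, REPLACED_SYMBOL };

struct FakeKlass { std::string name; bool excluded; };

struct Slot { Tag tag; FakeKlass* klass; const std::string* name; };

void replace_excluded(std::vector<Slot>& pool) {
  for (size_t i = 0; i < pool.size(); i++) {
    Slot& s = pool[i];
    if (s.tag == KLASS && s.klass->excluded) {
      s.name  = &s.klass->name;   // keep only the name; the Klass* would dangle
      s.klass = nullptr;
      s.tag   = REPLACED_SYMBOL;
    }
  }
}

int main() {
  FakeKlass k{"com/example/Excluded", true};
  std::vector<Slot> pool;
  pool.push_back(Slot{KLASS, &k, nullptr});
  replace_excluded(pool);
  std::printf("tag=%d name=%s\n", (int)pool[0].tag, pool[0].name->c_str());
  return 0;
}
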
diff --git a/hotspot/src/share/vm/oops/constantPool.hpp b/hotspot/src/share/vm/oops/constantPool.hpp
index ec111df04..b5b4db38b 100644
--- a/hotspot/src/share/vm/oops/constantPool.hpp
+++ b/hotspot/src/share/vm/oops/constantPool.hpp
@@ -231,6 +231,9 @@ class ConstantPool : public Metadata {
return cache()->entry_at(cp_cache_index);
}
+ virtual void metaspace_pointers_do(MetaspaceClosure* it);
+ void symbol_replace_excluded_klass();
+ virtual MetaspaceObj::Type type() const { return ConstantPoolType; }
// Assembly code support
static int tags_offset_in_bytes() { return offset_of(ConstantPool, _tags); }
static int cache_offset_in_bytes() { return offset_of(ConstantPool, _cache); }
@@ -315,6 +318,11 @@ class ConstantPool : public Metadata {
*symbol_at_addr(which) = s;
}
+ void replaced_symbol_at_put(int which, Symbol*s) {
+ tag_at_put(which, JVM_CONSTANT_ReplacedSymbol);
+ *symbol_at_addr(which) = s;
+ }
+
void string_at_put(int which, int obj_index, oop str) {
resolved_references()->obj_at_put(obj_index, str);
}
@@ -747,6 +755,10 @@ class ConstantPool : public Metadata {
void collect_statistics(KlassSizeStats *sz) const;
#endif
+  // ConstantPools should be stored in the read-only region of the CDS archive,
+  // but the vtable is patched at runtime in JDK8, so they must stay writable.
+ static bool is_read_only_by_default() { return false; }
+
friend class ClassFileParser;
friend class SystemDictionary;
diff --git a/hotspot/src/share/vm/oops/cpCache.cpp b/hotspot/src/share/vm/oops/cpCache.cpp
index ebcf3d6a9..51f5397b8 100644
--- a/hotspot/src/share/vm/oops/cpCache.cpp
+++ b/hotspot/src/share/vm/oops/cpCache.cpp
@@ -24,14 +24,17 @@
#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
+#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/universe.inline.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "oops/cpCache.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/macros.hpp"
@@ -602,6 +605,72 @@ void ConstantPoolCache::initialize(const intArray& inverse_index_map,
}
}
+void ConstantPoolCache::metaspace_pointers_do(MetaspaceClosure* it) {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(ConstantPoolCache): %p", this);
+ }
+ it->push(&_constant_pool);
+ // it->push(&_reference_map);
+}
+
+void ConstantPoolCache::remove_unshareable_info() {
+ walk_entries_for_initialization(/*check_only = */ false);
+}
+
+void ConstantPoolCache::walk_entries_for_initialization(bool check_only) {
+ Arguments::assert_is_dumping_archive();
+ // When dumping the archive, we want to clean up the ConstantPoolCache
+ // to remove any effect of linking due to the execution of Java code --
+ // each ConstantPoolCacheEntry will have the same contents as if
+ // ConstantPoolCache::initialize has just returned:
+ //
+ // - We keep the ConstantPoolCache::constant_pool_index() bits for all entries.
+ // - We keep the "f2" field for entries used by invokedynamic and invokehandle
+ // - All other bits in the entries are cleared to zero.
+ ResourceMark rm;
+
+ InstanceKlass* ik = constant_pool()->pool_holder();
+ bool* f2_used = NEW_RESOURCE_ARRAY(bool, length());
+ memset(f2_used, 0, sizeof(bool) * length());
+
+ Thread* current = Thread::current();
+
+ // Find all the slots that we need to preserve f2
+ for (int i = 0; i < ik->methods()->length(); i++) {
+ Method* m = ik->methods()->at(i);
+ RawBytecodeStream bcs(methodHandle(current, m));
+ while (!bcs.is_last_bytecode()) {
+ Bytecodes::Code opcode = bcs.raw_next();
+ switch (opcode) {
+ case Bytecodes::_invokedynamic: {
+ int index = Bytes::get_native_u4(bcs.bcp() + 1);
+ int cp_cache_index = constant_pool()->invokedynamic_cp_cache_index(index);
+ f2_used[cp_cache_index] = 1;
+ }
+ break;
+ case Bytecodes::_invokehandle: {
+ int cp_cache_index = Bytes::get_native_u2(bcs.bcp() + 1);
+ f2_used[cp_cache_index] = 1;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (check_only) {
+ DEBUG_ONLY(
+      for (int i = 0; i < length(); i++) {
+ entry_at(i)->verify_just_initialized(f2_used[i]);
+ })
+ } else {
+    for (int i = 0; i < length(); i++) {
+ entry_at(i)->reinitialize(f2_used[i]);
+ }
+ }
+}
+
#if INCLUDE_JVMTI
// RedefineClasses() API support:
// If any entry of this ConstantPoolCache points to any of
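
A standalone sketch of the f2-preservation logic in walk_entries_for_initialization() and ConstantPoolCacheEntry::reinitialize() (illustrative only; simplified types, invented mask value, and a flat instruction list instead of a RawBytecodeStream):

#include <cstdio>
#include <vector>

enum Op { INVOKEDYNAMIC, INVOKEHANDLE, OTHER };
struct Insn { Op op; int cp_cache_index; };

struct Entry {
  unsigned indices;
  unsigned flags;
  void*    f1;
  long     f2;
  void reinitialize(bool f2_used, unsigned cp_index_mask) {
    indices &= cp_index_mask;   // keep only the constant-pool index bits
    f1 = nullptr;
    flags = 0;
    if (!f2_used) f2 = 0;       // f2 survives only for indy/handle call sites
  }
};

int main() {
  std::vector<Insn> code;
  code.push_back(Insn{OTHER, 0});
  code.push_back(Insn{INVOKEDYNAMIC, 2});
  code.push_back(Insn{INVOKEHANDLE, 3});

  Entry dirty = {0xFFFFu, 7u, (void*)0x1, 42};  // a linked, "dirty" entry
  std::vector<Entry> cache(4, dirty);
  std::vector<bool> f2_used(cache.size(), false);

  // Pass 1: mark the slots referenced by invokedynamic/invokehandle.
  for (size_t i = 0; i < code.size(); i++)
    if (code[i].op != OTHER) f2_used[code[i].cp_cache_index] = true;

  // Pass 2: wipe every entry back to its just-initialized state.
  for (size_t i = 0; i < cache.size(); i++)
    cache[i].reinitialize(f2_used[i], 0x00FFu);

  std::printf("f2[2]=%ld f2[0]=%ld\n", cache[2].f2, cache[0].f2);  // 42 and 0
  return 0;
}
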
diff --git a/hotspot/src/share/vm/oops/cpCache.hpp b/hotspot/src/share/vm/oops/cpCache.hpp
index 48f9bbd27..cb2fa43d6 100644
--- a/hotspot/src/share/vm/oops/cpCache.hpp
+++ b/hotspot/src/share/vm/oops/cpCache.hpp
@@ -124,6 +124,7 @@ class PSPromotionManager;
// source code. The _indices field with the bytecode must be written last.
class CallInfo;
+class MetaspaceClosure;
class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
@@ -397,6 +398,24 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
// When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state:
assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask");
}
+
+ void reinitialize(bool f2_used) {
+ _indices &= cp_index_mask;
+ _f1 = NULL;
+ _flags = 0;
+ if (!f2_used) {
+ _f2 = 0;
+ }
+ }
+
+ void verify_just_initialized(bool f2_used) {
+ assert((_indices & (~cp_index_mask)) == 0, "sanity");
+ assert(_f1 == NULL, "sanity");
+ assert(_flags == 0, "sanity");
+ if (!f2_used) {
+ assert(_f2 == 0, "sanity");
+ }
+  }
};
@@ -468,6 +487,10 @@ class ConstantPoolCache: public MetaspaceObj {
return base() + i;
}
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ void remove_unshareable_info();
+ void walk_entries_for_initialization(bool check_only);
+ MetaspaceObj::Type type() const { return ConstantPoolCacheType; }
// Code generation
static ByteSize base_offset() { return in_ByteSize(sizeof(ConstantPoolCache)); }
static ByteSize entry_offset(int raw_index) {
@@ -488,7 +511,7 @@ class ConstantPoolCache: public MetaspaceObj {
#endif // INCLUDE_JVMTI
void reset();
-
+
// Deallocate - no fields to deallocate
DEBUG_ONLY(bool on_stack() { return false; })
void deallocate_contents(ClassLoaderData* data) {}
diff --git a/hotspot/src/share/vm/oops/instanceKlass.cpp b/hotspot/src/share/vm/oops/instanceKlass.cpp
index 367c9a09d..0d1b1a8d0 100644
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp
@@ -39,6 +39,7 @@
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "oops/fieldStreams.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceKlass.hpp"
@@ -53,6 +54,7 @@
#include "prims/jvmtiRedefineClasses.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodComparator.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
@@ -463,12 +465,73 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
MetadataFactory::free_metadata(loader_data, annotations());
}
set_annotations(NULL);
+
+ if (Arguments::is_dumping_archive()) {
+ SystemDictionaryShared::remove_dumptime_info(this);
+ }
}
bool InstanceKlass::should_be_initialized() const {
return !is_initialized();
}
+void InstanceKlass::metaspace_pointers_do(MetaspaceClosure* it) {
+ Klass::metaspace_pointers_do(it);
+
+ if (TraceDynamicCDS) {
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("Iter(InstanceKlass): %p (%s)", this, external_name());
+ }
+
+ it->push(&_annotations);
+ it->push((Klass**)&_array_klasses);
+ if (!is_rewritten()) {
+ it->push(&_constants, MetaspaceClosure::_writable);
+ } else {
+ it->push(&_constants);
+ }
+ it->push(&_inner_classes);
+#if INCLUDE_JVMTI
+ it->push(&_previous_versions);
+#endif
+ it->push(&_array_name);
+ it->push(&_methods);
+ it->push(&_default_methods);
+ it->push(&_local_interfaces);
+ it->push(&_transitive_interfaces);
+ it->push(&_method_ordering);
+ if (!is_rewritten()) {
+ it->push(&_default_vtable_indices, MetaspaceClosure::_writable);
+ } else {
+ it->push(&_default_vtable_indices);
+ }
+
+ // _fields might be written into by Rewriter::scan_method() -> fd.set_has_initialized_final_update()
+ it->push(&_fields, MetaspaceClosure::_writable);
+
+ if (itable_length() > 0) {
+ itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
+ int method_table_offset_in_words = ioe->offset()/wordSize;
+ int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
+ / itableOffsetEntry::size();
+
+ for (int i = 0; i < nof_interfaces; i ++, ioe ++) {
+ if (ioe->interface_klass() != NULL) {
+ it->push(ioe->interface_klass_addr());
+ itableMethodEntry* ime = ioe->first_method_entry(this);
+ int n = klassItable::method_count_for_interface(ioe->interface_klass());
+ for (int index = 0; index < n; index ++) {
+ it->push(ime[index].method_addr());
+ }
+ }
+ }
+ }
+
+ // it->push(&_nest_members);
+ // it->push(&_permitted_subclasses);
+ // it->push(&_record_components);
+}
+
klassVtable* InstanceKlass::vtable() const {
return new klassVtable(this, start_of_vtable(), vtable_length() / vtableEntry::size());
}
@@ -765,6 +828,28 @@ bool InstanceKlass::link_class_impl(
}
+// Check if a class or any of its supertypes has a version older than 50.
+// CDS will not perform verification of old classes during dump time because
+// without changing the old verifier, the verification constraint cannot be
+// retrieved during dump time.
+// Verification of archived old classes will be performed during run time.
+bool InstanceKlass::can_be_verified_at_dumptime() const {
+ if (major_version() < 50 /*JAVA_6_VERSION*/) {
+ return false;
+ }
+ if (java_super() != NULL && !java_super()->can_be_verified_at_dumptime()) {
+ return false;
+ }
+ Array<Klass*>* interfaces = local_interfaces();
+ int len = interfaces->length();
+ for (int i = 0; i < len; i++) {
+ if (!((InstanceKlass*)interfaces->at(i))->can_be_verified_at_dumptime()) {
+ return false;
+ }
+ }
+ return true;
+}
+
// Rewrite the byte codes of all of the methods of a class.
// The rewriter must be called exactly once. Rewriting must happen after
// verification but before the first method of the class is executed.
@@ -1459,7 +1544,32 @@ static int linear_search(Array<Method*>* methods, Symbol* name, Symbol* signatur
}
#endif
+bool InstanceKlass::_disable_method_binary_search = false;
+
+NOINLINE int linear_search(const Array<Method*>* methods, const Symbol* name) {
+  // The methods array may no longer be sorted by the addresses of the names
+  // here, so scan forward and return the first entry whose name matches.
+  int len = methods->length();
+  for (int i = 0; i < len; i++) {
+    Method* m = methods->at(i);
+    if (m->name() == name) {
+      return i;
+    }
+  }
+  return -1;
+}
+
static int binary_search(Array<Method*>* methods, Symbol* name) {
+ if (InstanceKlass::_disable_method_binary_search) {
+ assert(DynamicDumpSharedSpaces, "must be");
+ // At the final stage of dynamic dumping, the methods array may not be sorted
+ // by ascending addresses of their names, so we can't use binary search anymore.
+ // However, methods with the same name are still laid out consecutively inside the
+ // methods array, so let's look for the first one that matches.
+ return linear_search(methods, name);
+ }
+
int len = methods->length();
// methods are sorted, so do binary search
int l = 0;
@@ -2455,24 +2565,37 @@ void InstanceKlass::remove_unshareable_info() {
m->remove_unshareable_info();
}
- if (UseAppCDS) {
+ if (UseAppCDS || DynamicDumpSharedSpaces) {
if (_oop_map_cache != NULL) {
delete _oop_map_cache;
_oop_map_cache = NULL;
}
-
+
JNIid::deallocate(jni_ids());
set_jni_ids(NULL);
-
+
jmethodID* jmeths = methods_jmethod_ids_acquire();
if (jmeths != (jmethodID*)NULL) {
release_set_methods_jmethod_ids(NULL);
FreeHeap(jmeths);
}
}
-
// do array classes also.
array_klasses_do(remove_unshareable_in_class);
+ // These are not allocated from metaspace. They are safe to set to NULL.
+ _member_names = NULL;
+ _dependencies = NULL;
+ _osr_nmethods_head = NULL;
+ _init_thread = NULL;
+}
+
+void InstanceKlass::remove_java_mirror() {
+ Klass::remove_java_mirror();
+
+ // do array classes also.
+ if (array_klasses() != NULL) {
+ array_klasses()->remove_java_mirror();
+ }
}
static void restore_unshareable_in_class(Klass* k, TRAPS) {
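
A standalone sketch of the recursive version check behind can_be_verified_at_dumptime(): a class is dump-time verifiable only if it and every supertype (superclass plus all local interfaces) have a class-file major version of at least 50, the first version with the split verifier (illustrative only; K is an invented stand-in for InstanceKlass):

#include <cstdio>
#include <vector>

struct K {
  int major_version;
  const K* super;
  std::vector<const K*> interfaces;

  bool can_be_verified_at_dumptime() const {
    if (major_version < 50) return false;   // pre-Java-6 class file
    if (super != nullptr && !super->can_be_verified_at_dumptime()) return false;
    for (size_t i = 0; i < interfaces.size(); i++)
      if (!interfaces[i]->can_be_verified_at_dumptime()) return false;
    return true;
  }
};

int main() {
  K object{52, nullptr, {}};
  K old_iface{49, &object, {}};
  K modern{52, &object, {&old_iface}};
  // prints 0: one old interface taints the whole hierarchy
  std::printf("%d\n", (int)modern.can_be_verified_at_dumptime());
  return 0;
}
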
diff --git a/hotspot/src/share/vm/oops/instanceKlass.hpp b/hotspot/src/share/vm/oops/instanceKlass.hpp
index 39d2c580c..43919e83d 100644
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp
@@ -323,6 +323,7 @@ class InstanceKlass: public Klass {
friend class SystemDictionary;
public:
+ static bool _disable_method_binary_search;
bool has_nonstatic_fields() const {
return (_misc_flags & _misc_has_nonstatic_fields) != 0;
}
@@ -488,6 +489,7 @@ class InstanceKlass: public Klass {
void link_class(TRAPS);
bool link_class_or_fail(TRAPS); // returns false on failure
void unlink_class();
+ bool can_be_verified_at_dumptime() const;
void rewrite_class(TRAPS);
void link_methods(TRAPS);
Method* class_initializer();
@@ -525,6 +527,10 @@ class InstanceKlass: public Klass {
Method* find_method(Symbol* name, Symbol* signature) const;
static Method* find_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
+ static void disable_method_binary_search() {
+ _disable_method_binary_search = true;
+ }
+
// find a local method, but skip static methods
Method* find_instance_method(Symbol* name, Symbol* signature,
PrivateLookupMode private_mode);
@@ -1001,7 +1007,8 @@ class InstanceKlass: public Klass {
bool can_be_fastpath_allocated() const {
return !layout_helper_needs_slow_path(layout_helper());
}
-
+
+ virtual void metaspace_pointers_do(MetaspaceClosure* iter);
// Java vtable/itable
klassVtable* vtable() const; // return new klassVtable wrapper
inline Method* method_at_vtable(int index);
@@ -1075,7 +1082,7 @@ class InstanceKlass: public Klass {
public:
void set_in_error_state() {
- assert(DumpSharedSpaces, "only call this when dumping archive");
+ assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "only call this when dumping archive");
_init_state = initialization_error;
}
bool check_sharing_error_state();
@@ -1150,6 +1157,7 @@ private:
public:
// CDS support - remove and restore oops from metadata. Oops are not shared.
virtual void remove_unshareable_info();
+ virtual void remove_java_mirror();
virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
// jvm support
diff --git a/hotspot/src/share/vm/oops/klass.cpp b/hotspot/src/share/vm/oops/klass.cpp
index 5269060a4..34d9d9895 100644
--- a/hotspot/src/share/vm/oops/klass.cpp
+++ b/hotspot/src/share/vm/oops/klass.cpp
@@ -26,16 +26,19 @@
#include "classfile/javaClasses.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/systemDictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline2.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/stack.hpp"
@@ -69,6 +72,10 @@ ClassLoaderData *Klass::_fake_loader_data_Ext = reinterpret_cast<ClassLoaderData
void Klass::set_name(Symbol* n) {
_name = n;
if (_name != NULL) _name->increment_refcount();
+
+ if (Arguments::is_dumping_archive() && oop_is_instance()) {
+ SystemDictionaryShared::init_dumptime_info(InstanceKlass::cast(this));
+ }
}
bool Klass::is_subclass_of(const Klass* k) const {
@@ -369,6 +376,36 @@ GrowableArray<Klass*>* Klass::compute_secondary_supers(int num_extra_slots) {
return NULL;
}
+void Klass::metaspace_pointers_do(MetaspaceClosure* it) {
+ if (TraceDynamicCDS) {
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("Iter(Klass): %p (%s)", this, external_name());
+ }
+
+ it->push(&_name);
+ it->push(&_secondary_super_cache);
+ it->push(&_secondary_supers);
+ for (int i = 0; i < _primary_super_limit; i++) {
+ it->push(&_primary_supers[i]);
+ }
+ it->push(&_super);
+ it->push((Klass**)&_subklass);
+ it->push((Klass**)&_next_sibling);
+ it->push(&_next_link);
+
+ vtableEntry* vt = start_of_vtable();
+ for (int i = 0; i < vtable_length(); i++) {
+ it->push(vt[i].method_addr());
+ }
+}
+
+inline vtableEntry* Klass::start_of_vtable() const {
+ return (vtableEntry*) ((address)this + in_bytes(vtable_start_offset()));
+}
+
+inline ByteSize Klass::vtable_start_offset() {
+ return in_ByteSize(InstanceKlass::header_size() * wordSize);
+}
Klass* Klass::subklass() const {
return _subklass == NULL ? NULL : _subklass;
@@ -530,7 +567,7 @@ void Klass::oops_do(OopClosure* cl) {
}
void Klass::remove_unshareable_info() {
- assert (DumpSharedSpaces, "only called for DumpSharedSpaces");
+ assert (DumpSharedSpaces || DynamicDumpSharedSpaces, "only called for DumpSharedSpaces or DynamicDumpSharedSpaces");
JFR_ONLY(REMOVE_ID(this);)
set_subklass(NULL);
@@ -539,40 +576,46 @@ void Klass::remove_unshareable_info() {
set_java_mirror(NULL);
set_next_link(NULL);
- if (!UseAppCDS) {
- // CDS logic
+ if (class_loader_data() == NULL) {
+ // Null out class loader data for classes loaded by bootstrap (null) loader
+ set_class_loader_data(NULL);
+ } else if (SystemDictionary::is_ext_class_loader(class_loader())) {
+ // Mark class loaded by system class loader
+ set_class_loader_data(_fake_loader_data_Ext);
+ } else if (SystemDictionary::is_app_class_loader(class_loader())) {
+ set_class_loader_data(_fake_loader_data_App);
+ } else {
+    // Classes loaded by custom loaders: null out the class loader data
set_class_loader_data(NULL);
- } else if (class_loader_data() != NULL) {
- // AppCDS logic
- if (class_loader() == NULL) {
- // Null out class loader data for classes loaded by bootstrap (null) loader
- set_class_loader_data(NULL);
- } else if(SystemDictionary::is_ext_class_loader(class_loader())) {
- // Mark class loaded by system class loader
- set_class_loader_data(_fake_loader_data_Ext);
- } else {
- set_class_loader_data(_fake_loader_data_App);
- }
}
}
+void Klass::remove_java_mirror() {
+ Arguments::assert_is_dumping_archive();
+ if (TraceDynamicCDS) {
+ ResourceMark rm;
+ dynamic_cds_log->print_cr("remove java_mirror: %s", external_name());
+ }
+ // Just null out the mirror. The class_loader_data() no longer exists.
+ _java_mirror = NULL;
+}
+
void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
JFR_ONLY(RESTORE_ID(this);)
+ if (TraceDynamicCDS) {
+ ResourceMark rm(THREAD);
+ dynamic_cds_log->print_cr("restore: %s", external_name());
+ }
+
// If an exception happened during CDS restore, some of these fields may already be
// set. We leave the class on the CLD list, even if incomplete so that we don't
// modify the CLD list outside a safepoint.
if (class_loader_data() == NULL || has_fake_loader_data()) {
- // CDS should not set fake loader data
- assert(!has_fake_loader_data() || (has_fake_loader_data() && UseAppCDS),
- "setting fake loader data possible only with AppCDS enabled");
- // Restore class_loader_data
set_class_loader_data(loader_data);
-
// Add to class loader list first before creating the mirror
// (same order as class file parsing)
loader_data->add_class(this);
}
-
// Recreate the class mirror.
// Only recreate it if not present. A previous attempt to restore may have
// gotten an OOM later but keep the mirror if it was created.
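
A standalone sketch of the loader classification above (illustrative only; the sentinel values here are invented, the patch uses the _fake_loader_data_Ext/_fake_loader_data_App constants): real loader data cannot be archived, so boot and custom loaders are nulled out, while the ext/app loaders are replaced by sentinels that restore_unshareable_info() recognizes and re-binds.

#include <cstdio>

enum Loader { BOOT, EXT, APP, CUSTOM };

void* const FAKE_EXT = (void*)0x2;   // invented sentinel values
void* const FAKE_APP = (void*)0x8;

void* archived_loader_data(Loader l) {
  switch (l) {
    case EXT: return FAKE_EXT;   // re-bound to the ext loader at restore time
    case APP: return FAKE_APP;   // re-bound to the app loader at restore time
    default:  return nullptr;    // boot and custom loaders: drop the data
  }
}

int main() {
  std::printf("app=%p custom=%p\n",
              archived_loader_data(APP), archived_loader_data(CUSTOM));
  return 0;
}
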
diff --git a/hotspot/src/share/vm/oops/klass.hpp b/hotspot/src/share/vm/oops/klass.hpp
index f70587eab..4e45a7756 100644
--- a/hotspot/src/share/vm/oops/klass.hpp
+++ b/hotspot/src/share/vm/oops/klass.hpp
@@ -94,6 +94,8 @@ class ParCompactionManager;
class KlassSizeStats;
class fieldDescriptor;
class MarkSweep;
+class MetaspaceClosure;
+class vtableEntry;
class Klass : public Metadata {
friend class VMStructs;
@@ -209,7 +211,7 @@ protected:
bool has_fake_loader_data_App() { return class_loader_data() == _fake_loader_data_App; }
bool has_fake_loader_data_Ext() { return class_loader_data() == _fake_loader_data_Ext; }
bool has_fake_loader_data() { return (has_fake_loader_data_App() || has_fake_loader_data_Ext()); }
-
+
bool is_klass() const volatile { return true; }
// super
@@ -316,6 +318,7 @@ protected:
_shared_class_path_index = index;
};
+ virtual void metaspace_pointers_do(MetaspaceClosure* it);
protected: // internal accessors
Klass* subklass_oop() const { return _subklass; }
@@ -323,7 +326,10 @@ protected:
void set_subklass(Klass* s);
void set_next_sibling(Klass* s);
+ vtableEntry* start_of_vtable() const;
+
public:
+ static ByteSize vtable_start_offset();
// Compiler support
static ByteSize super_offset() { return in_ByteSize(offset_of(Klass, _super)); }
@@ -505,6 +511,7 @@ protected:
public:
// CDS support - remove and restore oops from metadata. Oops are not shared.
virtual void remove_unshareable_info();
+ virtual void remove_java_mirror();
virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
protected:
@@ -725,6 +732,7 @@ protected:
virtual const char* internal_name() const = 0;
+ virtual MetaspaceObj::Type type() const { return ClassType; }
// Verification
virtual void verify_on(outputStream* st);
void verify() { verify_on(tty); }
diff --git a/hotspot/src/share/vm/oops/klassVtable.hpp b/hotspot/src/share/vm/oops/klassVtable.hpp
index 244f3c0cc..9379bcca0 100644
--- a/hotspot/src/share/vm/oops/klassVtable.hpp
+++ b/hotspot/src/share/vm/oops/klassVtable.hpp
@@ -176,6 +176,7 @@ class vtableEntry VALUE_OBJ_CLASS_SPEC {
}
static int method_offset_in_bytes() { return offset_of(vtableEntry, _method); }
Method* method() const { return _method; }
+ Method** method_addr() { return &_method; }
private:
Method* _method;
@@ -216,6 +217,7 @@ class itableOffsetEntry VALUE_OBJ_CLASS_SPEC {
int _offset;
public:
Klass* interface_klass() const { return _interface; }
+  InstanceKlass** interface_klass_addr() { return (InstanceKlass**)&_interface; }
int offset() const { return _offset; }
static itableMethodEntry* method_entry(Klass* k, int offset) { return (itableMethodEntry*)(((address)k) + offset); }
@@ -238,6 +240,7 @@ class itableMethodEntry VALUE_OBJ_CLASS_SPEC {
public:
Method* method() const { return _method; }
+  Method** method_addr() { return &_method; }
void clear() { _method = NULL; }
diff --git a/hotspot/src/share/vm/oops/metadata.hpp b/hotspot/src/share/vm/oops/metadata.hpp
index dc52c452e..372faa953 100644
--- a/hotspot/src/share/vm/oops/metadata.hpp
+++ b/hotspot/src/share/vm/oops/metadata.hpp
@@ -28,6 +28,7 @@
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
+class MetaspaceClosure;
// This is the base class for an internal Class related metadata
class Metadata : public MetaspaceObj {
@@ -47,8 +48,9 @@ class Metadata : public MetaspaceObj {
virtual bool is_method() const volatile { return false; }
virtual bool is_methodData() const volatile { return false; }
virtual bool is_constantPool() const volatile { return false; }
-
+ virtual MetaspaceObj::Type type() const = 0;
virtual const char* internal_name() const = 0;
+ virtual void metaspace_pointers_do(MetaspaceClosure* iter) {}
void print() const { print_on(tty); }
void print_value() const { print_value_on(tty); }
diff --git a/hotspot/src/share/vm/oops/method.cpp b/hotspot/src/share/vm/oops/method.cpp
index 64cdae9c7..305348bd0 100644
--- a/hotspot/src/share/vm/oops/method.cpp
+++ b/hotspot/src/share/vm/oops/method.cpp
@@ -37,6 +37,7 @@
#include "memory/heapInspection.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "memory/oopFactory.hpp"
#include "oops/constMethod.hpp"
#include "oops/methodData.hpp"
@@ -834,6 +835,20 @@ void Method::set_not_osr_compilable(int comp_level, bool report, const char* rea
assert(!CompilationPolicy::can_be_osr_compiled(this, comp_level), "sanity check");
}
+void Method::metaspace_pointers_do(MetaspaceClosure* it) {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(Method): %p", this);
+ }
+
+ if (!method_holder()->is_rewritten()) {
+ it->push(&_constMethod, MetaspaceClosure::_writable);
+ } else {
+ it->push(&_constMethod);
+ }
+ it->push(&_method_data);
+ it->push(&_method_counters);
+}
+
// Revert to using the interpreter and clear out the nmethod
void Method::clear_code(bool acquire_lock /* = true */) {
MutexLockerEx pl(acquire_lock ? Patching_lock : NULL, Mutex::_no_safepoint_check_flag);
@@ -1421,12 +1436,15 @@ static int method_comparator(Method* a, Method* b) {
// This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
// default_methods also uses this without the ordering for fast find_method
-void Method::sort_methods(Array<Method*>* methods, bool idempotent, bool set_idnums) {
+void Method::sort_methods(Array<Method*>* methods, bool idempotent, bool set_idnums, method_comparator_func func) {
int length = methods->length();
if (length > 1) {
+ if (func == NULL) {
+ func = method_comparator;
+ }
{
No_Safepoint_Verifier nsv;
- QuickSort::sort<Method*>(methods->data(), length, method_comparator, idempotent);
+ QuickSort::sort<Method*>(methods->data(), length, func, idempotent);
}
// Reset method ordering
if (set_idnums) {
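
A standalone sketch of the pluggable comparator added to sort_methods() (illustrative only; presumably the dynamic dump supplies a comparator over the archived addresses of the name symbols, which is what invalidates binary search per the instanceKlass.cpp comment above):

#include <algorithm>
#include <cstdio>
#include <vector>

struct M { const void* name; };

typedef int (*method_comparator_func)(const M&, const M&);

// Order methods by the address of their name symbol.
int by_name_addr(const M& a, const M& b) {
  if (a.name < b.name) return -1;
  return a.name == b.name ? 0 : 1;
}

void sort_methods(std::vector<M>& methods, method_comparator_func func) {
  std::sort(methods.begin(), methods.end(),
            [func](const M& a, const M& b) { return func(a, b) < 0; });
}

int main() {
  int x = 0, y = 0;
  std::vector<M> methods;
  methods.push_back(M{&y});
  methods.push_back(M{&x});
  sort_methods(methods, by_name_addr);
  std::printf("sorted %zu methods\n", methods.size());
  return 0;
}
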
diff --git a/hotspot/src/share/vm/oops/method.hpp b/hotspot/src/share/vm/oops/method.hpp
index 1f507ac0f..ec93f2fb4 100644
--- a/hotspot/src/share/vm/oops/method.hpp
+++ b/hotspot/src/share/vm/oops/method.hpp
@@ -99,6 +99,7 @@ class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;
+class MetaspaceClosure;
class Method : public Metadata {
friend class VMStructs;
@@ -857,6 +858,9 @@ class Method : public Metadata {
void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
public:
+ void metaspace_pointers_do(MetaspaceClosure* it);
+ virtual MetaspaceObj::Type type() const { return MethodType; }
+
MethodCounters* get_method_counters(TRAPS) {
if (_method_counters == NULL) {
build_method_counters(this, CHECK_AND_CLEAR_NULL);
@@ -897,8 +901,9 @@ class Method : public Metadata {
void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
#endif
+ typedef int (*method_comparator_func)(Method* a, Method* b);
// Helper routine used for method sorting
- static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);
+ static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true, method_comparator_func func = NULL);
// Deallocation function for redefine classes or if an error occurs
void deallocate_contents(ClassLoaderData* loader_data);
diff --git a/hotspot/src/share/vm/oops/methodCounters.hpp b/hotspot/src/share/vm/oops/methodCounters.hpp
index b98644574..6a3f7a738 100644
--- a/hotspot/src/share/vm/oops/methodCounters.hpp
+++ b/hotspot/src/share/vm/oops/methodCounters.hpp
@@ -129,5 +129,12 @@ class MethodCounters: public MetaspaceObj {
return offset_of(MethodCounters, _interpreter_invocation_count);
}
+ MetaspaceObj::Type type() const { return MethodCountersType; }
+
+ void metaspace_pointers_do(MetaspaceClosure* it) {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(MethodCounters): %p", this);
+ }
+ }
};
#endif //SHARE_VM_OOPS_METHODCOUNTERS_HPP
diff --git a/hotspot/src/share/vm/oops/methodData.cpp b/hotspot/src/share/vm/oops/methodData.cpp
index eb48188a6..bde6ca123 100644
--- a/hotspot/src/share/vm/oops/methodData.cpp
+++ b/hotspot/src/share/vm/oops/methodData.cpp
@@ -29,6 +29,7 @@
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/heapInspection.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "oops/methodData.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/compilationPolicy.hpp"
@@ -1683,3 +1684,11 @@ void MethodData::clean_weak_method_links() {
clean_extra_data(&cl);
verify_extra_data_clean(&cl);
}
+
+
+void MethodData::metaspace_pointers_do(MetaspaceClosure* iter) {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(MethodData): %p", this);
+ }
+ iter->push(&_method);
+}
diff --git a/hotspot/src/share/vm/oops/methodData.hpp b/hotspot/src/share/vm/oops/methodData.hpp
index 3cd7cd6f1..eb121268f 100644
--- a/hotspot/src/share/vm/oops/methodData.hpp
+++ b/hotspot/src/share/vm/oops/methodData.hpp
@@ -67,7 +67,7 @@ class KlassSizeStats;
// forward decl
class ProfileData;
-
+class MetaspaceClosure;
// DataLayout
//
// Overlay for generic profiling data.
@@ -2486,6 +2486,9 @@ public:
void clean_method_data(BoolObjectClosure* is_alive);
void clean_weak_method_links();
+
+ virtual void metaspace_pointers_do(MetaspaceClosure* iter);
+ virtual MetaspaceObj::Type type() const { return MethodDataType; }
};
#endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
diff --git a/hotspot/src/share/vm/oops/objArrayKlass.cpp b/hotspot/src/share/vm/oops/objArrayKlass.cpp
index 19abfbd5a..60d173e9e 100644
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp
@@ -33,6 +33,7 @@
#include "memory/metadataFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
@@ -569,6 +570,12 @@ int ObjArrayKlass::oop_adjust_pointers(oop obj) {
return size;
}
+void ObjArrayKlass::metaspace_pointers_do(MetaspaceClosure* it) {
+ ArrayKlass::metaspace_pointers_do(it);
+ it->push(&_element_klass);
+ it->push(&_bottom_klass);
+}
+
#if INCLUDE_ALL_GCS
void ObjArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
assert(obj->is_objArray(), "obj must be obj array");
diff --git a/hotspot/src/share/vm/oops/objArrayKlass.hpp b/hotspot/src/share/vm/oops/objArrayKlass.hpp
index ab3cbc61c..c17adba70 100644
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp
@@ -109,7 +109,8 @@ class ObjArrayKlass : public ArrayKlass {
template <class T> inline void objarray_follow_contents(oop obj, int index, MarkSweep* mark);
int oop_adjust_pointers(oop obj);
-
+
+ virtual void metaspace_pointers_do(MetaspaceClosure* iter);
// Parallel Scavenge and Parallel Old
PARALLEL_GC_DECLS
#if INCLUDE_ALL_GCS
diff --git a/hotspot/src/share/vm/oops/symbol.hpp b/hotspot/src/share/vm/oops/symbol.hpp
index aaa55c589..4b1b5cb5d 100644
--- a/hotspot/src/share/vm/oops/symbol.hpp
+++ b/hotspot/src/share/vm/oops/symbol.hpp
@@ -25,9 +25,9 @@
#ifndef SHARE_VM_OOPS_SYMBOL_HPP
#define SHARE_VM_OOPS_SYMBOL_HPP
-#include "utilities/utf8.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
+#include "utilities/utf8.hpp"
// A Symbol is a canonicalized string.
// All Symbols reside in global SymbolTable and are reference counted.
@@ -101,6 +101,7 @@
// Since sometimes this is allocated from Metadata, pick a base allocation
// type without virtual functions.
class ClassLoaderData;
+class MetaspaceClosure;
// We separate the fields in SymbolBase from Symbol::_body so that
// Symbol::size(int) can correctly calculate the space needed.
@@ -113,7 +114,7 @@ class SymbolBase : public MetaspaceObj {
int _identity_hash;
};
-class Symbol : private SymbolBase {
+class Symbol : public SymbolBase {
friend class VMStructs;
friend class SymbolTable;
friend class MoveSymbols;
@@ -160,6 +161,9 @@ class Symbol : private SymbolBase {
int refcount() const { return _refcount; }
void increment_refcount();
void decrement_refcount();
+ bool is_permanent() const {
+ return (refcount() == -1);
+ }
int byte_at(int index) const {
assert(index >=0 && index < _length, "symbol index overflow");
@@ -180,6 +184,17 @@ class Symbol : private SymbolBase {
return starts_with(prefix, (int) strlen(prefix));
}
+ void set_permanent() {
+ _refcount = -1;
+ }
+
+ void metaspace_pointers_do(MetaspaceClosure* it) {
+ if (TraceDynamicCDS) {
+ dynamic_cds_log->print_cr("Iter(Symbol): %p", this);
+ }
+ }
+
+ MetaspaceObj::Type type() const { return SymbolType; }
// Tests if the symbol starts with the given prefix.
int index_of_at(int i, const char* str, int len) const;
int index_of_at(int i, const char* str) const {
@@ -208,6 +223,9 @@ class Symbol : private SymbolBase {
jchar* as_unicode(int& length) const;
+ // Symbols should be stored in the read-only region of CDS archive.
+ static bool is_read_only_by_default() { return true; }
+
// Treating this symbol as a class name, returns the Java name for the class.
// String is allocated in resource area if buffer is not provided.
// See Klass::external_name()
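
A standalone sketch of the permanent-refcount convention that set_permanent()/is_permanent() rely on: a refcount of -1 marks an archived symbol, and reference counting skips such symbols so shared symbols are never freed (illustrative only):

#include <cstdio>

struct Sym {
  int refcount;
  bool is_permanent() const { return refcount == -1; }
  void set_permanent()      { refcount = -1; }
  void decrement_refcount() { if (!is_permanent()) refcount--; }
};

int main() {
  Sym s{3};
  s.set_permanent();
  s.decrement_refcount();                    // no-op: permanent symbols stay alive
  std::printf("refcount=%d\n", s.refcount);  // still -1
  return 0;
}
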
diff --git a/hotspot/src/share/vm/runtime/arguments.cpp b/hotspot/src/share/vm/runtime/arguments.cpp
index 6f5e75107..1f603021a 100644
--- a/hotspot/src/share/vm/runtime/arguments.cpp
+++ b/hotspot/src/share/vm/runtime/arguments.cpp
@@ -29,6 +29,7 @@
#include "compiler/compilerOracle.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/cardTableRS.hpp"
+#include "memory/filemap.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/universe.inline.hpp"
@@ -126,6 +127,7 @@ bool Arguments::_BackgroundCompilation = BackgroundCompilation;
bool Arguments::_ClipInlining = ClipInlining;
char* Arguments::SharedArchivePath = NULL;
+char* Arguments::SharedDynamicArchivePath = NULL;
AgentLibraryList Arguments::_libraryList;
AgentLibraryList Arguments::_agentList;
@@ -179,6 +181,117 @@ static void logOption(const char* opt) {
}
}
+#if INCLUDE_CDS
+// Sharing support
+// Construct the path to the archive
+int Arguments::num_archives(const char* archive_path) {
+ if (archive_path == NULL) {
+ return 0;
+ }
+ int npaths = 1;
+ char* p = (char*)archive_path;
+ while (*p != '\0') {
+ if (*p == os::path_separator()[0]) {
+ npaths++;
+ }
+ p++;
+ }
+ return npaths;
+}
+
+void Arguments::extract_shared_archive_paths(const char* archive_path,
+ char** base_archive_path,
+ char** top_archive_path) {
+ char* begin_ptr = (char*)archive_path;
+ char* end_ptr = strchr((char*)archive_path, os::path_separator()[0]);
+ if (end_ptr == NULL || end_ptr == begin_ptr) {
+ vm_exit_during_initialization("Base archive was not specified", archive_path);
+ }
+ size_t len = end_ptr - begin_ptr;
+ char* cur_path = NEW_C_HEAP_ARRAY(char, len + 1, mtInternal);
+ strncpy(cur_path, begin_ptr, len);
+ cur_path[len] = '\0';
+ FileMapInfo::check_archive((const char*)cur_path, true /*is_static*/);
+ *base_archive_path = cur_path;
+
+ begin_ptr = ++end_ptr;
+ if (*begin_ptr == '\0') {
+ vm_exit_during_initialization("Top archive was not specified", archive_path);
+ }
+ end_ptr = strchr(begin_ptr, '\0');
+ assert(end_ptr != NULL, "sanity");
+ len = end_ptr - begin_ptr;
+ cur_path = NEW_C_HEAP_ARRAY(char, len + 1, mtInternal);
+ strncpy(cur_path, begin_ptr, len + 1);
+
+ FileMapInfo::check_archive((const char*)cur_path, false /*is_static*/);
+ *top_archive_path = cur_path;
+}
+
+bool Arguments::init_shared_archive_paths() {
+ if (ArchiveClassesAtExit != NULL) {
+ if (DumpSharedSpaces) {
+ vm_exit_during_initialization("-XX:ArchiveClassesAtExit cannot be used with -Xshare:dump");
+ }
+ SharedDynamicArchivePath = os::strdup_check_oom(ArchiveClassesAtExit, mtClassShared);
+ } else {
+ if (SharedDynamicArchivePath != NULL) {
+ os::free(SharedDynamicArchivePath);
+ SharedDynamicArchivePath = NULL;
+ }
+ }
+
+ if (SharedArchiveFile != NULL) {
+ int archives = num_archives(SharedArchiveFile);
+ if (is_dumping_archive()) {
+ if (archives > 1) {
+ vm_exit_during_initialization(
+ "Cannot have more than 1 archive file specified in -XX:SharedArchiveFile during CDS dumping");
+ }
+ if (DynamicDumpSharedSpaces) {
+ if (strcmp(SharedArchiveFile, ArchiveClassesAtExit) == 0) {
+ vm_exit_during_initialization(
+ "Cannot have the same archive file specified for -XX:SharedArchiveFile and -XX:ArchiveClassesAtExit",
+ SharedArchiveFile);
+ }
+ }
+ }
+
+ if (!is_dumping_archive()) {
+ if (archives > 2) {
+ vm_exit_during_initialization(
+ "Cannot have more than 2 archive files specified in the -XX:SharedArchiveFile option");
+ }
+ if (archives == 1) {
+ char* temp_archive_path = os::strdup_check_oom(SharedArchiveFile, mtClassShared);
+ int name_size;
+ bool success =
+ FileMapInfo::get_base_archive_name_from_header(temp_archive_path, &name_size, &SharedArchivePath);
+ if (!success) {
+ SharedArchivePath = temp_archive_path;
+ } else {
+ SharedDynamicArchivePath = temp_archive_path;
+ }
+ } else {
+ extract_shared_archive_paths((const char*)SharedArchiveFile,
+ &SharedArchivePath, &SharedDynamicArchivePath);
+ }
+
+      // We must use tty here instead of dynamic_cds_log, because dynamic_cds_log is only initialized after shared path init.
+ if (InfoDynamicCDS && SharedArchivePath != NULL) {
+ tty->print_cr("SharedArchivePath: %s", SharedArchivePath);
+ }
+ if (InfoDynamicCDS && SharedDynamicArchivePath != NULL) {
+ tty->print_cr("SharedDynamicArchivePath: %s", SharedDynamicArchivePath);
+ }
+ } else { // CDS dumping
+ SharedArchivePath = os::strdup_check_oom(SharedArchiveFile, mtClassShared);
+ }
+ }
+ return (SharedArchivePath != NULL);
+}
+#endif // INCLUDE_CDS
+
// Process java launcher properties.
void Arguments::process_sun_java_launcher_properties(JavaVMInitArgs* args) {
// See if sun.java.launcher or sun.java.launcher.pid is defined.
@@ -3724,6 +3837,30 @@ jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_req
set_mode_flags(_int);
}
+#if INCLUDE_CDS
+ if (ArchiveClassesAtExit == NULL) {
+ FLAG_SET_DEFAULT(DynamicDumpSharedSpaces, false);
+ } else {
+ FLAG_SET_DEFAULT(DynamicDumpSharedSpaces, true);
+    // When the dynamic CDS dump is enabled, we set ClassUnloading to false,
+    // so there is no need to track whether class loaders are still alive.
+ FLAG_SET_DEFAULT(ClassUnloading, false);
+ }
+
+ if (TraceDynamicCDS) {
+ FLAG_SET_DEFAULT(DebugDynamicCDS, true);
+ FLAG_SET_DEFAULT(InfoDynamicCDS, true);
+ } else if (DebugDynamicCDS) {
+ FLAG_SET_DEFAULT(InfoDynamicCDS, true);
+ }
+
+#ifdef _LP64
+  // We attempt to set SharedBaseAddress right above the maximum java heap
+  // supported by compressed oops, which scales with ObjectAlignmentInBytes.
+ FLAG_SET_DEFAULT(SharedBaseAddress, (ObjectAlignmentInBytes * 4 * G));
+#endif // _LP64
+#endif // INCLUDE_CDS
+
// eventually fix up InitialTenuringThreshold if only MaxTenuringThreshold is set
if (FLAG_IS_DEFAULT(InitialTenuringThreshold) && (InitialTenuringThreshold > MaxTenuringThreshold)) {
FLAG_SET_ERGO(uintx, InitialTenuringThreshold, MaxTenuringThreshold);
@@ -3885,6 +4022,11 @@ void Arguments::set_shared_spaces_flags() {
}
#endif
}
+
+#if INCLUDE_CDS
+ // Initialize shared archive paths which could include both base and dynamic archive paths
+ init_shared_archive_paths();
+#endif // INCLUDE_CDS
}
#if !INCLUDE_ALL_GCS
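
A standalone sketch of the path parsing done by num_archives() and extract_shared_archive_paths() (illustrative only: ':' stands in for os::path_separator(), and errors are reduced to a bool instead of vm_exit_during_initialization):

#include <cstdio>
#include <cstring>
#include <string>

// Count path-separator-delimited entries, as num_archives() does.
int num_archives(const char* p) {
  if (p == nullptr) return 0;
  int n = 1;
  for (; *p != '\0'; p++) {
    if (*p == ':') n++;
  }
  return n;
}

// Split "<base>:<top>"; both parts must be non-empty.
bool split_archive_paths(const char* spec, std::string& base, std::string& top) {
  const char* sep = std::strchr(spec, ':');
  if (sep == nullptr || sep == spec || sep[1] == '\0') return false;
  base.assign(spec, sep - spec);
  top.assign(sep + 1);
  return true;
}

int main() {
  const char* spec = "base.jsa:app-top.jsa";
  std::string base, top;
  if (split_archive_paths(spec, base, top)) {
    std::printf("%d archives: base=%s top=%s\n",
                num_archives(spec), base.c_str(), top.c_str());
  }
  return 0;
}
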
diff --git a/hotspot/src/share/vm/runtime/arguments.hpp b/hotspot/src/share/vm/runtime/arguments.hpp
index a1fcfc398..19f5cb60b 100644
--- a/hotspot/src/share/vm/runtime/arguments.hpp
+++ b/hotspot/src/share/vm/runtime/arguments.hpp
@@ -443,7 +443,8 @@ class Arguments : AllStatic {
static bool CheckCompileOnly;
static char* SharedArchivePath;
- static char* AppCDSLockPath;
+
+ static char* SharedDynamicArchivePath;
public:
// Parses the arguments, first phase
@@ -553,6 +554,22 @@ class Arguments : AllStatic {
static const char* GetSharedArchivePath() { return SharedArchivePath; }
+ static const char* GetSharedDynamicArchivePath() { return SharedDynamicArchivePath; }
+
+ static bool init_shared_archive_paths();
+
+ static void extract_shared_archive_paths(const char* archive_path,
+ char** base_archive_path,
+ char** top_archive_path);
+
+ static int num_archives(const char* archive_path);
+
+ static bool is_dumping_archive() { return DumpSharedSpaces || DynamicDumpSharedSpaces; }
+
+ static void assert_is_dumping_archive() {
+ assert(Arguments::is_dumping_archive(), "dump time only");
+ }
+
static bool CompileMethod(char* className, char* methodName) {
return
methodExists(
diff --git a/hotspot/src/share/vm/runtime/globals.hpp b/hotspot/src/share/vm/runtime/globals.hpp
index 91e52f033..eb13ee0d7 100644
--- a/hotspot/src/share/vm/runtime/globals.hpp
+++ b/hotspot/src/share/vm/runtime/globals.hpp
@@ -3910,6 +3910,24 @@ class CommandLineFlags {
NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \
"Address to allocate shared memory region for class data") \
\
+ experimental(ccstr, ArchiveClassesAtExit, NULL, \
+ "The path and name of the dynamic archive file") \
+ \
+ product(bool, InfoDynamicCDS, false, \
+ "Log info level in DynamicCDS") \
+ \
+ product(bool, TraceDynamicCDS, false, \
+ "Trace details in DynamicCDS") \
+ \
+ product(bool, DebugDynamicCDS, false, \
+ "Debug details in DynamicCDS") \
+ \
+ product(bool, DynamicDumpSharedSpaces, false, \
+ "Dynamic archive") \
+ \
+ product(uintx, SharedSymbolTableBucketSize, 4, \
+ "Average number of symbols per bucket in shared table") \
+ \
diagnostic(bool, EnableInvokeDynamic, true, \
"support JSR 292 (method handles, invokedynamic, " \
"anonymous classes") \
@@ -4017,6 +4035,9 @@ class CommandLineFlags {
"Dump the names all loaded classes, that could be stored into " \
"the CDS archive, in the specified file") \
\
+ product(ccstr, DynamicCDSLog, NULL, \
+ "Dynamic CDS log path") \
+ \
product(ccstr, SharedClassListFile, NULL, \
"Override the default CDS class list") \
\
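
Typical invocations under this patch might look as follows (archive and jar names are hypothetical; since ArchiveClassesAtExit is declared experimental above, it presumably needs -XX:+UnlockExperimentalVMOptions):

# dump a dynamic (top) archive when the VM exits
java -XX:+UnlockExperimentalVMOptions -XX:ArchiveClassesAtExit=app-top.jsa \
     -cp app.jar MyApp

# run with the base and the dynamic archive together, with trace logging
java -XX:SharedArchiveFile=base.jsa:app-top.jsa -XX:+TraceDynamicCDS \
     -cp app.jar MyApp
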
diff --git a/hotspot/src/share/vm/runtime/java.cpp b/hotspot/src/share/vm/runtime/java.cpp
index 0a263b017..4f290c826 100644
--- a/hotspot/src/share/vm/runtime/java.cpp
+++ b/hotspot/src/share/vm/runtime/java.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "cds/dynamicArchive.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
@@ -546,6 +547,13 @@ void before_exit(JavaThread * thread) {
// Note: we don't wait until it actually dies.
os::terminate_signal_thread();
+#if INCLUDE_CDS
+ if (DynamicDumpSharedSpaces) {
+ DynamicArchive::dump();
+ ShouldNotReachHere();
+ }
+#endif
+
print_statistics();
Universe::heap()->print_tracing_info();
diff --git a/hotspot/src/share/vm/runtime/mutexLocker.cpp b/hotspot/src/share/vm/runtime/mutexLocker.cpp
index a96ae50eb..a1c61f864 100644
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp
@@ -39,6 +39,7 @@
Mutex* Patching_lock = NULL;
Monitor* SystemDictionary_lock = NULL;
+Mutex* SharedDictionary_lock = NULL;
Mutex* PackageTable_lock = NULL;
Mutex* CompiledIC_lock = NULL;
Mutex* InlineCacheBuffer_lock = NULL;
@@ -129,6 +130,7 @@ Monitor* RedefineClasses_lock = NULL;
Mutex* FreeHumongousRegions_lock = NULL;
+Mutex* DumpTimeTable_lock = NULL;
#ifdef INCLUDE_JFR
Mutex* JfrStacktrace_lock = NULL;
Monitor* JfrMsg_lock = NULL;
@@ -224,6 +226,7 @@ void mutex_init() {
def(JmethodIdCreation_lock , Mutex , leaf, true ); // used for creating jmethodIDs.
def(SystemDictionary_lock , Monitor, leaf, true ); // lookups done by VM thread
+ def(SharedDictionary_lock , Mutex , leaf, true );
def(PackageTable_lock , Mutex , leaf, false);
def(InlineCacheBuffer_lock , Mutex , leaf, true );
def(VMStatistic_lock , Mutex , leaf, false);
@@ -289,7 +292,7 @@ void mutex_init() {
def(RedefineClasses_lock , Monitor, nonleaf+5, true);
def(FreeHumongousRegions_lock , Mutex , nonleaf, false);
-
+ def(DumpTimeTable_lock , Mutex , leaf - 1, true);
#if INCLUDE_JFR
def(JfrMsg_lock , Monitor, leaf, true);
def(JfrBuffer_lock , Mutex, leaf, true);
diff --git a/hotspot/src/share/vm/runtime/mutexLocker.hpp b/hotspot/src/share/vm/runtime/mutexLocker.hpp
index 428c80181..f28058b0e 100644
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp
@@ -47,6 +47,7 @@
extern Mutex* Patching_lock; // a lock used to guard code patching of compiled code
extern Monitor* SystemDictionary_lock; // a lock on the system dictonary
+extern Mutex* SharedDictionary_lock; // a lock on the CDS shared dictionary
extern Mutex* PackageTable_lock; // a lock on the class loader package table
extern Mutex* CompiledIC_lock; // a lock used to guard compiled IC patching and access
extern Mutex* InlineCacheBuffer_lock; // a lock used to guard the InlineCacheBuffer
@@ -145,6 +146,8 @@ extern Monitor* RedefineClasses_lock; // locks classes from parallel
extern Mutex* FreeHumongousRegions_lock; // locks humongous regions from freeing in parallel
+extern Mutex* DumpTimeTable_lock; // SystemDictionaryShared::find_or_allocate_info_for
+
#if INCLUDE_JFR
extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table
extern Monitor* JfrMsg_lock; // protects JFR messaging
diff --git a/hotspot/src/share/vm/runtime/os.cpp b/hotspot/src/share/vm/runtime/os.cpp
index ed41265cc..5c5d60220 100644
--- a/hotspot/src/share/vm/runtime/os.cpp
+++ b/hotspot/src/share/vm/runtime/os.cpp
@@ -568,7 +568,7 @@ bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
// --------------------- heap allocation utilities ---------------------
-char *os::strdup(const char *str, MEMFLAGS flags) {
+char* os::strdup(const char *str, MEMFLAGS flags) {
size_t size = strlen(str);
char *dup_str = (char *)malloc(size + 1, flags);
if (dup_str == NULL) return NULL;
@@ -576,6 +576,13 @@ char *os::strdup(const char *str, MEMFLAGS flags) {
return dup_str;
}
+char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
+ char* p = os::strdup(str, flags);
+ if (p == NULL) {
+ vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom");
+ }
+ return p;
+}
#define paranoid 0 /* only set to 1 if you suspect checking code has bug */
diff --git a/hotspot/src/share/vm/runtime/os.hpp b/hotspot/src/share/vm/runtime/os.hpp
index 296380f39..7ae49fd5b 100644
--- a/hotspot/src/share/vm/runtime/os.hpp
+++ b/hotspot/src/share/vm/runtime/os.hpp
@@ -731,6 +731,8 @@ class os: AllStatic {
static void free (void *memblock, MEMFLAGS flags = mtNone);
static bool check_heap(bool force = false); // verify C heap integrity
static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup
+ // Like strdup, but exit VM when strdup() returns NULL
+ static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal);
#ifndef PRODUCT
static julong num_mallocs; // # of calls to malloc/realloc
diff --git a/hotspot/src/share/vm/runtime/thread.cpp b/hotspot/src/share/vm/runtime/thread.cpp
index 94b9e69d2..807786d98 100644
--- a/hotspot/src/share/vm/runtime/thread.cpp
+++ b/hotspot/src/share/vm/runtime/thread.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "cds/dynamicArchive.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
@@ -3934,6 +3935,15 @@ void JavaThread::invoke_shutdown_hooks() {
this->clear_pending_exception();
}
+#if INCLUDE_CDS
+ // Link all classes for dynamic CDS dumping before vm exit.
+ // Same operation is being done in JVM_BeforeHalt for handling the
+ // case where the application calls System.exit().
+ if (DynamicDumpSharedSpaces) {
+ DynamicArchive::prepare_for_dynamic_dumping_at_exit();
+ }
+#endif
+
EXCEPTION_MARK;
Klass* k =
SystemDictionary::resolve_or_null(vmSymbols::java_lang_Shutdown(),
diff --git a/hotspot/src/share/vm/services/diagnosticCommand.cpp b/hotspot/src/share/vm/services/diagnosticCommand.cpp
index ede8db156..358ec6e09 100644
--- a/hotspot/src/share/vm/services/diagnosticCommand.cpp
+++ b/hotspot/src/share/vm/services/diagnosticCommand.cpp
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "cds/dynamicArchive.hpp"
#include "classfile/classLoaderStats.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "runtime/javaCalls.hpp"
@@ -57,6 +58,7 @@ void DCmdRegistrant::register_dcmds(){
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<FinalizerInfoDCmd>(full_export, true, false));
#if INCLUDE_SERVICES // Heap dumping/inspection supported
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<HeapDumpDCmd>(DCmd_Source_Internal | DCmd_Source_AttachAPI, true, false));
+ DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<DynamicCDSDumpDCmd>(DCmd_Source_Internal | DCmd_Source_AttachAPI, true, false));
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassHistogramDCmd>(full_export, true, false));
DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<ClassStatsDCmd>(full_export, true, false));
#endif // INCLUDE_SERVICES
@@ -375,6 +377,17 @@ int HeapDumpDCmd::num_arguments() {
}
}
+void DynamicCDSDumpDCmd::execute(DCmdSource source, TRAPS) {
+#if INCLUDE_CDS
+ if (DynamicDumpSharedSpaces) {
+ DynamicArchive::dump();
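+    // dump() is not expected to return here (the VM exits while writing
+    // the archive), hence the guard below.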
+ ShouldNotReachHere();
+ } else {
+ warning("Dynamic CDS is not enabled");
+ }
+#endif
+}
+
ClassHistogramDCmd::ClassHistogramDCmd(outputStream* output, bool heap) :
DCmdWithParser(output, heap),
_all("-all", "Inspect all objects, including unreachable objects",
diff --git a/hotspot/src/share/vm/services/diagnosticCommand.hpp b/hotspot/src/share/vm/services/diagnosticCommand.hpp
index b1fb57e53..e28011f25 100644
--- a/hotspot/src/share/vm/services/diagnosticCommand.hpp
+++ b/hotspot/src/share/vm/services/diagnosticCommand.hpp
@@ -267,6 +267,29 @@ public:
};
#endif // INCLUDE_SERVICES
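+// Diagnostic command that triggers a dynamic CDS dump in a running VM.
+// It is registered for DCmd_Source_Internal and the attach API, so it can
+// be invoked externally, e.g. (illustrative): jcmd <pid> GC.dynamic_cds_dump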
+class DynamicCDSDumpDCmd : public DCmdWithParser {
+public:
+ DynamicCDSDumpDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap) { }
+ static const char* name() {
+ return "GC.dynamic_cds_dump";
+ }
+ static const char* description() {
+ return "Dynamic CDS dump";
+ }
+ static const char* impact() {
+ return "Medium";
+ }
+ static const JavaPermission permission() {
+ JavaPermission p = {"java.lang.management.ManagementPermission",
+ "monitor", NULL};
+ return p;
+ }
+ static int num_arguments() {
+ return 0;
+ }
+ virtual void execute(DCmdSource source, TRAPS);
+};
+
// See also: inspectheap in attachListener.cpp
class ClassHistogramDCmd : public DCmdWithParser {
protected:
diff --git a/hotspot/src/share/vm/utilities/array.hpp b/hotspot/src/share/vm/utilities/array.hpp
index 920b87501..371876b56 100644
--- a/hotspot/src/share/vm/utilities/array.hpp
+++ b/hotspot/src/share/vm/utilities/array.hpp
@@ -302,6 +302,7 @@ define_array(intArray , int ) define_stack(intStack , intArray )
template <typename T>
class Array: public MetaspaceObj {
+ friend class ArchiveBuilder;
friend class MetadataFactory;
friend class VMStructs;
friend class MethodHandleCompiler; // special case
diff --git a/hotspot/src/share/vm/utilities/bitMap.cpp b/hotspot/src/share/vm/utilities/bitMap.cpp
index e64add155..12b4b4160 100644
--- a/hotspot/src/share/vm/utilities/bitMap.cpp
+++ b/hotspot/src/share/vm/utilities/bitMap.cpp
@@ -67,16 +67,14 @@ void BitMap::resize(idx_t size_in_bits, bool in_resource_area) {
idx_t new_size_in_words = size_in_words();
if (in_resource_area) {
_map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words);
+ Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
+ MIN2(old_size_in_words, new_size_in_words));
} else {
- if (old_map != NULL) {
- _map_allocator.free();
- }
- _map = _map_allocator.allocate(new_size_in_words);
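+    // reallocate() copies the old contents into the new map and releases
+    // the old one, so only the resource-area branch needs an explicit copy.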
+ _map = _map_allocator.reallocate(new_size_in_words);
}
- Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
- MIN2(old_size_in_words, new_size_in_words));
+
if (new_size_in_words > old_size_in_words) {
- clear_range_of_words(old_size_in_words, size_in_words());
+ clear_range_of_words(old_size_in_words, new_size_in_words);
}
}
@@ -454,6 +452,11 @@ bool BitMap::is_empty() const {
return rest == 0 || (*word & right_n_bits((int)rest)) == (bm_word_t) NoBits;
}
+void BitMap::write_to(bm_word_t* buffer, size_t buffer_size_in_bytes) const {
+ assert(buffer_size_in_bytes == (size_in_words() * BytesPerWord), "must be");
+ memcpy(buffer, _map, size_in_words() * BytesPerWord);
+}
+
void BitMap::clear_large() {
clear_large_range_of_words(0, size_in_words());
}
diff --git a/hotspot/src/share/vm/utilities/bitMap.hpp b/hotspot/src/share/vm/utilities/bitMap.hpp
index 51c58da8e..08452bd90 100644
--- a/hotspot/src/share/vm/utilities/bitMap.hpp
+++ b/hotspot/src/share/vm/utilities/bitMap.hpp
@@ -269,6 +269,7 @@ class BitMap VALUE_OBJ_CLASS_SPEC {
bool is_full() const;
bool is_empty() const;
+ void write_to(bm_word_t* buffer, size_t buffer_size_in_bytes) const;
void print_on_error(outputStream* st, const char* prefix) const;
#ifndef PRODUCT
diff --git a/hotspot/src/share/vm/utilities/constantTag.hpp b/hotspot/src/share/vm/utilities/constantTag.hpp
index ae99d5706..07a873743 100644
--- a/hotspot/src/share/vm/utilities/constantTag.hpp
+++ b/hotspot/src/share/vm/utilities/constantTag.hpp
@@ -43,7 +43,8 @@ enum {
JVM_CONSTANT_UnresolvedClassInError = 103, // Error tag due to resolution error
JVM_CONSTANT_MethodHandleInError = 104, // Error tag due to resolution error
JVM_CONSTANT_MethodTypeInError = 105, // Error tag due to resolution error
- JVM_CONSTANT_InternalMax = 105 // Last implementation tag
+ JVM_CONSTANT_ReplacedSymbol = 106,
+ JVM_CONSTANT_InternalMax = 106 // Last implementation tag
};
@@ -62,7 +63,7 @@ class constantTag VALUE_OBJ_CLASS_SPEC {
bool is_double() const { return _tag == JVM_CONSTANT_Double; }
bool is_name_and_type() const { return _tag == JVM_CONSTANT_NameAndType; }
bool is_utf8() const { return _tag == JVM_CONSTANT_Utf8; }
-
+ bool is_replaced_symbol() const { return _tag == JVM_CONSTANT_ReplacedSymbol; }
bool is_invalid() const { return _tag == JVM_CONSTANT_Invalid; }
bool is_unresolved_klass() const {
diff --git a/hotspot/src/share/vm/utilities/globalDefinitions.hpp b/hotspot/src/share/vm/utilities/globalDefinitions.hpp
index 81866b840..25f6f026c 100644
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp
@@ -1511,6 +1511,16 @@ static inline void* dereference_vptr(const void* addr) {
return *(void**)addr;
}
+
+template<typename K> unsigned primitive_hash(const K& k) {
+ unsigned hash = (unsigned)((uintptr_t)k);
+ return hash ^ (hash >> 3); // just in case we're dealing with aligned ptrs
+}
+
+template<typename K> bool primitive_equals(const K& k0, const K& k1) {
+ return k0 == k1;
+}
+
#ifndef PRODUCT
// For unit testing only
@@ -1519,7 +1529,6 @@ public:
static void test_globals();
static void test_proper_unit();
};
-
#endif // PRODUCT
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP
diff --git a/hotspot/src/share/vm/utilities/hashtable.cpp b/hotspot/src/share/vm/utilities/hashtable.cpp
index c026e6a0e..66df8f1f8 100644
--- a/hotspot/src/share/vm/utilities/hashtable.cpp
+++ b/hotspot/src/share/vm/utilities/hashtable.cpp
@@ -34,7 +34,7 @@
#include "utilities/hashtable.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/numberSeq.hpp"
-
+#include "utilities/align.hpp"
// This hashtable is implemented as an open hash table with a fixed number of buckets.
@@ -145,7 +145,7 @@ template <MEMFLAGS F> void BasicHashtable<F>::free_buckets() {
// Don't delete the buckets in the shared space. They aren't
// allocated by os::malloc
if (!UseSharedSpaces ||
- !FileMapInfo::current_info()->is_in_shared_space(_buckets)) {
+ !MetaspaceShared::is_in_shared_space(_buckets)) {
FREE_C_HEAP_ARRAY(HashtableBucket, _buckets, F);
}
_buckets = NULL;
@@ -221,7 +221,7 @@ template <MEMFLAGS F> void BasicHashtable<F>::copy_table(char** top, char* end)
*top += entry_size();
}
}
- *plen = (char*)(*top) - (char*)plen - sizeof(*plen);
+ *plen = ((char*)(*top) - (char*)plen) - sizeof(*plen);
// Set the shared bit.
@@ -317,7 +317,6 @@ template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(output
st->print_cr("Maximum bucket size : %9d", (int)summary.maximum());
}
-
// Dump the hash table buckets.
template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end) {
@@ -335,6 +334,57 @@ template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end
*top += len;
}
+template <MEMFLAGS F> bool BasicHashtable<F>::resize(int new_size) {
+
+ // Allocate new buckets
+ HashtableBucket<F>* buckets_new = NEW_C_HEAP_ARRAY2_RETURN_NULL(HashtableBucket<F>, new_size, F, CURRENT_PC);
+ if (buckets_new == NULL) {
+ return false;
+ }
+
+ // Clear the new buckets
+ for (int i = 0; i < new_size; i++) {
+ buckets_new[i].clear();
+ }
+
+ int table_size_old = _table_size;
+ // hash_to_index() uses _table_size, so switch the sizes now
+ _table_size = new_size;
+
+ // Move entries from the old table to a new table
+ for (int index_old = 0; index_old < table_size_old; index_old++) {
+ for (BasicHashtableEntry<F>* p = _buckets[index_old].get_entry(); p != NULL; ) {
+ BasicHashtableEntry<F>* next = p->next();
+ int index_new = hash_to_index(p->hash());
+
+ p->set_next(buckets_new[index_new].get_entry());
+ buckets_new[index_new].set_entry(p);
+ p = next;
+ }
+ }
+
+  // The old buckets can now be released
+ BasicHashtable<F>::free_buckets();
+
+ // Switch to the new storage
+ _buckets = buckets_new;
+
+ return true;
+}
+
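+// Grow the table if the average bucket load exceeds load_factor, doubling
+// the number of buckets but never exceeding max_size. Returns true if a
+// resize was attempted. Must only be called at a safepoint.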
+template <MEMFLAGS F> bool BasicHashtable<F>::maybe_grow(int max_size, int load_factor) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+
+ if (table_size() >= max_size) {
+ return false;
+ }
+ if (number_of_entries() / table_size() > load_factor) {
+ resize(MIN2<int>(table_size() * 2, max_size));
+ return true;
+ } else {
+ return false;
+ }
+}
#ifndef PRODUCT
@@ -352,7 +402,6 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::print() {
}
}
-
template <MEMFLAGS F> void BasicHashtable<F>::verify() {
int count = 0;
for (int i = 0; i < table_size(); i++) {
@@ -406,3 +455,4 @@ template class BasicHashtable<mtClass>;
template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>;
template class BasicHashtable<mtInternal>;
+template class BasicHashtable<mtClassShared>;
diff --git a/hotspot/src/share/vm/utilities/hashtable.hpp b/hotspot/src/share/vm/utilities/hashtable.hpp
index 30e442d15..358b09c3d 100644
--- a/hotspot/src/share/vm/utilities/hashtable.hpp
+++ b/hotspot/src/share/vm/utilities/hashtable.hpp
@@ -151,7 +151,7 @@ public:
void copy_table(char** top, char* end);
// Bucket handling
- int hash_to_index(unsigned int full_hash) {
+ int hash_to_index(unsigned int full_hash) const {
int h = full_hash % _table_size;
assert(h >= 0 && h < _table_size, "Illegal hash value");
return h;
@@ -184,7 +184,7 @@ protected:
int entry_size() const { return _entry_size; }
// The following method is MT-safe and may be used with caution.
- BasicHashtableEntry<F>* bucket(int i);
+ BasicHashtableEntry<F>* bucket(int i) const;
// The following method is not MT-safe and must be done under lock.
BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }
@@ -234,7 +234,7 @@ protected:
// is mt-safe wrt. to other calls of this method.
void bulk_free_entries(BucketUnlinkContext* context);
public:
- int table_size() { return _table_size; }
+ int table_size() const { return _table_size; }
void set_entry(int index, BasicHashtableEntry<F>* entry);
void add_entry(int index, BasicHashtableEntry<F>* entry);
@@ -243,6 +243,10 @@ public:
int number_of_entries() { return _number_of_entries; }
+ bool resize(int new_size);
+
+ bool maybe_grow(int max_size, int load_factor = 0);
+
void verify() PRODUCT_RETURN;
};
@@ -364,4 +368,92 @@ public:
}
};
+// A subclass of BasicHashtable that allows you to do a simple K -> V mapping
+// without using tons of boilerplate code.
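+//
+// Illustrative sketch only (hypothetical key/value types, not part of this
+// change):
+//   KVHashtable<Klass*, int, mtInternal> table(137);  // 137 buckets
+//   int* v = table.add(k, 42);      // insert; returns pointer to stored value
+//   int* f = table.lookup(k);       // returns NULL if k was never added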
+template<
+ typename K, typename V, MEMFLAGS F,
+ unsigned (*HASH) (K const&) = primitive_hash<K>,
+ bool (*EQUALS)(K const&, K const&) = primitive_equals<K>
+ >
+class KVHashtable : public BasicHashtable<F> {
+ class KVHashtableEntry : public BasicHashtableEntry<F> {
+ public:
+ K _key;
+ V _value;
+ KVHashtableEntry* next() {
+ return (KVHashtableEntry*)BasicHashtableEntry<F>::next();
+ }
+ };
+
+protected:
+ KVHashtableEntry* bucket(int i) const {
+ return (KVHashtableEntry*)BasicHashtable<F>::bucket(i);
+ }
+
+ KVHashtableEntry* new_entry(unsigned int hashValue, K key, V value) {
+ KVHashtableEntry* entry = (KVHashtableEntry*)BasicHashtable<F>::new_entry(hashValue);
+ entry->_key = key;
+ entry->_value = value;
+ return entry;
+ }
+
+public:
+ KVHashtable(int table_size) : BasicHashtable<F>(table_size, sizeof(KVHashtableEntry)) {}
+
+ V* add(K key, V value) {
+ unsigned int hash = HASH(key);
+ KVHashtableEntry* entry = new_entry(hash, key, value);
+ BasicHashtable<F>::add_entry(BasicHashtable<F>::hash_to_index(hash), entry);
+ return &(entry->_value);
+ }
+
+ V* lookup(K key) const {
+ unsigned int hash = HASH(key);
+ int index = BasicHashtable<F>::hash_to_index(hash);
+ for (KVHashtableEntry* e = bucket(index); e != NULL; e = e->next()) {
+ if (e->hash() == hash && EQUALS(e->_key, key)) {
+ return &(e->_value);
+ }
+ }
+ return NULL;
+ }
+
+ // Look up the key.
+ // If an entry for the key exists, leave map unchanged and return a pointer to its value.
+ // If no entry for the key exists, create a new entry from key and value and return a
+ // pointer to the value.
+ // *p_created is true if entry was created, false if entry pre-existed.
+ V* add_if_absent(K key, V value, bool* p_created) {
+ unsigned int hash = HASH(key);
+ int index = BasicHashtable<F>::hash_to_index(hash);
+ for (KVHashtableEntry* e = bucket(index); e != NULL; e = e->next()) {
+ if (e->hash() == hash && EQUALS(e->_key, key)) {
+ *p_created = false;
+ return &(e->_value);
+ }
+ }
+ KVHashtableEntry* entry = new_entry(hash, key, value);
+    BasicHashtable<F>::add_entry(index, entry);
+ *p_created = true;
+ return &(entry->_value);
+ }
+
+ int table_size() const {
+ return BasicHashtable<F>::table_size();
+ }
+
+  // ITER contains bool do_entry(K, V*), which will be
+  // called for each entry in the table. If do_entry() returns false,
+  // the iteration is cancelled.
+ template<class ITER>
+ void iterate(ITER* iter) const {
+ for (int index = 0; index < table_size(); index++) {
+ for (KVHashtableEntry* e = bucket(index); e != NULL; e = e->next()) {
+ bool cont = iter->do_entry(e->_key, &e->_value);
+ if (!cont) { return; }
+ }
+ }
+ }
+};
+
#endif // SHARE_VM_UTILITIES_HASHTABLE_HPP
diff --git a/hotspot/src/share/vm/utilities/hashtable.inline.hpp b/hotspot/src/share/vm/utilities/hashtable.inline.hpp
index 9356c985e..ee22ba835 100644
--- a/hotspot/src/share/vm/utilities/hashtable.inline.hpp
+++ b/hotspot/src/share/vm/utilities/hashtable.inline.hpp
@@ -72,7 +72,7 @@ template <MEMFLAGS F> inline void BasicHashtable<F>::initialize(int table_size,
// The following method is MT-safe and may be used with caution.
-template <MEMFLAGS F> inline BasicHashtableEntry<F>* BasicHashtable<F>::bucket(int i) {
+template <MEMFLAGS F> inline BasicHashtableEntry<F>* BasicHashtable<F>::bucket(int i) const {
return _buckets[i].get_entry();
}
diff --git a/hotspot/src/share/vm/utilities/ostream.cpp b/hotspot/src/share/vm/utilities/ostream.cpp
index fa199a235..14d82ad0f 100644
--- a/hotspot/src/share/vm/utilities/ostream.cpp
+++ b/hotspot/src/share/vm/utilities/ostream.cpp
@@ -379,6 +379,7 @@ xmlStream* xtty;
outputStream* tty;
outputStream* gclog_or_tty;
CDS_ONLY(jsaFileStream* classlist_file;) // Only dump the classes that can be stored into the CDS archive
+CDS_ONLY(outputStream* dynamic_cds_log;)
extern Mutex* tty_lock;
#define EXTRACHARLEN 32
@@ -1402,6 +1403,16 @@ void ostream_init_log() {
jsaFileStream(list_name);
FREE_C_HEAP_ARRAY(char, list_name, mtInternal);
}
+
+ // For -XX:DynamicCDSLog=<file> option
+ if (DynamicCDSLog != NULL) {
+ const char* log_name = make_log_name(DynamicCDSLog, NULL);
+ dynamic_cds_log = new(ResourceObj::C_HEAP, mtInternal)
+ fileStream(log_name);
+ FREE_C_HEAP_ARRAY(char, log_name, mtInternal);
+ } else {
+ dynamic_cds_log = tty;
+ }
#endif
// If we haven't lazily initialized the logfile yet, do it now,
diff --git a/hotspot/src/share/vm/utilities/ostream.hpp b/hotspot/src/share/vm/utilities/ostream.hpp
index c69289fb5..d0f9aac57 100644
--- a/hotspot/src/share/vm/utilities/ostream.hpp
+++ b/hotspot/src/share/vm/utilities/ostream.hpp
@@ -221,7 +221,7 @@ class jsaFileStream : public fileStream {
};
CDS_ONLY(extern jsaFileStream* classlist_file;)
-
+CDS_ONLY(extern outputStream* dynamic_cds_log;)
// unlike fileStream, fdStream does unbuffered I/O by calling
// open() and write() directly. It is async-safe, but output
// from multiple thread may be mixed together. Used by fatal
diff --git a/hotspot/src/share/vm/utilities/resourceHash.hpp b/hotspot/src/share/vm/utilities/resourceHash.hpp
index 82c1219b4..941f25996 100644
--- a/hotspot/src/share/vm/utilities/resourceHash.hpp
+++ b/hotspot/src/share/vm/utilities/resourceHash.hpp
@@ -27,21 +27,13 @@
#include "memory/allocation.hpp"
#include "utilities/top.hpp"
+#include "utilities/globalDefinitions.hpp"
template<typename K> struct ResourceHashtableFns {
typedef unsigned (*hash_fn)(K const&);
typedef bool (*equals_fn)(K const&, K const&);
};
-template<typename K> unsigned primitive_hash(const K& k) {
- unsigned hash = (unsigned)((uintptr_t)k);
- return hash ^ (hash >> 3); // just in case we're dealing with aligned ptrs
-}
-
-template<typename K> bool primitive_equals(const K& k0, const K& k1) {
- return k0 == k1;
-}
-
template<
typename K, typename V,
// xlC does not compile this:
@@ -66,6 +58,10 @@ class ResourceHashtable : public ResourceObj {
Node(unsigned hash, K const& key, V const& value) :
_hash(hash), _key(key), _value(value), _next(NULL) {}
+
+ // Create a node with a default-constructed value.
+ Node(unsigned hash, K const& key) :
+ _hash(hash), _key(key), _value(), _next(NULL) {}
};
Node* _table[SIZE];
@@ -139,6 +135,19 @@ class ResourceHashtable : public ResourceObj {
}
}
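+  // Look up the key. If no entry exists, insert a default-constructed value
+  // and set *p_created to true; otherwise set *p_created to false. In both
+  // cases, return a pointer to the value stored for the key.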
+ V* put_if_absent(K const& key, bool* p_created) {
+ unsigned hv = HASH(key);
+ Node** ptr = lookup_node(hv, key);
+ if (*ptr == NULL) {
+ *ptr = new (ALLOC_TYPE, MEM_TYPE) Node(hv, key);
+ *p_created = true;
+ } else {
+ *p_created = false;
+ }
+ return &(*ptr)->_value;
+ }
+
bool remove(K const& key) {
unsigned hv = HASH(key);
Node** ptr = lookup_node(hv, key);
--
2.17.1