OpenJDK / jdk / jdk
changeset 52516:d5eebe1e03fe
8213574: Deadlock in string table expansion when dumping lots of CDS classes
Reviewed-by: jiangli, iklam, dholmes
author | rehn |
---|---|
date | Wed, 14 Nov 2018 07:50:37 +0100 |
parents | 746df0ae4fe1 |
children | 59065e5d56ec |
files | src/hotspot/share/classfile/stringTable.cpp src/hotspot/share/classfile/symbolTable.cpp src/hotspot/share/utilities/concurrentHashTable.hpp src/hotspot/share/utilities/concurrentHashTable.inline.hpp |
diffstat | 4 files changed, 56 insertions(+), 3 deletions(-) [+] |
line wrap: on
line diff
--- a/src/hotspot/share/classfile/stringTable.cpp Tue Nov 13 23:33:17 2018 -0500 +++ b/src/hotspot/share/classfile/stringTable.cpp Wed Nov 14 07:50:37 2018 +0100 @@ -847,7 +847,7 @@ assert(HeapShared::is_heap_object_archiving_allowed(), "must be"); CopyToArchive copy(writer); - StringTable::the_table()->_local_table->do_scan(Thread::current(), copy); + StringTable::the_table()->_local_table->do_safepoint_scan(copy); } void StringTable::write_to_archive() {
--- a/src/hotspot/share/classfile/symbolTable.cpp Tue Nov 13 23:33:17 2018 -0500 +++ b/src/hotspot/share/classfile/symbolTable.cpp Wed Nov 14 07:50:37 2018 +0100 @@ -277,7 +277,7 @@ void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) { assert(DumpSharedSpaces, "called only during dump time"); MetaspacePointersDo mpd(it); - SymbolTable::the_table()->_local_table->do_scan(Thread::current(), mpd); + SymbolTable::the_table()->_local_table->do_safepoint_scan(mpd); } Symbol* SymbolTable::lookup_dynamic(const char* name, @@ -640,7 +640,7 @@ void SymbolTable::copy_shared_symbol_table(CompactHashtableWriter* writer) { CopyToArchive copy(writer); - SymbolTable::the_table()->_local_table->do_scan(Thread::current(), copy); + SymbolTable::the_table()->_local_table->do_safepoint_scan(copy); } void SymbolTable::write_to_archive() {
--- a/src/hotspot/share/utilities/concurrentHashTable.hpp Tue Nov 13 23:33:17 2018 -0500 +++ b/src/hotspot/share/utilities/concurrentHashTable.hpp Wed Nov 14 07:50:37 2018 +0100 @@ -472,6 +472,12 @@ template <typename SCAN_FUNC> void do_scan(Thread* thread, SCAN_FUNC& scan_f); + // Visit all items with SCAN_FUNC without any protection. + // It will assume there is no other thread accessing this + // table during the safepoint. Must be called by the VM thread. + template <typename SCAN_FUNC> + void do_safepoint_scan(SCAN_FUNC& scan_f); + // Destroying items matching EVALUATE_FUNC, before destroying items // DELETE_FUNC is called, if resize lock is obtained. Else returns false. template <typename EVALUATE_FUNC, typename DELETE_FUNC>
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Tue Nov 13 23:33:17 2018 -0500 +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Wed Nov 14 07:50:37 2018 +0100 @@ -1116,6 +1116,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>:: do_scan(Thread* thread, SCAN_FUNC& scan_f) { + assert(!SafepointSynchronize::is_at_safepoint(), + "must be outside a safepoint"); assert(_resize_lock_owner != thread, "Re-size lock held"); lock_resize_lock(thread); do_scan_locked(thread, scan_f); @@ -1124,6 +1126,49 @@ } template <typename VALUE, typename CONFIG, MEMFLAGS F> +template <typename SCAN_FUNC> +inline void ConcurrentHashTable<VALUE, CONFIG, F>:: + do_safepoint_scan(SCAN_FUNC& scan_f) +{ + // We only allow this method to be used during a safepoint. + assert(SafepointSynchronize::is_at_safepoint(), + "must only be called in a safepoint"); + assert(Thread::current()->is_VM_thread(), + "should be in vm thread"); + + // Here we skip protection, + // thus no other thread may use this table at the same time. + InternalTable* table = get_table(); + for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { + Bucket* bucket = table->get_bucket(bucket_it); + // If a bucket has a redirect, the items will be in the new table. + // We must visit them there since the new table will contain any + // concurrent inserts done after this bucket was resized. + // If the bucket doesn't have the redirect flag, all items are in this table. + if (!bucket->have_redirect()) { + if(!visit_nodes(bucket, scan_f)) { + return; + } + } else { + assert(bucket->is_locked(), "Bucket must be locked."); + } + } + // If there is a paused resize we also need to visit the already resized items.
+ table = get_new_table(); + if (table == NULL) { + return; + } + DEBUG_ONLY(if (table == POISON_PTR) { return; }) + for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { + Bucket* bucket = table->get_bucket(bucket_it); + assert(!bucket->is_locked(), "Bucket must be unlocked."); + if (!visit_nodes(bucket, scan_f)) { + return; + } + } +} + +template <typename VALUE, typename CONFIG, MEMFLAGS F> template <typename EVALUATE_FUNC, typename DELETE_FUNC> inline bool ConcurrentHashTable<VALUE, CONFIG, F>:: try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) @@ -1142,6 +1187,8 @@ inline void ConcurrentHashTable<VALUE, CONFIG, F>:: bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) { + assert(!SafepointSynchronize::is_at_safepoint(), + "must be outside a safepoint"); lock_resize_lock(thread); do_bulk_delete_locked(thread, eval_f, del_f); unlock_resize_lock(thread);