changeset 59194:ada146daf105

8235341: Improve WorkerDataArray API to disallow separate instantiation of sub-items
Reviewed-by: lkorinth, sjohanss
author tschatzl
date Fri, 06 Dec 2019 09:49:59 +0100
parents f55c453cef06
children 4938269b8b17
files src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp src/hotspot/share/gc/shared/weakProcessorPhaseTimes.cpp src/hotspot/share/gc/shared/weakProcessorPhaseTimes.hpp src/hotspot/share/gc/shared/workerDataArray.hpp src/hotspot/share/gc/shared/workerDataArray.inline.hpp src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp test/hotspot/gtest/gc/shared/test_workerDataArray.cpp
diffstat 9 files changed, 82 insertions(+), 146 deletions(-)
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Fri Dec 06 09:49:57 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Fri Dec 06 09:49:59 2019 +0100
@@ -48,119 +48,91 @@
 {
   assert(max_gc_threads > 0, "Must have some GC threads");
 
-  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms):");
-  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms):");
+  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>("GC Worker Start (ms):", max_gc_threads);
+  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>("Ext Root Scanning (ms):", max_gc_threads);
 
   // Root scanning phases
-  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms):");
-  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
-  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
-  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
-  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
-  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
-  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
-  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
-  AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scan (ms):");)
-  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
+  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>("Thread Roots (ms):", max_gc_threads);
+  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>("Universe Roots (ms):", max_gc_threads);
+  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>("JNI Handles Roots (ms):", max_gc_threads);
+  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>("ObjectSynchronizer Roots (ms):", max_gc_threads);
+  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>("Management Roots (ms):", max_gc_threads);
+  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>("SystemDictionary Roots (ms):", max_gc_threads);
+  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>("CLDG Roots (ms):", max_gc_threads);
+  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>("JVMTI Roots (ms):", max_gc_threads);
+  AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>("AOT Root Scan (ms):", max_gc_threads);)
+  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>("CM RefProcessor Roots (ms):", max_gc_threads);
 
-  _gc_par_phases[MergeER] = new WorkerDataArray<double>(max_gc_threads, "Eager Reclaim (ms):");
+  _gc_par_phases[MergeER] = new WorkerDataArray<double>("Eager Reclaim (ms):", max_gc_threads);
 
-  _gc_par_phases[MergeRS] = new WorkerDataArray<double>(max_gc_threads, "Remembered Sets (ms):");
-  _merge_rs_merged_sparse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Sparse:");
-  _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_sparse, MergeRSMergedSparse);
-  _merge_rs_merged_fine = new WorkerDataArray<size_t>(max_gc_threads, "Merged Fine:");
-  _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_fine, MergeRSMergedFine);
-  _merge_rs_merged_coarse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Coarse:");
-  _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_coarse, MergeRSMergedCoarse);
-  _merge_rs_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
-  _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_dirty_cards, MergeRSDirtyCards);
+  _gc_par_phases[MergeRS] = new WorkerDataArray<double>("Remembered Sets (ms):", max_gc_threads);
+  _gc_par_phases[MergeRS]->create_thread_work_items("Merged Sparse:", MergeRSMergedSparse);
+  _gc_par_phases[MergeRS]->create_thread_work_items("Merged Fine:", MergeRSMergedFine);
+  _gc_par_phases[MergeRS]->create_thread_work_items("Merged Coarse:", MergeRSMergedCoarse);
+  _gc_par_phases[MergeRS]->create_thread_work_items("Dirty Cards:", MergeRSDirtyCards);
 
-  _gc_par_phases[OptMergeRS] = new WorkerDataArray<double>(max_gc_threads, "Optional Remembered Sets (ms):");
-  _opt_merge_rs_merged_sparse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Sparse:");
-  _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_sparse, MergeRSMergedSparse);
-  _opt_merge_rs_merged_fine = new WorkerDataArray<size_t>(max_gc_threads, "Merged Fine:");
-  _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_fine, MergeRSMergedFine);
-  _opt_merge_rs_merged_coarse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Coarse:");
-  _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_coarse, MergeRSMergedCoarse);
-  _opt_merge_rs_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
-  _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_dirty_cards, MergeRSDirtyCards);
+  _gc_par_phases[OptMergeRS] = new WorkerDataArray<double>("Optional Remembered Sets (ms):", max_gc_threads);
+  _gc_par_phases[OptMergeRS]->create_thread_work_items("Merged Sparse:", MergeRSMergedSparse);
+  _gc_par_phases[OptMergeRS]->create_thread_work_items("Merged Fine:", MergeRSMergedFine);
+  _gc_par_phases[OptMergeRS]->create_thread_work_items("Merged Coarse:", MergeRSMergedCoarse);
+  _gc_par_phases[OptMergeRS]->create_thread_work_items("Dirty Cards:", MergeRSDirtyCards);
 
-  _gc_par_phases[MergeLB] = new WorkerDataArray<double>(max_gc_threads, "Log Buffers (ms):");
+  _gc_par_phases[MergeLB] = new WorkerDataArray<double>("Log Buffers (ms):", max_gc_threads);
   if (G1HotCardCache::default_use_cache()) {
-    _gc_par_phases[MergeHCC] = new WorkerDataArray<double>(max_gc_threads, "Hot Card Cache (ms):");
-    _merge_hcc_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
-    _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_dirty_cards, MergeHCCDirtyCards);
-    _merge_hcc_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
-    _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_skipped_cards, MergeHCCSkippedCards);
+    _gc_par_phases[MergeHCC] = new WorkerDataArray<double>("Hot Card Cache (ms):", max_gc_threads);
+    _gc_par_phases[MergeHCC]->create_thread_work_items("Dirty Cards:", MergeHCCDirtyCards);
+    _gc_par_phases[MergeHCC]->create_thread_work_items("Skipped Cards:", MergeHCCSkippedCards);
   } else {
     _gc_par_phases[MergeHCC] = NULL;
-    _merge_hcc_dirty_cards = NULL;
-    _merge_hcc_skipped_cards = NULL;
   }
-  _gc_par_phases[ScanHR] = new WorkerDataArray<double>(max_gc_threads, "Scan Heap Roots (ms):");
-  _gc_par_phases[OptScanHR] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan Heap Roots (ms):");
-  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scan (ms):");
-  _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Optional Code Root Scan (ms):");
-  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
-  _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Optional Object Copy (ms):");
-  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
-  _gc_par_phases[OptTermination] = new WorkerDataArray<double>(max_gc_threads, "Optional Termination (ms):");
-  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
-  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
-  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms):");
+  _gc_par_phases[ScanHR] = new WorkerDataArray<double>("Scan Heap Roots (ms):", max_gc_threads);
+  _gc_par_phases[OptScanHR] = new WorkerDataArray<double>("Optional Scan Heap Roots (ms):", max_gc_threads);
+  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>("Code Root Scan (ms):", max_gc_threads);
+  _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>("Optional Code Root Scan (ms):", max_gc_threads);
+  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>("Object Copy (ms):", max_gc_threads);
+  _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>("Optional Object Copy (ms):", max_gc_threads);
+  _gc_par_phases[Termination] = new WorkerDataArray<double>("Termination (ms):", max_gc_threads);
+  _gc_par_phases[OptTermination] = new WorkerDataArray<double>("Optional Termination (ms):", max_gc_threads);
+  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>("GC Worker Total (ms):", max_gc_threads);
+  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>("GC Worker End (ms):", max_gc_threads);
+  _gc_par_phases[Other] = new WorkerDataArray<double>("GC Worker Other (ms):", max_gc_threads);
 
-  _scan_hr_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
-  _gc_par_phases[ScanHR]->link_thread_work_items(_scan_hr_scanned_cards, ScanHRScannedCards);
-  _scan_hr_scanned_blocks = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Blocks:");
-  _gc_par_phases[ScanHR]->link_thread_work_items(_scan_hr_scanned_blocks, ScanHRScannedBlocks);
-  _scan_hr_claimed_chunks = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Chunks:");
-  _gc_par_phases[ScanHR]->link_thread_work_items(_scan_hr_claimed_chunks, ScanHRClaimedChunks);
+  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
+  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
+  _gc_par_phases[ScanHR]->create_thread_work_items("Claimed Chunks:", ScanHRClaimedChunks);
 
-  _opt_scan_hr_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
-  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_scanned_cards, ScanHRScannedCards);
-  _opt_scan_hr_scanned_blocks = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Blocks:");
-  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_scanned_blocks, ScanHRScannedBlocks);
-  _opt_scan_hr_claimed_chunks = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Chunks:");
-  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_claimed_chunks, ScanHRClaimedChunks);
-  _opt_scan_hr_scanned_opt_refs = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Refs:");
-  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_scanned_opt_refs, ScanHRScannedOptRefs);
-  _opt_scan_hr_used_memory = new WorkerDataArray<size_t>(max_gc_threads, "Used Memory:");
-  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_used_memory, ScanHRUsedMemory);
+  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
+  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
+  _gc_par_phases[OptScanHR]->create_thread_work_items("Claimed Chunks:", ScanHRClaimedChunks);
+  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Refs:", ScanHRScannedOptRefs);
+  _gc_par_phases[OptScanHR]->create_thread_work_items("Used Memory:", ScanHRUsedMemory);
+
+  _gc_par_phases[MergeLB]->create_thread_work_items("Dirty Cards:", MergeLBDirtyCards);
+  _gc_par_phases[MergeLB]->create_thread_work_items("Skipped Cards:", MergeLBSkippedCards);
 
-  _merge_lb_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
-  _gc_par_phases[MergeLB]->link_thread_work_items(_merge_lb_dirty_cards, MergeLBDirtyCards);
-  _merge_lb_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
-  _gc_par_phases[MergeLB]->link_thread_work_items(_merge_lb_skipped_cards, MergeLBSkippedCards);
-
-  _gc_par_phases[MergePSS] = new WorkerDataArray<double>(1, "Merge Per-Thread State", true /* is_serial */);
+  _gc_par_phases[MergePSS] = new WorkerDataArray<double>("Merge Per-Thread State", 1 /* length */, true /* is_serial */);
 
-  _merge_pss_copied_bytes = new WorkerDataArray<size_t>(max_gc_threads, "Copied Bytes");
-  _gc_par_phases[MergePSS]->link_thread_work_items(_merge_pss_copied_bytes, MergePSSCopiedBytes);
-  _merge_pss_lab_waste_bytes = new WorkerDataArray<size_t>(max_gc_threads, "LAB Waste");
-  _gc_par_phases[MergePSS]->link_thread_work_items(_merge_pss_lab_waste_bytes, MergePSSLABWasteBytes);
-  _merge_pss_lab_undo_waste_bytes = new WorkerDataArray<size_t>(max_gc_threads, "LAB Undo Waste");
-  _gc_par_phases[MergePSS]->link_thread_work_items(_merge_pss_lab_undo_waste_bytes, MergePSSLABUndoWasteBytes);
+  _gc_par_phases[MergePSS]->create_thread_work_items("Copied Bytes", MergePSSCopiedBytes, max_gc_threads);
+  _gc_par_phases[MergePSS]->create_thread_work_items("LAB Waste", MergePSSLABWasteBytes, max_gc_threads);
+  _gc_par_phases[MergePSS]->create_thread_work_items("LAB Undo Waste", MergePSSLABUndoWasteBytes, max_gc_threads);
 
-  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
-  _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
+  _gc_par_phases[Termination]->create_thread_work_items("Termination Attempts:");
 
-  _opt_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Optional Termination Attempts:");
-  _gc_par_phases[OptTermination]->link_thread_work_items(_opt_termination_attempts);
+  _gc_par_phases[OptTermination]->create_thread_work_items("Optional Termination Attempts:");
 
   if (UseStringDeduplication) {
-    _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms):");
-    _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms):");
+    _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>("Queue Fixup (ms):", max_gc_threads);
+    _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>("Table Fixup (ms):", max_gc_threads);
   } else {
     _gc_par_phases[StringDedupQueueFixup] = NULL;
     _gc_par_phases[StringDedupTableFixup] = NULL;
   }
 
-  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty (ms):");
-  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:");
-  _gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
+  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>("Parallel Redirty (ms):", max_gc_threads);
+  _gc_par_phases[RedirtyCards]->create_thread_work_items("Redirtied Cards:");
 
-  _gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Young Free Collection Set (ms):");
-  _gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Non-Young Free Collection Set (ms):");
+  _gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>("Young Free Collection Set (ms):", max_gc_threads);
+  _gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>("Non-Young Free Collection Set (ms):", max_gc_threads);
 
   reset();
 }
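
The g1GCPhaseTimes.cpp change is mechanical throughout: each phase that previously allocated its own WorkerDataArray<size_t> sub-item and attached it with link_thread_work_items() now asks the owning array to create the sub-item. A minimal before/after sketch of the pattern, assuming the HotSpot headers above; the function names and titles are illustrative, not from the changeset:

    // Before: the caller instantiated the sub-item and linked it in
    // (old constructor order: length first, then title).
    void setup_old(uint max_gc_threads) {
      WorkerDataArray<double>* phase =
          new WorkerDataArray<double>(max_gc_threads, "Example Phase (ms):");
      WorkerDataArray<size_t>* items =
          new WorkerDataArray<size_t>(max_gc_threads, "Example Items:");
      phase->link_thread_work_items(items, 0 /* index */);
    }

    // After: the owning array creates (and owns) its sub-item; note the
    // constructor now takes (title, length).
    void setup_new(uint max_gc_threads) {
      WorkerDataArray<double>* phase =
          new WorkerDataArray<double>("Example Phase (ms):", max_gc_threads);
      phase->create_thread_work_items("Example Items:", 0 /* index */);
    }
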
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Fri Dec 06 09:49:57 2019 +0100
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Fri Dec 06 09:49:59 2019 +0100
@@ -122,42 +122,6 @@
 
   WorkerDataArray<double>* _gc_par_phases[GCParPhasesSentinel];
 
-  WorkerDataArray<size_t>* _merge_rs_merged_sparse;
-  WorkerDataArray<size_t>* _merge_rs_merged_fine;
-  WorkerDataArray<size_t>* _merge_rs_merged_coarse;
-  WorkerDataArray<size_t>* _merge_rs_dirty_cards;
-
-  WorkerDataArray<size_t>* _merge_hcc_dirty_cards;
-  WorkerDataArray<size_t>* _merge_hcc_skipped_cards;
-
-  WorkerDataArray<size_t>* _merge_lb_dirty_cards;
-  WorkerDataArray<size_t>* _merge_lb_skipped_cards;
-
-  WorkerDataArray<size_t>* _scan_hr_scanned_cards;
-  WorkerDataArray<size_t>* _scan_hr_scanned_blocks;
-  WorkerDataArray<size_t>* _scan_hr_claimed_chunks;
-
-  WorkerDataArray<size_t>* _opt_merge_rs_merged_sparse;
-  WorkerDataArray<size_t>* _opt_merge_rs_merged_fine;
-  WorkerDataArray<size_t>* _opt_merge_rs_merged_coarse;
-  WorkerDataArray<size_t>* _opt_merge_rs_dirty_cards;
-
-  WorkerDataArray<size_t>* _opt_scan_hr_scanned_cards;
-  WorkerDataArray<size_t>* _opt_scan_hr_scanned_blocks;
-  WorkerDataArray<size_t>* _opt_scan_hr_claimed_chunks;
-  WorkerDataArray<size_t>* _opt_scan_hr_scanned_opt_refs;
-  WorkerDataArray<size_t>* _opt_scan_hr_used_memory;
-
-  WorkerDataArray<size_t>* _merge_pss_copied_bytes;
-  WorkerDataArray<size_t>* _merge_pss_lab_waste_bytes;
-  WorkerDataArray<size_t>* _merge_pss_lab_undo_waste_bytes;
-
-  WorkerDataArray<size_t>* _termination_attempts;
-
-  WorkerDataArray<size_t>* _opt_termination_attempts;
-
-  WorkerDataArray<size_t>* _redirtied_cards;
-
   double _cur_collection_initial_evac_time_ms;
   double _cur_optional_evac_time_ms;
   double _cur_collection_code_root_fixup_time_ms;
--- a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp	Fri Dec 06 09:49:57 2019 +0100
+++ b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp	Fri Dec 06 09:49:59 2019 +0100
@@ -177,9 +177,9 @@
   _processing_is_mt(false), _gc_timer(gc_timer) {
 
   for (uint i = 0; i < ReferenceProcessor::RefSubPhaseMax; i++) {
-    _sub_phases_worker_time_sec[i] = new WorkerDataArray<double>(max_gc_threads, SubPhasesParWorkTitle[i]);
+    _sub_phases_worker_time_sec[i] = new WorkerDataArray<double>(SubPhasesParWorkTitle[i], max_gc_threads);
   }
-  _phase2_worker_time_sec = new WorkerDataArray<double>(max_gc_threads, Phase2ParWorkTitle);
+  _phase2_worker_time_sec = new WorkerDataArray<double>(Phase2ParWorkTitle, max_gc_threads);
 
   reset();
 }
--- a/src/hotspot/share/gc/shared/weakProcessorPhaseTimes.cpp	Fri Dec 06 09:49:57 2019 +0100
+++ b/src/hotspot/share/gc/shared/weakProcessorPhaseTimes.cpp	Fri Dec 06 09:49:59 2019 +0100
@@ -80,9 +80,7 @@
   _max_threads(max_threads),
   _active_workers(0),
   _total_time_sec(uninitialized_time),
-  _worker_data(),
-  _worker_dead_items(),
-  _worker_total_items()
+  _worker_data()
 {
   assert(_max_threads > 0, "max_threads must not be zero");
 
@@ -93,9 +91,9 @@
   for ( ; !it.is_end(); ++it) {
     assert(size_t(wpt - _worker_data) < ARRAY_SIZE(_worker_data), "invariant");
     const char* description = it->name();
-    *wpt = new WorkerDataArray<double>(_max_threads, description);
-    (*wpt)->link_thread_work_items(new WorkerDataArray<size_t>(_max_threads, "Dead"), DeadItems);
-    (*wpt)->link_thread_work_items(new WorkerDataArray<size_t>(_max_threads, "Total"), TotalItems);
+    *wpt = new WorkerDataArray<double>(description, _max_threads);
+    (*wpt)->create_thread_work_items("Dead", DeadItems);
+    (*wpt)->create_thread_work_items("Total", TotalItems);
     wpt++;
   }
   assert(size_t(wpt - _worker_data) == ARRAY_SIZE(_worker_data), "invariant");
@@ -104,8 +102,6 @@
 WeakProcessorPhaseTimes::~WeakProcessorPhaseTimes() {
   for (size_t i = 0; i < ARRAY_SIZE(_worker_data); ++i) {
     delete _worker_data[i];
-    delete _worker_dead_items[i];
-    delete _worker_total_items[i];
   }
 }
 
--- a/src/hotspot/share/gc/shared/weakProcessorPhaseTimes.hpp	Fri Dec 06 09:49:57 2019 +0100
+++ b/src/hotspot/share/gc/shared/weakProcessorPhaseTimes.hpp	Fri Dec 06 09:49:59 2019 +0100
@@ -56,8 +56,6 @@
   // Per-worker times and linked items.
   static const uint worker_data_count = WeakProcessorPhases::oopstorage_phase_count;
   WorkerDataArray<double>* _worker_data[worker_data_count];
-  WorkerDataArray<size_t>* _worker_dead_items[worker_data_count];
-  WorkerDataArray<size_t>* _worker_total_items[worker_data_count];
 
   WorkerDataArray<double>* worker_data(WeakProcessorPhase phase) const;
 
--- a/src/hotspot/share/gc/shared/workerDataArray.hpp	Fri Dec 06 09:49:57 2019 +0100
+++ b/src/hotspot/share/gc/shared/workerDataArray.hpp	Fri Dec 06 09:49:59 2019 +0100
@@ -45,10 +45,14 @@
   WorkerDataArray<size_t>* _thread_work_items[MaxThreadWorkItems];
 
  public:
-  WorkerDataArray(uint length, const char* title, bool is_serial = false);
+  WorkerDataArray(const char* title, uint length, bool is_serial = false);
   ~WorkerDataArray();
 
-  void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items, uint index = 0);
+  // Create an integer sub-item at the given index for this WorkerDataArray. If length_override
+  // is zero, use the same number of elements as this array, otherwise use the given
+  // number.
+  void create_thread_work_items(const char* title, uint index = 0, uint length_override = 0);
+
   void set_thread_work_item(uint worker_i, size_t value, uint index = 0);
   void add_thread_work_item(uint worker_i, size_t value, uint index = 0);
   void set_or_add_thread_work_item(uint worker_i, size_t value, uint index = 0);
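
The length_override parameter covers the one case where a sub-item's length differs from its owner's: a serial phase (length 1) whose work items are still recorded per worker. G1's MergePSS setup above is exactly that case, condensed here from the g1GCPhaseTimes.cpp hunk:

    // Serial phase: a single timing slot...
    WorkerDataArray<double>* merge_pss =
        new WorkerDataArray<double>("Merge Per-Thread State", 1 /* length */, true /* is_serial */);
    // ...but one "Copied Bytes" counter per worker, via length_override.
    merge_pss->create_thread_work_items("Copied Bytes", MergePSSCopiedBytes, max_gc_threads);
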
--- a/src/hotspot/share/gc/shared/workerDataArray.inline.hpp	Fri Dec 06 09:49:57 2019 +0100
+++ b/src/hotspot/share/gc/shared/workerDataArray.inline.hpp	Fri Dec 06 09:49:59 2019 +0100
@@ -30,7 +30,7 @@
 #include "utilities/ostream.hpp"
 
 template <typename T>
-WorkerDataArray<T>::WorkerDataArray(uint length, const char* title, bool is_serial) :
+WorkerDataArray<T>::WorkerDataArray(const char* title, uint length, bool is_serial) :
  _data(NULL),
  _length(length),
  _title(title),
@@ -66,9 +66,11 @@
 }
 
 template <typename T>
-void WorkerDataArray<T>::link_thread_work_items(WorkerDataArray<size_t>* thread_work_items, uint index) {
+void WorkerDataArray<T>::create_thread_work_items(const char* title, uint index, uint length_override) {
   assert(index < MaxThreadWorkItems, "Tried to access thread work item %u (max %u)", index, MaxThreadWorkItems);
-  _thread_work_items[index] = thread_work_items;
+  assert(_thread_work_items[index] == NULL, "Tried to overwrite existing thread work item");
+  uint length = length_override != 0 ? length_override : _length;
+  _thread_work_items[index] = new WorkerDataArray<size_t>(title, length);
 }
 
 template <typename T>
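
The inline implementation is small enough to model outside a HotSpot build. The standalone sketch below mirrors the two invariants create_thread_work_items() now enforces, that an index is populated at most once and that length_override == 0 means "inherit the owner's length", using plain C++ in place of HotSpot's allocation and assert macros. The names and types are simplified stand-ins, not the real class, and the owner-deletes-sub-items destructor is an assumption (the corresponding ~WorkerDataArray() hunk is not shown in this changeset, though the WeakProcessorPhaseTimes destructor above now relies on it):

    #include <cassert>
    #include <cstddef>

    class OwnerModel {
      static const unsigned MaxThreadWorkItems = 8;
      unsigned _length;
      size_t* _thread_work_items[MaxThreadWorkItems];

     public:
      explicit OwnerModel(unsigned length)
        : _length(length), _thread_work_items() {}

      // Mirrors WorkerDataArray<T>::create_thread_work_items().
      void create_thread_work_items(unsigned index = 0, unsigned length_override = 0) {
        assert(index < MaxThreadWorkItems);
        assert(_thread_work_items[index] == nullptr);  // never overwrite
        unsigned length = length_override != 0 ? length_override : _length;
        _thread_work_items[index] = new size_t[length]();  // zero-initialized
      }

      // Assumed: owning the sub-items means the owner also frees them.
      ~OwnerModel() {
        for (unsigned i = 0; i < MaxThreadWorkItems; i++) {
          delete[] _thread_work_items[i];
        }
      }
    };
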
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp	Fri Dec 06 09:49:57 2019 +0100
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp	Fri Dec 06 09:49:59 2019 +0100
@@ -138,7 +138,7 @@
   assert(max_gc_threads > 0, "Must have some GC threads");
 
 #define GC_PAR_PHASE_DECLARE_WORKER_DATA(type, title) \
-  _gc_par_phases[ShenandoahPhaseTimings::type] = new WorkerDataArray<double>(max_gc_threads, title);
+  _gc_par_phases[ShenandoahPhaseTimings::type] = new WorkerDataArray<double>(title, max_gc_threads);
   // Root scanning phases
   SHENANDOAH_GC_PAR_PHASE_DO(GC_PAR_PHASE_DECLARE_WORKER_DATA)
 #undef GC_PAR_PHASE_DECLARE_WORKER_DATA
@@ -165,7 +165,7 @@
 
 
 ShenandoahTerminationTimings::ShenandoahTerminationTimings(uint max_gc_threads) {
-  _gc_termination_phase = new WorkerDataArray<double>(max_gc_threads, "Task Termination (ms):");
+  _gc_termination_phase = new WorkerDataArray<double>("Task Termination (ms):", max_gc_threads);
 }
 
 void ShenandoahTerminationTimings::record_time_secs(uint worker_id, double secs) {
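
In Shenandoah the swap is absorbed entirely by the declaration macro, so every generated phase picks up the new argument order at once. For a phase declared as, say, GC_PAR_PHASE_DECLARE_WORKER_DATA(example_phase, "Example Phase (ms)"), a hypothetical entry rather than one of Shenandoah's actual phases, the macro now expands to roughly:

    _gc_par_phases[ShenandoahPhaseTimings::example_phase] =
        new WorkerDataArray<double>("Example Phase (ms)", max_gc_threads);
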
--- a/test/hotspot/gtest/gc/shared/test_workerDataArray.cpp	Fri Dec 06 09:49:57 2019 +0100
+++ b/test/hotspot/gtest/gc/shared/test_workerDataArray.cpp	Fri Dec 06 09:49:59 2019 +0100
@@ -34,10 +34,10 @@
  protected:
   WorkerDataArrayTest() :
     title("Test array"),
-    array(3, title),
+    array(title, 3),
     sub_item_title("Sub item array") {
 
-    array.link_thread_work_items(new WorkerDataArray<size_t>(3, sub_item_title));
+    array.create_thread_work_items(sub_item_title);
   }
 
   const char* print_summary() {
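
Since the fixture no longer holds a pointer to the sub-item, any test touching it goes through the owning array's accessors. A hypothetical test body under the fixture above; the fixture and test names are illustrative, and only the accessors come from workerDataArray.hpp:

    TEST_F(WorkerDataArrayTest, sub_item_accessors) {
      array.set_thread_work_item(0 /* worker */, 10 /* value */);
      array.add_thread_work_item(0 /* worker */, 5);         // slot 0: 10 -> 15
      array.set_or_add_thread_work_item(1 /* worker */, 7);  // unset slot: set to 7
    }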