changeset 59808:616dd4cb9804

8247470: Fix CHeap GrowableArray NMT accounting
Reviewed-by: coleenp, tschatzl, kbarrett
author stefank
date Tue, 16 Jun 2020 09:37:53 +0200
parents 20d92fe3ac52
children c67493e1388d
files src/hotspot/os/linux/os_linux.cpp src/hotspot/os/windows/os_windows.cpp src/hotspot/share/aot/aotLoader.cpp src/hotspot/share/ci/ciTypeFlow.cpp src/hotspot/share/classfile/classListParser.cpp src/hotspot/share/classfile/classLoader.cpp src/hotspot/share/classfile/classLoaderData.cpp src/hotspot/share/classfile/classLoaderExt.cpp src/hotspot/share/classfile/compactHashtable.cpp src/hotspot/share/classfile/javaClasses.cpp src/hotspot/share/classfile/moduleEntry.cpp src/hotspot/share/classfile/packageEntry.cpp src/hotspot/share/classfile/stringTable.cpp src/hotspot/share/classfile/systemDictionaryShared.cpp src/hotspot/share/code/codeCache.cpp src/hotspot/share/compiler/compilerEvent.cpp src/hotspot/share/compiler/disassembler.cpp src/hotspot/share/gc/g1/g1Allocator.hpp src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp src/hotspot/share/gc/g1/g1SurvivorRegions.cpp src/hotspot/share/gc/parallel/mutableNUMASpace.cpp src/hotspot/share/gc/parallel/psCompactionManager.cpp src/hotspot/share/gc/shared/gcTimer.cpp src/hotspot/share/interpreter/interpreterRuntime.cpp src/hotspot/share/jfr/jni/jfrJavaSupport.cpp src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp src/hotspot/share/jfr/recorder/jfrRecorder.cpp src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp src/hotspot/share/jfr/support/jfrKlassUnloading.cpp src/hotspot/share/memory/allocation.hpp src/hotspot/share/memory/dynamicArchive.cpp src/hotspot/share/memory/filemap.cpp src/hotspot/share/memory/heapInspection.cpp src/hotspot/share/memory/heapShared.cpp src/hotspot/share/memory/metaspaceShared.cpp src/hotspot/share/oops/methodData.cpp src/hotspot/share/opto/macro.cpp src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp src/hotspot/share/prims/jvmtiEnv.cpp src/hotspot/share/prims/jvmtiEnvBase.cpp src/hotspot/share/prims/jvmtiEnvThreadState.cpp src/hotspot/share/prims/jvmtiExport.cpp src/hotspot/share/prims/jvmtiExtensions.cpp src/hotspot/share/prims/jvmtiImpl.cpp src/hotspot/share/prims/jvmtiRawMonitor.cpp src/hotspot/share/prims/jvmtiTagMap.cpp src/hotspot/share/runtime/arguments.cpp src/hotspot/share/runtime/biasedLocking.cpp src/hotspot/share/runtime/flags/jvmFlagConstraintList.cpp src/hotspot/share/runtime/flags/jvmFlagRangeList.cpp src/hotspot/share/runtime/perfData.cpp src/hotspot/share/runtime/reflectionUtils.cpp src/hotspot/share/runtime/thread.cpp src/hotspot/share/runtime/unhandledOops.cpp src/hotspot/share/runtime/vframe_hp.cpp src/hotspot/share/services/diagnosticArgument.cpp src/hotspot/share/services/heapDumper.cpp src/hotspot/share/services/memoryService.cpp src/hotspot/share/services/threadService.cpp src/hotspot/share/utilities/growableArray.hpp src/hotspot/share/utilities/hashtable.inline.hpp src/hotspot/share/utilities/histogram.cpp
diffstat 67 files changed, 144 insertions(+), 158 deletions(-)
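Summary of the API change (an illustrative before/after sketch, using call sites quoted from the hunks below; the constructor change itself is in the growableArray.hpp hunk near the end of the diff): the GrowableArray constructors no longer take a bool c_heap flag, and instead take the MEMFLAGS category directly, so the element storage is attributed to the same NMT category as the placement-new header allocation rather than to the old mtInternal default.

  // Before: C-heap backing storage selected by a trailing 'true', with the
  // array's NMT category left at the constructor default (mtInternal).
  _interfaces = new (ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, true);

  // After: the MEMFLAGS category is passed to the array itself, so both the
  // header and the element array are tagged mtClass (classListParser.cpp hunk).
  _interfaces = new (ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);

  // Resource-area arrays simply drop the trailing 'false' (ciTypeFlow.cpp hunk).
  _set = new GrowableArray<JsrRecord*>(4, 0, NULL);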
--- a/src/hotspot/os/linux/os_linux.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/os/linux/os_linux.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -3251,10 +3251,10 @@
         set_numa_interleave_bitmask(_numa_get_interleave_mask());
         set_numa_membind_bitmask(_numa_get_membind());
         // Create an index -> node mapping, since nodes are not always consecutive
-        _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
+        _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);
         rebuild_nindex_to_node_map();
         // Create a cpu -> node mapping
-        _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
+        _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, mtInternal);
         rebuild_cpu_to_node_map();
         return true;
       }
--- a/src/hotspot/os/windows/os_windows.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/os/windows/os_windows.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -71,18 +71,15 @@
 #include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/events.hpp"
-#include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 #include "symbolengine.hpp"
 #include "windbghelp.hpp"
 
-
 #ifdef _DEBUG
 #include <crtdbg.h>
 #endif
 
-
 #include <windows.h>
 #include <sys/types.h>
 #include <sys/stat.h>
--- a/src/hotspot/share/aot/aotLoader.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/aot/aotLoader.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -34,8 +34,8 @@
 #include "runtime/os.inline.hpp"
 #include "runtime/timerTrace.hpp"
 
-GrowableArray<AOTCodeHeap*>* AOTLoader::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<AOTCodeHeap*> (2, true);
-GrowableArray<AOTLib*>* AOTLoader::_libraries = new(ResourceObj::C_HEAP, mtCode) GrowableArray<AOTLib*> (2, true);
+GrowableArray<AOTCodeHeap*>* AOTLoader::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<AOTCodeHeap*> (2, mtCode);
+GrowableArray<AOTLib*>* AOTLoader::_libraries = new(ResourceObj::C_HEAP, mtCode) GrowableArray<AOTLib*> (2, mtCode);
 
 // Iterate over all AOT CodeHeaps
 #define FOR_ALL_AOT_HEAPS(heap) for (GrowableArrayIterator<AOTCodeHeap*> heap = heaps()->begin(); heap != heaps()->end(); ++heap)
--- a/src/hotspot/share/ci/ciTypeFlow.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/ci/ciTypeFlow.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -61,7 +61,7 @@
     _set = new (arena) GrowableArray<JsrRecord*>(arena, default_len, 0, NULL);
   } else {
     // Allocate growable array in current ResourceArea.
-    _set = new GrowableArray<JsrRecord*>(4, 0, NULL, false);
+    _set = new GrowableArray<JsrRecord*>(4, 0, NULL);
   }
 }
 
--- a/src/hotspot/share/classfile/classListParser.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/classListParser.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -62,7 +62,7 @@
     vm_exit_during_initialization("Loading classlist failed", errmsg);
   }
   _line_no = 0;
-  _interfaces = new (ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, true);
+  _interfaces = new (ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
 }
 
 ClassListParser::~ClassListParser() {
--- a/src/hotspot/share/classfile/classLoader.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/classLoader.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -583,7 +583,7 @@
   int num_of_entries = patch_mod_args->length();
 
   // Set up the boot loader's _patch_mod_entries list
-  _patch_mod_entries = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleClassPathList*>(num_of_entries, true);
+  _patch_mod_entries = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleClassPathList*>(num_of_entries, mtModule);
 
   for (int i = 0; i < num_of_entries; i++) {
     const char* module_name = (patch_mod_args->at(i))->module_name();
@@ -1609,7 +1609,7 @@
     // subsequently do the first class load. So, no lock is needed for this.
     assert(_exploded_entries == NULL, "Should only get initialized once");
     _exploded_entries = new (ResourceObj::C_HEAP, mtModule)
-      GrowableArray<ModuleClassPathList*>(EXPLODED_ENTRY_SIZE, true);
+      GrowableArray<ModuleClassPathList*>(EXPLODED_ENTRY_SIZE, mtModule);
     add_to_exploded_build_list(vmSymbols::java_base(), CHECK);
   }
 }
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -804,7 +804,7 @@
   if (!m->is_shared()) {
     MutexLocker ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
     if (_deallocate_list == NULL) {
-      _deallocate_list = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(100, true);
+      _deallocate_list = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(100, mtClass);
     }
     _deallocate_list->append_if_missing(m);
     log_debug(class, loader, data)("deallocate added for %s", m->print_value_string());
--- a/src/hotspot/share/classfile/classLoaderExt.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -310,7 +310,7 @@
   // This is called from dump time so it's single threaded and there's no need for a lock.
   assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
   if (cached_path_entries == NULL) {
-    cached_path_entries = new (ResourceObj::C_HEAP, mtClass) GrowableArray<CachedClassPathEntry>(20, /*c heap*/ true);
+    cached_path_entries = new (ResourceObj::C_HEAP, mtClass) GrowableArray<CachedClassPathEntry>(20, mtClass);
   }
   CachedClassPathEntry ccpe;
   for (int i=0; i<cached_path_entries->length(); i++) {
--- a/src/hotspot/share/classfile/compactHashtable.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/compactHashtable.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -51,7 +51,7 @@
   _num_entries_written = 0;
   _buckets = NEW_C_HEAP_ARRAY(GrowableArray<Entry>*, _num_buckets, mtSymbol);
   for (int i=0; i<_num_buckets; i++) {
-    _buckets[i] = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Entry>(0, true, mtSymbol);
+    _buckets[i] = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Entry>(0, mtSymbol);
   }
 
   _stats = stats;
--- a/src/hotspot/share/classfile/javaClasses.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -965,11 +965,11 @@
 // Statically allocate fixup lists because they always get created.
 void java_lang_Class::allocate_fixup_lists() {
   GrowableArray<Klass*>* mirror_list =
-    new (ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(40, true);
+    new (ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(40, mtClass);
   set_fixup_mirror_list(mirror_list);
 
   GrowableArray<Klass*>* module_list =
-    new (ResourceObj::C_HEAP, mtModule) GrowableArray<Klass*>(500, true);
+    new (ResourceObj::C_HEAP, mtModule) GrowableArray<Klass*>(500, mtModule);
   set_fixup_module_field_list(module_list);
 }
 
--- a/src/hotspot/share/classfile/moduleEntry.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/moduleEntry.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -161,7 +161,7 @@
   } else {
     if (_reads == NULL) {
       // Lazily create a module's reads list
-      _reads = new (ResourceObj::C_HEAP, mtModule)GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, true);
+      _reads = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleEntry*>(MODULE_READS_SIZE, mtModule);
     }
 
     // Determine, based on this newly established read edge to module m,
--- a/src/hotspot/share/classfile/packageEntry.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/packageEntry.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -53,7 +53,7 @@
   if (!has_qual_exports_list()) {
     // Lazily create a package's qualified exports list.
     // Initial size is small, do not anticipate export lists to be large.
-    _qualified_exports = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, true);
+    _qualified_exports = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleEntry*>(QUAL_EXP_SIZE, mtModule);
   }
 
   // Determine, based on this newly established export to module m,
--- a/src/hotspot/share/classfile/stringTable.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/stringTable.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -623,7 +623,7 @@
   Thread* thr = Thread::current();
   GrowableArray<oop>* oops =
     new (ResourceObj::C_HEAP, mtInternal)
-      GrowableArray<oop>((int)_current_size, true);
+      GrowableArray<oop>((int)_current_size, mtInternal);
 
   VerifyCompStrings vcs(oops);
   if (!_local_table->try_scan(thr, vcs)) {
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -325,7 +325,7 @@
   DumpTimeLambdaProxyClassInfo() : _proxy_klasses(NULL) {}
   void add_proxy_klass(InstanceKlass* proxy_klass) {
     if (_proxy_klasses == NULL) {
-      _proxy_klasses = new (ResourceObj::C_HEAP, mtInternal)GrowableArray<InstanceKlass*>(5, true);
+      _proxy_klasses = new (ResourceObj::C_HEAP, mtClassShared)GrowableArray<InstanceKlass*>(5, mtClassShared);
     }
     assert(_proxy_klasses != NULL, "sanity");
     _proxy_klasses->append(proxy_klass);
@@ -1547,10 +1547,10 @@
 void DumpTimeSharedClassInfo::add_verification_constraint(InstanceKlass* k, Symbol* name,
          Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object) {
   if (_verifier_constraints == NULL) {
-    _verifier_constraints = new(ResourceObj::C_HEAP, mtClass) GrowableArray<DTVerifierConstraint>(4, true, mtClass);
+    _verifier_constraints = new(ResourceObj::C_HEAP, mtClass) GrowableArray<DTVerifierConstraint>(4, mtClass);
   }
   if (_verifier_constraint_flags == NULL) {
-    _verifier_constraint_flags = new(ResourceObj::C_HEAP, mtClass) GrowableArray<char>(4, true, mtClass);
+    _verifier_constraint_flags = new(ResourceObj::C_HEAP, mtClass) GrowableArray<char>(4, mtClass);
   }
   GrowableArray<DTVerifierConstraint>* vc_array = _verifier_constraints;
   for (int i = 0; i < vc_array->length(); i++) {
@@ -1730,7 +1730,7 @@
   assert(loader1 != loader2, "sanity");
   LogTarget(Info, class, loader, constraints) log;
   if (_loader_constraints == NULL) {
-    _loader_constraints = new (ResourceObj::C_HEAP, mtClass) GrowableArray<DTLoaderConstraint>(4, true, mtClass);
+    _loader_constraints = new (ResourceObj::C_HEAP, mtClass) GrowableArray<DTLoaderConstraint>(4, mtClass);
   }
   char lt1 = get_loader_type_by(loader1());
   char lt2 = get_loader_type_by(loader2());
--- a/src/hotspot/share/code/codeCache.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/code/codeCache.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -150,10 +150,10 @@
 ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL;
 
 // Initialize arrays of CodeHeap subsets
-GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
-GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
-GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
-GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true);
+GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
+GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
+GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
+GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, mtCode);
 
 void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
   size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
@@ -1043,7 +1043,7 @@
 
 static void add_to_old_table(CompiledMethod* c) {
   if (old_compiled_method_table == NULL) {
-    old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, true);
+    old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, mtCode);
   }
   old_compiled_method_table->push(c);
 }
--- a/src/hotspot/share/compiler/compilerEvent.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/compiler/compilerEvent.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -79,7 +79,7 @@
   {
     PhaseTypeGuard guard;
     if (phase_names == NULL) {
-      phase_names = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<const char*>(100, true);
+      phase_names = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<const char*>(100, mtCompiler);
       register_jfr_serializer = true;
     }
     idx = phase_names->length();
--- a/src/hotspot/share/compiler/disassembler.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/compiler/disassembler.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -266,7 +266,7 @@
           }
           _cached_src_lines->clear();
         } else {
-          _cached_src_lines = new (ResourceObj::C_HEAP, mtCode)GrowableArray<const char*>(0, true);
+          _cached_src_lines = new (ResourceObj::C_HEAP, mtCode)GrowableArray<const char*>(0, mtCode);
         }
 
         if ((fp = fopen(file, "r")) == NULL) {
--- a/src/hotspot/share/gc/g1/g1Allocator.hpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/gc/g1/g1Allocator.hpp	Tue Jun 16 09:37:53 2020 +0200
@@ -257,7 +257,7 @@
     _allocation_region(NULL),
     _allocated_regions((ResourceObj::set_allocation_type((address) &_allocated_regions,
                                                          ResourceObj::C_HEAP),
-                        2), true /* C_Heap */),
+                        2), mtGC),
     _summary_bytes_used(0),
     _bottom(NULL),
     _top(NULL),
--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -32,7 +32,7 @@
     _current_region(NULL),
     _threshold(NULL),
     _compaction_top(NULL) {
-  _compaction_regions = new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(32, true, mtGC);
+  _compaction_regions = new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(32, mtGC);
   _compaction_region_iterator = _compaction_regions->begin();
 }
 
--- a/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -29,7 +29,7 @@
 #include "utilities/debug.hpp"
 
 G1SurvivorRegions::G1SurvivorRegions() :
-  _regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)),
+  _regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, mtGC)),
   _used_bytes(0),
   _regions_on_node() {}
 
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -34,7 +34,7 @@
 #include "utilities/align.hpp"
 
 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) {
-  _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
+  _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, mtGC);
   _page_size = os::vm_page_size();
   _adaptation_cycles = 0;
   _samples_count = 0;
--- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -95,7 +95,7 @@
   assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
     "Not initialized?");
 
-  _shadow_region_array = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<size_t >(10, true);
+  _shadow_region_array = new (ResourceObj::C_HEAP, mtGC) GrowableArray<size_t >(10, mtGC);
 
   _shadow_region_monitor = new Monitor(Mutex::barrier, "CompactionManager monitor",
                                        Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
--- a/src/hotspot/share/gc/shared/gcTimer.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/gc/shared/gcTimer.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -113,7 +113,7 @@
 }
 
 TimePartitions::TimePartitions() {
-  _phases = new (ResourceObj::C_HEAP, mtGC) GrowableArray<GCPhase>(INITIAL_CAPACITY, true, mtGC);
+  _phases = new (ResourceObj::C_HEAP, mtGC) GrowableArray<GCPhase>(INITIAL_CAPACITY, mtGC);
   clear();
 }
 
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -1344,8 +1344,8 @@
                                       SignatureHandlerLibrary::buffer_size);
   _buffer = bb->code_begin();
 
-  _fingerprints = new(ResourceObj::C_HEAP, mtCode)GrowableArray<uint64_t>(32, true);
-  _handlers     = new(ResourceObj::C_HEAP, mtCode)GrowableArray<address>(32, true);
+  _fingerprints = new(ResourceObj::C_HEAP, mtCode)GrowableArray<uint64_t>(32, mtCode);
+  _handlers     = new(ResourceObj::C_HEAP, mtCode)GrowableArray<address>(32, mtCode);
 }
 
 address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
--- a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -681,7 +681,7 @@
 static int add_thread_to_exclusion_list(jobject thread) {
   ThreadExclusionListAccess lock;
   if (exclusion_list == NULL) {
-    exclusion_list = new (ResourceObj::C_HEAP, mtTracing) GrowableArray<jweak>(10, true, mtTracing);
+    exclusion_list = new (ResourceObj::C_HEAP, mtTracing) GrowableArray<jweak>(10, mtTracing);
   }
   assert(exclusion_list != NULL, "invariant");
   assert(thread_is_not_excluded(thread), "invariant");
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -52,7 +52,7 @@
 
 template <typename T>
 static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
-  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
+  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, mtTracing);
 }
 
 static GrowableArray<traceid>* unloaded_thread_id_set = NULL;
--- a/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/periodic/jfrNetworkUtilization.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -77,7 +77,7 @@
 
 static GrowableArray<InterfaceEntry>* get_interfaces() {
   if (_interfaces == NULL) {
-    _interfaces = new(ResourceObj::C_HEAP, mtTracing) GrowableArray<InterfaceEntry>(10, true, mtTracing);
+    _interfaces = new(ResourceObj::C_HEAP, mtTracing) GrowableArray<InterfaceEntry>(10, mtTracing);
   }
   return _interfaces;
 }
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -106,7 +106,7 @@
 };
 
 JfrThreadGroupsHelper::JfrThreadGroupsHelper(const JavaThread* jt, Thread* current) {
-  _thread_group_hierarchy = new GrowableArray<JfrThreadGroupPointers*>(10, false, mtTracing);
+  _thread_group_hierarchy = new GrowableArray<JfrThreadGroupPointers*>(10);
   _current_iterator_pos = populate_thread_group_hierarchy(jt, current) - 1;
 }
 
@@ -259,7 +259,7 @@
 }
 
 JfrThreadGroup::JfrThreadGroup() :
-  _list(new (ResourceObj::C_HEAP, mtTracing) GrowableArray<JfrThreadGroupEntry*>(initial_array_size, true, mtTracing)) {}
+  _list(new (ResourceObj::C_HEAP, mtTracing) GrowableArray<JfrThreadGroupEntry*>(initial_array_size, mtTracing)) {}
 
 JfrThreadGroup::~JfrThreadGroup() {
   if (_list != NULL) {
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetUtils.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -268,8 +268,8 @@
   _symbol_id->set_class_unload(class_unload);
   _total_count = 0;
   // resource allocation
-  _klass_list = new GrowableArray<const Klass*>(initial_klass_list_size, false, mtTracing);
-  _klass_loader_set = new GrowableArray<const Klass*>(initial_klass_loader_set_size, false, mtTracing);
+  _klass_list = new GrowableArray<const Klass*>(initial_klass_list_size);
+  _klass_loader_set = new GrowableArray<const Klass*>(initial_klass_loader_set_size);
 }
 
 JfrArtifactSet::~JfrArtifactSet() {
--- a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -122,7 +122,7 @@
   const int length = options->length();
   assert(length >= 1, "invariant");
   assert(dcmd_recordings_array == NULL, "invariant");
-  dcmd_recordings_array = new (ResourceObj::C_HEAP, mtTracing)GrowableArray<JfrStartFlightRecordingDCmd*>(length, true, mtTracing);
+  dcmd_recordings_array = new (ResourceObj::C_HEAP, mtTracing)GrowableArray<JfrStartFlightRecordingDCmd*>(length, mtTracing);
   assert(dcmd_recordings_array != NULL, "invariant");
   for (int i = 0; i < length; ++i) {
     JfrStartFlightRecordingDCmd* const dcmd_recording = new(ResourceObj::C_HEAP, mtTracing) JfrStartFlightRecordingDCmd(tty, true);
--- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -296,7 +296,7 @@
     if (_path_buffer_file_name_offset == -1) {
       return;
     }
-    _file_names = new (ResourceObj::C_HEAP, mtTracing) GrowableArray<const char*>(10, true, mtTracing);
+    _file_names = new (ResourceObj::C_HEAP, mtTracing) GrowableArray<const char*>(10, mtTracing);
     if (_file_names == NULL) {
       log_error(jfr, system)("Unable to malloc memory during jfr emergency dump");
       return;
--- a/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/recorder/service/jfrOptionSet.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -702,7 +702,7 @@
   const size_t value_length = strlen(value);
 
   if (start_flight_recording_options_array == NULL) {
-    start_flight_recording_options_array = new (ResourceObj::C_HEAP, mtTracing) GrowableArray<const char*>(8, true, mtTracing);
+    start_flight_recording_options_array = new (ResourceObj::C_HEAP, mtTracing) GrowableArray<const char*>(8, mtTracing);
   }
   assert(start_flight_recording_options_array != NULL, "invariant");
   char* const startup_value = NEW_C_HEAP_ARRAY(char, value_length + 1, mtTracing);
--- a/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -49,7 +49,7 @@
 
 template <typename T>
 static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
-  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
+  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, mtTracing);
 }
 
 static bool initialize(TRAPS) {
@@ -137,7 +137,7 @@
   }
 
   ResourceMark rm(THREAD);
-  GrowableArray<const void*> event_subklasses(THREAD, initial_array_size);
+  GrowableArray<const void*> event_subklasses(initial_array_size);
   fill_klasses(event_subklasses, klass, THREAD);
 
   if (event_subklasses.is_empty()) {
--- a/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/jfr/support/jfrKlassUnloading.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -33,7 +33,7 @@
 
 template <typename T>
 static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) {
-  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing);
+  return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, mtTracing);
 }
 
 // Track the set of unloaded klasses during a chunk / epoch.
--- a/src/hotspot/share/memory/allocation.hpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/memory/allocation.hpp	Tue Jun 16 09:37:53 2020 +0200
@@ -114,33 +114,34 @@
 };
 #endif
 
-#define MEMORY_TYPES_DO(f) \
-  /* Memory type by sub systems. It occupies lower byte. */  \
-  f(mtJavaHeap,      "Java Heap")   /* Java heap                                 */ \
-  f(mtClass,         "Class")       /* Java classes                              */ \
-  f(mtThread,        "Thread")      /* thread objects                            */ \
-  f(mtThreadStack,   "Thread Stack")                                                \
-  f(mtCode,          "Code")        /* generated code                            */ \
-  f(mtGC,            "GC")                                                          \
-  f(mtCompiler,      "Compiler")                                                    \
-  f(mtJVMCI,         "JVMCI")                                                       \
-  f(mtInternal,      "Internal")    /* memory used by VM, but does not belong to */ \
-                                    /* any of above categories, and not used by  */ \
-                                    /* NMT                                       */ \
-  f(mtOther,         "Other")       /* memory not used by VM                     */ \
-  f(mtSymbol,        "Symbol")                                                      \
-  f(mtNMT,           "Native Memory Tracking")  /* memory used by NMT            */ \
-  f(mtClassShared,   "Shared class space")      /* class data sharing            */ \
-  f(mtChunk,         "Arena Chunk") /* chunk that holds content of arenas        */ \
-  f(mtTest,          "Test")        /* Test type for verifying NMT               */ \
-  f(mtTracing,       "Tracing")                                                     \
-  f(mtLogging,       "Logging")                                                     \
-  f(mtStatistics,    "Statistics")                                                  \
-  f(mtArguments,     "Arguments")                                                   \
-  f(mtModule,        "Module")                                                      \
-  f(mtSafepoint,     "Safepoint")                                                   \
-  f(mtSynchronizer,  "Synchronization")                                             \
-  f(mtNone,          "Unknown")                                                     \
+#define MEMORY_TYPES_DO(f)                                                           \
+  /* Memory type by sub systems. It occupies lower byte. */                          \
+  f(mtJavaHeap,       "Java Heap")   /* Java heap                                 */ \
+  f(mtClass,          "Class")       /* Java classes                              */ \
+  f(mtThread,         "Thread")      /* thread objects                            */ \
+  f(mtThreadStack,    "Thread Stack")                                                \
+  f(mtCode,           "Code")        /* generated code                            */ \
+  f(mtGC,             "GC")                                                          \
+  f(mtCompiler,       "Compiler")                                                    \
+  f(mtJVMCI,          "JVMCI")                                                       \
+  f(mtInternal,       "Internal")    /* memory used by VM, but does not belong to */ \
+                                     /* any of above categories, and not used by  */ \
+                                     /* NMT                                       */ \
+  f(mtOther,          "Other")       /* memory not used by VM                     */ \
+  f(mtSymbol,         "Symbol")                                                      \
+  f(mtNMT,            "Native Memory Tracking")  /* memory used by NMT            */ \
+  f(mtClassShared,    "Shared class space")      /* class data sharing            */ \
+  f(mtChunk,          "Arena Chunk") /* chunk that holds content of arenas        */ \
+  f(mtTest,           "Test")        /* Test type for verifying NMT               */ \
+  f(mtTracing,        "Tracing")                                                     \
+  f(mtLogging,        "Logging")                                                     \
+  f(mtStatistics,     "Statistics")                                                  \
+  f(mtArguments,      "Arguments")                                                   \
+  f(mtModule,         "Module")                                                      \
+  f(mtSafepoint,      "Safepoint")                                                   \
+  f(mtSynchronizer,   "Synchronization")                                             \
+  f(mtServiceability, "Serviceability")                                              \
+  f(mtNone,           "Unknown")                                                     \
   //end
 
 #define MEMORY_TYPE_DECLARE_ENUM(type, human_readable) \
--- a/src/hotspot/share/memory/dynamicArchive.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/memory/dynamicArchive.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -521,8 +521,8 @@
 
 public:
   DynamicArchiveBuilder() {
-    _klasses = new (ResourceObj::C_HEAP, mtClass) GrowableArray<InstanceKlass*>(100, true, mtInternal);
-    _symbols = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(1000, true, mtInternal);
+    _klasses = new (ResourceObj::C_HEAP, mtClass) GrowableArray<InstanceKlass*>(100, mtClass);
+    _symbols = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(1000, mtClass);
 
     _estimated_metsapceobj_bytes = 0;
     _estimated_hashtable_bytes = 0;
--- a/src/hotspot/share/memory/filemap.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/memory/filemap.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -499,7 +499,7 @@
   Arguments::assert_is_dumping_archive();
   log_info(class, path)("non-existent Class-Path entry %s", path);
   if (_non_existent_class_paths == NULL) {
-    _non_existent_class_paths = new (ResourceObj::C_HEAP, mtInternal)GrowableArray<const char*>(10, true);
+    _non_existent_class_paths = new (ResourceObj::C_HEAP, mtClass)GrowableArray<const char*>(10, mtClass);
   }
   _non_existent_class_paths->append(os::strdup(path));
 }
@@ -626,8 +626,7 @@
 }
 
 GrowableArray<const char*>* FileMapInfo::create_path_array(const char* paths) {
-  GrowableArray<const char*>* path_array =  new(ResourceObj::RESOURCE_AREA, mtInternal)
-      GrowableArray<const char*>(10);
+  GrowableArray<const char*>* path_array = new GrowableArray<const char*>(10);
 
   ClasspathStream cp_stream(paths);
   while (cp_stream.has_next()) {
--- a/src/hotspot/share/memory/heapInspection.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/memory/heapInspection.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -50,7 +50,7 @@
 
 inline void KlassInfoEntry::add_subclass(KlassInfoEntry* cie) {
   if (_subclasses == NULL) {
-    _subclasses = new  (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(4, true);
+    _subclasses = new  (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(4, mtServiceability);
   }
   _subclasses->append(cie);
 }
@@ -243,7 +243,7 @@
 
 KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit) :
   _cit(cit) {
-  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
+  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<KlassInfoEntry*>(_histo_initial_size, mtServiceability);
 }
 
 KlassInfoHisto::~KlassInfoHisto() {
--- a/src/hotspot/share/memory/heapShared.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/memory/heapShared.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -309,7 +309,7 @@
   assert(DumpSharedSpaces, "dump time only");
   if (_subgraph_entry_fields == NULL) {
     _subgraph_entry_fields =
-      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true);
+      new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, mtClass);
   }
   _subgraph_entry_fields->append((juint)static_field_offset);
   _subgraph_entry_fields->append(CompressedOops::encode(v));
@@ -325,7 +325,7 @@
 
   if (_subgraph_object_klasses == NULL) {
     _subgraph_object_klasses =
-      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true);
+      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
   }
 
   assert(relocated_k->is_shared(), "must be a shared class");
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -459,7 +459,7 @@
 static GrowableArray<Handle>* _extra_interned_strings = NULL;
 
 void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
-  _extra_interned_strings = new (ResourceObj::C_HEAP, mtInternal)GrowableArray<Handle>(10000, true);
+  _extra_interned_strings = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Handle>(10000, mtClassShared);
 
   HashtableTextDump reader(filename);
   reader.check_version("VERSION: 1.0");
--- a/src/hotspot/share/oops/methodData.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/oops/methodData.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -245,7 +245,7 @@
   ArgumentOffsetComputer(Symbol* signature, int max)
     : SignatureIterator(signature),
       _max(max), _offset(0),
-      _offsets(Thread::current(), max) {
+      _offsets(max) {
     do_parameters_on(this);  // non-virtual template execution
   }
 
--- a/src/hotspot/share/opto/macro.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/opto/macro.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -451,7 +451,7 @@
   Node *alloc_mem = alloc->in(TypeFunc::Memory);
 
   uint length = mem->req();
-  GrowableArray <Node *> values(length, length, NULL, false);
+  GrowableArray <Node *> values(length, length, NULL);
 
   // create a new Phi for the value
   PhiNode *phi = new PhiNode(mem->in(0), phi_type, NULL, mem->_idx, instance_id, alias_idx, offset);
--- a/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -172,7 +172,7 @@
   assert(_global_code_blobs == NULL, "checking");
 
   // create the global list
-  _global_code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(50,true);
+  _global_code_blobs = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JvmtiCodeBlobDesc*>(50, mtServiceability);
 
   // iterate over the stub code descriptors and put them in the list first.
   for (StubCodeDesc* desc = StubCodeDesc::first(); desc != NULL; desc = StubCodeDesc::next(desc)) {
--- a/src/hotspot/share/prims/jvmtiEnv.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -1204,7 +1204,7 @@
 
   // growable array of jvmti monitors info on the C-heap
   GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
-      new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
+      new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, mtServiceability);
 
   // It is only safe to perform the direct operation on the current
   // thread. All other usage needs to use a direct handshake for safety.
@@ -1249,7 +1249,7 @@
 
   // growable array of jvmti monitors info on the C-heap
   GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
-         new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
+         new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, mtServiceability);
 
   // It is only safe to perform the direct operation on the current
   // thread. All other usage needs to use a direct handshake for safety.
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -1145,7 +1145,7 @@
 
 ResourceTracker::ResourceTracker(JvmtiEnv* env) {
   _env = env;
-  _allocations = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<unsigned char*>(20, true);
+  _allocations = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<unsigned char*>(20, mtServiceability);
   _failed = false;
 }
 ResourceTracker::~ResourceTracker() {
--- a/src/hotspot/share/prims/jvmtiEnvThreadState.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/prims/jvmtiEnvThreadState.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -96,7 +96,7 @@
 //
 
 JvmtiFramePops::JvmtiFramePops() {
-  _pops = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int> (2, true);
+  _pops = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<int> (2, mtServiceability);
 }
 
 JvmtiFramePops::~JvmtiFramePops() {
--- a/src/hotspot/share/prims/jvmtiExport.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/prims/jvmtiExport.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -2812,7 +2812,7 @@
 // register a stub
 void JvmtiDynamicCodeEventCollector::register_stub(const char* name, address start, address end) {
  if (_code_blobs == NULL) {
-   _code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(1,true);
+   _code_blobs = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JvmtiCodeBlobDesc*>(1, mtServiceability);
  }
  _code_blobs->append(new JvmtiCodeBlobDesc(name, start, end));
 }
@@ -2838,7 +2838,7 @@
 void JvmtiObjectAllocEventCollector::record_allocation(oop obj) {
   assert(is_enabled(), "Object alloc event collector is not enabled");
   if (_allocated == NULL) {
-    _allocated = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(1, true);
+    _allocated = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(1, mtServiceability);
   }
   _allocated->push(obj);
 }
--- a/src/hotspot/share/prims/jvmtiExtensions.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/prims/jvmtiExtensions.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -49,8 +49,8 @@
 // event. The function and the event are registered here.
 //
 void JvmtiExtensions::register_extensions() {
-  _ext_functions = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiExtensionFunctionInfo*>(1,true);
-  _ext_events = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiExtensionEventInfo*>(1,true);
+  _ext_functions = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<jvmtiExtensionFunctionInfo*>(1, mtServiceability);
+  _ext_events = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<jvmtiExtensionEventInfo*>(1, mtServiceability);
 
   // register our extension function
   static jvmtiParamInfo func_params[] = {
--- a/src/hotspot/share/prims/jvmtiImpl.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/prims/jvmtiImpl.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -146,7 +146,7 @@
 void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) {
   _this_obj       = this_obj;
   _listener_fun   = listener_fun;
-  _elements       = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<GrowableElement*>(5,true);
+  _elements       = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<GrowableElement*>(5, mtServiceability);
   recache();
 }
 
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -36,7 +36,7 @@
 }
 
 GrowableArray<JvmtiRawMonitor*>* JvmtiPendingMonitors::_monitors =
-  new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiRawMonitor*>(1, true);
+  new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JvmtiRawMonitor*>(1, mtServiceability);
 
 void JvmtiPendingMonitors::transition_raw_monitors() {
   assert((Threads::number_of_threads()==1),
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -821,8 +821,8 @@
 };
 
 ClassFieldMap::ClassFieldMap() {
-  _fields = new (ResourceObj::C_HEAP, mtInternal)
-    GrowableArray<ClassFieldDescriptor*>(initial_field_count, true);
+  _fields = new (ResourceObj::C_HEAP, mtServiceability)
+    GrowableArray<ClassFieldDescriptor*>(initial_field_count, mtServiceability);
 }
 
 ClassFieldMap::~ClassFieldMap() {
@@ -958,8 +958,8 @@
 // record that the given InstanceKlass is caching a field map
 void JvmtiCachedClassFieldMap::add_to_class_list(InstanceKlass* ik) {
   if (_class_list == NULL) {
-    _class_list = new (ResourceObj::C_HEAP, mtInternal)
-      GrowableArray<InstanceKlass*>(initial_class_count, true);
+    _class_list = new (ResourceObj::C_HEAP, mtServiceability)
+      GrowableArray<InstanceKlass*>(initial_class_count, mtServiceability);
   }
   _class_list->push(ik);
 }
@@ -1531,8 +1531,8 @@
     _env = env;
     _tags = (jlong*)tags;
     _tag_count = tag_count;
-    _object_results = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jobject>(1,true);
-    _tag_results = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<uint64_t>(1,true);
+    _object_results = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<jobject>(1, mtServiceability);
+    _tag_results = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<uint64_t>(1, mtServiceability);
   }
 
   ~TagObjectCollector() {
@@ -1674,8 +1674,8 @@
   Universe::heap()->ensure_parsability(false);  // no need to retire TLABs
 
   // create stacks for interesting headers
-  _saved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(4000, true);
-  _saved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(4000, true);
+  _saved_mark_stack = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<markWord>(4000, mtServiceability);
+  _saved_oop_stack = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(4000, mtServiceability);
 
   if (UseBiasedLocking) {
     BiasedLocking::preserve_marks();
@@ -2663,7 +2663,7 @@
   bool _reporting_string_values;
 
   GrowableArray<oop>* create_visit_stack() {
-    return new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(initial_visit_stack_size, true);
+    return new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(initial_visit_stack_size, mtServiceability);
   }
 
   // accessors
--- a/src/hotspot/share/runtime/arguments.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/runtime/arguments.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -3110,7 +3110,7 @@
 
   // Create GrowableArray lazily, only if --patch-module has been specified
   if (_patch_mod_prefix == NULL) {
-    _patch_mod_prefix = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<ModulePatchPath*>(10, true);
+    _patch_mod_prefix = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<ModulePatchPath*>(10, mtArguments);
   }
 
   _patch_mod_prefix->push(new ModulePatchPath(module_name, path));
@@ -3366,7 +3366,7 @@
 
     int length = args->nOptions + args_to_insert->nOptions - 1;
     GrowableArray<JavaVMOption> *options = new (ResourceObj::C_HEAP, mtArguments)
-              GrowableArray<JavaVMOption>(length, true);    // Construct new option array
+              GrowableArray<JavaVMOption>(length, mtArguments);    // Construct new option array
     for (int i = 0; i < args->nOptions; i++) {
       if (i == vm_options_file_pos) {
         // insert the new options starting at the same place as the
@@ -3478,7 +3478,7 @@
 }
 
 jint Arguments::parse_options_buffer(const char* name, char* buffer, const size_t buf_len, ScopedVMInitArgs* vm_args) {
-  GrowableArray<JavaVMOption> *options = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JavaVMOption>(2, true);    // Construct option array
+  GrowableArray<JavaVMOption> *options = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JavaVMOption>(2, mtArguments);    // Construct option array
 
   // some pointers to help with parsing
   char *buffer_end = buffer + buf_len;
--- a/src/hotspot/share/runtime/biasedLocking.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/runtime/biasedLocking.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -903,8 +903,8 @@
   // monitors in a prepass and, if they are biased, preserve their
   // mark words here. This should be a relatively small set of objects
   // especially compared to the number of objects in the heap.
-  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(10, true);
-  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
+  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<markWord>(10, mtGC);
+  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<Handle>(10, mtGC);
 
   ResourceMark rm;
   Thread* cur = Thread::current();
--- a/src/hotspot/share/runtime/flags/jvmFlagConstraintList.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintList.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -250,7 +250,7 @@
 
 // Check the ranges of all flags that have them or print them out and exit if requested
 void JVMFlagConstraintList::init(void) {
-  _constraints = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JVMFlagConstraint*>(INITIAL_CONSTRAINTS_SIZE, true);
+  _constraints = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JVMFlagConstraint*>(INITIAL_CONSTRAINTS_SIZE, mtArguments);
 
   EMIT_CONSTRAINT_START
 
--- a/src/hotspot/share/runtime/flags/jvmFlagRangeList.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagRangeList.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -315,7 +315,7 @@
 // Check the ranges of all flags that have them
 void JVMFlagRangeList::init(void) {
 
-  _ranges = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JVMFlagRange*>(INITIAL_RANGES_SIZE, true);
+  _ranges = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JVMFlagRange*>(INITIAL_RANGES_SIZE, mtArguments);
 
   EMIT_RANGE_START
 
--- a/src/hotspot/share/runtime/perfData.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/runtime/perfData.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -563,12 +563,12 @@
 
 PerfDataList::PerfDataList(int length) {
 
-  _set = new(ResourceObj::C_HEAP, mtInternal) PerfDataArray(length, true);
+  _set = new(ResourceObj::C_HEAP, mtInternal) PerfDataArray(length, mtInternal);
 }
 
 PerfDataList::PerfDataList(PerfDataList* p) {
 
-  _set = new(ResourceObj::C_HEAP, mtInternal) PerfDataArray(p->length(), true);
+  _set = new(ResourceObj::C_HEAP, mtInternal) PerfDataArray(p->length(), mtInternal);
 
   _set->appendAll(p->get_impl());
 }
--- a/src/hotspot/share/runtime/reflectionUtils.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/runtime/reflectionUtils.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -69,7 +69,7 @@
 
 
 GrowableArray<FilteredField*> *FilteredFieldsMap::_filtered_fields =
-  new (ResourceObj::C_HEAP, mtInternal) GrowableArray<FilteredField*>(3,true);
+  new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<FilteredField*>(3, mtServiceability);
 
 
 void FilteredFieldsMap::initialize() {
--- a/src/hotspot/share/runtime/thread.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/runtime/thread.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -232,7 +232,7 @@
   set_resource_area(new (mtThread)ResourceArea());
   DEBUG_ONLY(_current_resource_mark = NULL;)
   set_handle_area(new (mtThread) HandleArea(NULL));
-  set_metadata_handles(new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(30, true));
+  set_metadata_handles(new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(30, mtClass));
   set_active_handles(NULL);
   set_free_handle_block(NULL);
   set_last_handle_mark(NULL);
--- a/src/hotspot/share/runtime/unhandledOops.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/runtime/unhandledOops.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -35,8 +35,8 @@
 
 UnhandledOops::UnhandledOops(Thread* thread) {
   _thread = thread;
-  _oop_list = new (ResourceObj::C_HEAP, mtInternal)
-                    GrowableArray<UnhandledOopEntry>(free_list_size, true);
+  _oop_list = new (ResourceObj::C_HEAP, mtThread)
+                    GrowableArray<UnhandledOopEntry>(free_list_size, mtThread);
   _level = 0;
 }
 
--- a/src/hotspot/share/runtime/vframe_hp.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/runtime/vframe_hp.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -117,7 +117,7 @@
   } else {
     // No deferred updates pending for this thread.
     // allocate in C heap
-    deferred =  new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, true);
+    deferred =  new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariableSet*> (1, mtCompiler);
     thread()->set_deferred_locals(deferred);
   }
   if (locals == NULL) {
@@ -329,7 +329,7 @@
   _id = id;
   _vframe_id = vframe_id;
   // Alway will need at least one, must be on C heap
-  _locals = new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariable*> (1, true);
+  _locals = new(ResourceObj::C_HEAP, mtCompiler) GrowableArray<jvmtiDeferredLocalVariable*> (1, mtCompiler);
 }
 
 jvmtiDeferredLocalVariableSet::~jvmtiDeferredLocalVariableSet() {
--- a/src/hotspot/share/services/diagnosticArgument.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/services/diagnosticArgument.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -30,7 +30,7 @@
 #include "services/diagnosticArgument.hpp"
 
 StringArrayArgument::StringArrayArgument() {
-  _array = new(ResourceObj::C_HEAP, mtInternal)GrowableArray<char *>(32, true);
+  _array = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<char *>(32, mtServiceability);
   assert(_array != NULL, "Sanity check");
 }
 
--- a/src/hotspot/share/services/heapDumper.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/services/heapDumper.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -1536,7 +1536,7 @@
     AbstractGangTask("dump heap") {
     _local_writer = writer;
     _gc_before_heap_dump = gc_before_heap_dump;
-    _klass_map = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, true);
+    _klass_map = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<Klass*>(INITIAL_CLASS_COUNT, mtServiceability);
     _stack_traces = NULL;
     _num_threads = 0;
     if (oome) {
--- a/src/hotspot/share/services/memoryService.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/services/memoryService.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -44,13 +44,13 @@
 #include "utilities/macros.hpp"
 
 GrowableArray<MemoryPool*>* MemoryService::_pools_list =
-  new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_pools_list_size, true);
+  new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<MemoryPool*>(init_pools_list_size, mtServiceability);
 GrowableArray<MemoryManager*>* MemoryService::_managers_list =
-  new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryManager*>(init_managers_list_size, true);
+  new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<MemoryManager*>(init_managers_list_size, mtServiceability);
 
 MemoryManager*   MemoryService::_code_cache_manager    = NULL;
 GrowableArray<MemoryPool*>* MemoryService::_code_heap_pools =
-    new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryPool*>(init_code_heap_pools_size, true);
+    new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<MemoryPool*>(init_code_heap_pools_size, mtServiceability);
 MemoryPool*      MemoryService::_metaspace_pool        = NULL;
 MemoryPool*      MemoryService::_compressed_class_pool = NULL;
 
--- a/src/hotspot/share/services/threadService.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/services/threadService.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -589,7 +589,7 @@
     GrowableArray<MonitorInfo*>* list = jvf->locked_monitors();
     int length = list->length();
     if (length > 0) {
-      _locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(length, true);
+      _locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(length, mtServiceability);
       for (int i = 0; i < length; i++) {
         MonitorInfo* monitor = list->at(i);
         assert(monitor->owner() != NULL, "This monitor must have an owning object");
@@ -646,11 +646,11 @@
 
 ThreadStackTrace::ThreadStackTrace(JavaThread* t, bool with_locked_monitors) {
   _thread = t;
-  _frames = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, true);
+  _frames = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<StackFrameInfo*>(INITIAL_ARRAY_SIZE, mtServiceability);
   _depth = 0;
   _with_locked_monitors = with_locked_monitors;
   if (_with_locked_monitors) {
-    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true);
+    _jni_locked_monitors = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);
   } else {
     _jni_locked_monitors = NULL;
   }
@@ -776,7 +776,7 @@
   // dump all locked concurrent locks
   assert(SafepointSynchronize::is_at_safepoint(), "all threads are stopped");
 
-  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(INITIAL_ARRAY_SIZE, true /* C_heap */);
+  GrowableArray<oop>* aos_objects = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<oop>(INITIAL_ARRAY_SIZE, mtServiceability);
 
   // Find all instances of AbstractOwnableSynchronizer
   HeapInspection::find_instances_at_safepoint(SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass(),
@@ -850,7 +850,7 @@
 
 ThreadConcurrentLocks::ThreadConcurrentLocks(JavaThread* thread) {
   _thread = thread;
-  _owned_locks = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, true);
+  _owned_locks = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<instanceOop>(INITIAL_ARRAY_SIZE, mtServiceability);
   _next = NULL;
 }
 
@@ -962,7 +962,7 @@
 
 DeadlockCycle::DeadlockCycle() {
   _is_deadlock = false;
-  _threads = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, true);
+  _threads = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, mtServiceability);
   _next = NULL;
 }
 
--- a/src/hotspot/share/utilities/growableArray.hpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/utilities/growableArray.hpp	Tue Jun 16 09:37:53 2020 +0200
@@ -106,15 +106,14 @@
 
   // This GA will use the resource stack for storage if c_heap==false,
   // Else it will use the C heap.  Use clear_and_deallocate to avoid leaks.
-  GenericGrowableArray(int initial_size, int initial_len, bool c_heap, MEMFLAGS flags = mtNone) {
+  GenericGrowableArray(int initial_size, int initial_len, MEMFLAGS flags) {
     _len = initial_len;
     _max = initial_size;
     _memflags = flags;
 
-    // memory type has to be specified for C heap allocation
-    assert(!(c_heap && flags == mtNone), "memory type not specified for C heap object");
+    assert(_len >= 0 && _len <= _max, "initial_len too big");
 
-    assert(_len >= 0 && _len <= _max, "initial_len too big");
+    const bool c_heap = flags != mtNone;
     _arena = (c_heap ? (Arena*)1 : NULL);
     set_nesting();
     assert(!on_C_heap() || allocated_on_C_heap(), "growable array must be on C heap if elements are");
@@ -141,12 +140,6 @@
 
   void* raw_allocate(int elementSize);
 
-  // some uses pass the Thread explicitly for speed (4990299 tuning)
-  void* raw_allocate(Thread* thread, int elementSize) {
-    assert(on_stack(), "fast ResourceObj path only");
-    return (void*)resource_allocate_bytes(thread, elementSize * _max);
-  }
-
   void free_C_heap(void* elements);
 };
 
@@ -168,14 +161,10 @@
   void grow(int j);
   void raw_at_put_grow(int i, const E& p, const E& fill);
   void  clear_and_deallocate();
- public:
-  GrowableArray(Thread* thread, int initial_size) : GenericGrowableArray(initial_size, 0, false) {
-    _data = (E*)raw_allocate(thread, sizeof(E));
-    for (int i = 0; i < _max; i++) ::new ((void*)&_data[i]) E();
-  }
 
-  GrowableArray(int initial_size, bool C_heap = false, MEMFLAGS F = mtInternal)
-    : GenericGrowableArray(initial_size, 0, C_heap, F) {
+public:
+  GrowableArray(int initial_size, MEMFLAGS F = mtNone)
+    : GenericGrowableArray(initial_size, 0, F) {
     _data = (E*)raw_allocate(sizeof(E));
 // Needed for Visual Studio 2012 and older
 #ifdef _MSC_VER
@@ -184,8 +173,8 @@
     for (int i = 0; i < _max; i++) ::new ((void*)&_data[i]) E();
   }
 
-  GrowableArray(int initial_size, int initial_len, const E& filler, bool C_heap = false, MEMFLAGS memflags = mtInternal)
-    : GenericGrowableArray(initial_size, initial_len, C_heap, memflags) {
+  GrowableArray(int initial_size, int initial_len, const E& filler, MEMFLAGS memflags = mtNone)
+    : GenericGrowableArray(initial_size, initial_len, memflags) {
     _data = (E*)raw_allocate(sizeof(E));
     int i = 0;
     for (; i < _len; i++) ::new ((void*)&_data[i]) E(filler);
@@ -203,7 +192,7 @@
     for (; i < _max; i++) ::new ((void*)&_data[i]) E();
   }
 
-  GrowableArray() : GenericGrowableArray(2, 0, false) {
+  GrowableArray() : GenericGrowableArray(2, 0, mtNone) {
     _data = (E*)raw_allocate(sizeof(E));
     ::new ((void*)&_data[0]) E();
     ::new ((void*)&_data[1]) E();
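With the new constructor, the storage kind follows from the MEMFLAGS argument: any concrete category implies C-heap allocation (const bool c_heap = flags != mtNone in the hunk above), while the mtNone default keeps resource-area allocation. Two call-site shapes from elsewhere in this diff illustrate the distinction (variable names here are illustrative, not from the changeset):

  // C-heap backed array: a concrete NMT category (mtGC) selects C-heap storage
  // (g1FullGCCompactionPoint.cpp hunk).
  GrowableArray<HeapRegion*>* compaction_regions =
      new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(32, mtGC);

  // Resource-area array: no MEMFLAGS argument, so the mtNone default applies and
  // the array is allocated in the current ResourceArea (jfrTypeSetUtils.cpp hunk).
  GrowableArray<const Klass*>* klass_list =
      new GrowableArray<const Klass*>(initial_klass_list_size);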
--- a/src/hotspot/share/utilities/hashtable.inline.hpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/utilities/hashtable.inline.hpp	Tue Jun 16 09:37:53 2020 +0200
@@ -73,7 +73,7 @@
   _first_free_entry = NULL;
   _end_block = NULL;
   _number_of_entries = number_of_entries;
-  _entry_blocks = new(ResourceObj::C_HEAP, F) GrowableArray<char*>(4, true, F);
+  _entry_blocks = new(ResourceObj::C_HEAP, F) GrowableArray<char*>(4, F);
 }
 
 
--- a/src/hotspot/share/utilities/histogram.cpp	Tue Jun 16 03:16:41 2020 +0000
+++ b/src/hotspot/share/utilities/histogram.cpp	Tue Jun 16 09:37:53 2020 +0200
@@ -70,7 +70,7 @@
 
 Histogram::Histogram(const char* title,int estimatedCount) {
   _title = title;
-  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<HistogramElement*>(estimatedCount,true);
+  _elements = new (ResourceObj::C_HEAP, mtServiceability) GrowableArray<HistogramElement*>(estimatedCount, mtServiceability);
 }
 
 void Histogram::add_element(HistogramElement* element) {