OpenJDK / amber / amber
changeset 41288:d8eed711951a
Merge
author | sspitsyn |
date | Wed, 21 Sep 2016 08:38:21 +0000 |
parents | 203e59c224b0 802f69f05345 |
children | e9a1638b8cea |
files | jdk/src/java.base/share/classes/sun/text/resources/BreakIteratorRulesProvider.java jdk/src/jdk.localedata/share/classes/sun/text/resources/thai_dict jdk/test/sun/security/krb5/auto/unreachable.krb5.conf jdk/test/sun/security/pkcs11/nss/lib/windows-amd64/libnspr4.dll jdk/test/sun/security/pkcs11/nss/lib/windows-amd64/libnspr4.lib jdk/test/sun/security/pkcs11/nss/lib/windows-amd64/libplc4.dll jdk/test/sun/security/pkcs11/nss/lib/windows-amd64/libplc4.lib jdk/test/sun/security/pkcs11/nss/lib/windows-amd64/libplds4.dll jdk/test/sun/security/pkcs11/nss/lib/windows-amd64/libplds4.lib jdk/test/sun/security/pkcs11/nss/lib/windows-i586/libnspr4.dll jdk/test/sun/security/pkcs11/nss/lib/windows-i586/libnspr4.lib jdk/test/sun/security/pkcs11/nss/lib/windows-i586/libplc4.dll jdk/test/sun/security/pkcs11/nss/lib/windows-i586/libplc4.lib jdk/test/sun/security/pkcs11/nss/lib/windows-i586/libplds4.dll jdk/test/sun/security/pkcs11/nss/lib/windows-i586/libplds4.lib jdk/test/sun/security/pkcs11/nss/src/nss-3.16_nspr-4.10_src.tar.gz jdk/test/sun/security/pkcs11/nss/src/nss-3.16_nspr-4.10_src.tar.gz.sha256 test/lib/share/classes/jdk/test/lib/Asserts.java test/lib/share/classes/jdk/test/lib/JDKToolFinder.java test/lib/share/classes/jdk/test/lib/JDKToolLauncher.java test/lib/share/classes/jdk/test/lib/Platform.java test/lib/share/classes/jdk/test/lib/Utils.java test/lib/share/classes/jdk/test/lib/apps/LingeredApp.java test/lib/share/classes/jdk/test/lib/apps/LingeredAppWithDeadlock.java test/lib/share/classes/jdk/test/lib/hprof/HprofParser.java test/lib/share/classes/jdk/test/lib/hprof/README test/lib/share/classes/jdk/test/lib/hprof/model/AbstractJavaHeapObjectVisitor.java test/lib/share/classes/jdk/test/lib/hprof/model/ArrayTypeCodes.java test/lib/share/classes/jdk/test/lib/hprof/model/HackJavaValue.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaBoolean.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaByte.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaChar.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaClass.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaDouble.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaField.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaFloat.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaHeapObject.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaHeapObjectVisitor.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaInt.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaLazyReadObject.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaLong.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaObject.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaObjectArray.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaObjectRef.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaShort.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaStatic.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaThing.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaValue.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaValueArray.java test/lib/share/classes/jdk/test/lib/hprof/model/ReachableExcludes.java test/lib/share/classes/jdk/test/lib/hprof/model/ReachableExcludesImpl.java test/lib/share/classes/jdk/test/lib/hprof/model/ReachableObjects.java test/lib/share/classes/jdk/test/lib/hprof/model/ReferenceChain.java test/lib/share/classes/jdk/test/lib/hprof/model/Root.java test/lib/share/classes/jdk/test/lib/hprof/model/Snapshot.java 
test/lib/share/classes/jdk/test/lib/hprof/model/StackFrame.java test/lib/share/classes/jdk/test/lib/hprof/model/StackTrace.java test/lib/share/classes/jdk/test/lib/hprof/parser/FileReadBuffer.java test/lib/share/classes/jdk/test/lib/hprof/parser/HprofReader.java test/lib/share/classes/jdk/test/lib/hprof/parser/MappedReadBuffer.java test/lib/share/classes/jdk/test/lib/hprof/parser/PositionDataInputStream.java test/lib/share/classes/jdk/test/lib/hprof/parser/PositionInputStream.java test/lib/share/classes/jdk/test/lib/hprof/parser/ReadBuffer.java test/lib/share/classes/jdk/test/lib/hprof/parser/Reader.java test/lib/share/classes/jdk/test/lib/hprof/util/ArraySorter.java test/lib/share/classes/jdk/test/lib/hprof/util/Comparer.java test/lib/share/classes/jdk/test/lib/hprof/util/CompositeEnumeration.java test/lib/share/classes/jdk/test/lib/hprof/util/Misc.java test/lib/share/classes/jdk/test/lib/hprof/util/VectorSorter.java test/lib/share/classes/jdk/test/lib/process/OutputAnalyzer.java test/lib/share/classes/jdk/test/lib/process/OutputBuffer.java test/lib/share/classes/jdk/test/lib/process/ProcessTools.java test/lib/share/classes/jdk/test/lib/process/StreamPumper.java |
diffstat | 1810 files changed, 54266 insertions(+), 19666 deletions(-) |
line diff
--- a/.hgtags Wed Sep 21 01:33:21 2016 -0700 +++ b/.hgtags Wed Sep 21 08:38:21 2016 +0000 @@ -376,3 +376,6 @@ 4d2a15091124488080d65848b704e25599b2aaeb jdk-9+131 2e83d21d78cd9c1d52e6cd2599e9c8aa36ea1f52 jdk-9+132 e17429a7e843c4a4ed3651458d0f950970edcbcc jdk-9+133 +a71210c0d9800eb6925b61ecd6198abd554f90ee jdk-9+134 +e384420383a5b79fa0012ebcb25d8f83cff7f777 jdk-9+135 +1b4b5d01aa11edf24b6fadbe3d2f3e411e3b02cd jdk-9+136
--- a/.hgtags-top-repo Wed Sep 21 01:33:21 2016 -0700 +++ b/.hgtags-top-repo Wed Sep 21 08:38:21 2016 +0000 @@ -376,3 +376,6 @@ 8728756c2f70a79a90188f4019cfd6b9a275765c jdk-9+131 a24702d4d5ab0015a5c553ed57f66fce7d85155e jdk-9+132 be1218f792a450dfb5d4b1f82616b9d95a6a732e jdk-9+133 +065724348690eda41fc69112278d8da6dcde548c jdk-9+134 +82b94cb5f342319d2cda77f9fa59703ad7fde576 jdk-9+135 +3ec350f5f32af249b59620d7e37b54bdcd77b233 jdk-9+136
--- a/common/autoconf/generated-configure.sh Wed Sep 21 01:33:21 2016 -0700 +++ b/common/autoconf/generated-configure.sh Wed Sep 21 08:38:21 2016 +0000 @@ -5095,7 +5095,7 @@ #CUSTOM_AUTOCONF_INCLUDE # Do not change or remove the following line, it is needed for consistency checks: -DATE_WHEN_GENERATED=1470863189 +DATE_WHEN_GENERATED=1472718471 ############################################################################### # @@ -15944,6 +15944,8 @@ HOTSPOT_TARGET_CPU_DEFINE=S390 elif test "x$OPENJDK_TARGET_CPU" = xs390x; then HOTSPOT_TARGET_CPU_DEFINE=S390 + elif test "x$OPENJDK_TARGET_CPU" != x; then + HOTSPOT_TARGET_CPU_DEFINE=$(echo $OPENJDK_TARGET_CPU | tr a-z A-Z) fi @@ -16117,6 +16119,8 @@ HOTSPOT_BUILD_CPU_DEFINE=S390 elif test "x$OPENJDK_BUILD_CPU" = xs390x; then HOTSPOT_BUILD_CPU_DEFINE=S390 + elif test "x$OPENJDK_BUILD_CPU" != x; then + HOTSPOT_BUILD_CPU_DEFINE=$(echo $OPENJDK_BUILD_CPU | tr a-z A-Z) fi
--- a/common/autoconf/platform.m4 Wed Sep 21 01:33:21 2016 -0700 +++ b/common/autoconf/platform.m4 Wed Sep 21 08:38:21 2016 +0000 @@ -454,6 +454,8 @@ HOTSPOT_$1_CPU_DEFINE=S390 elif test "x$OPENJDK_$1_CPU" = xs390x; then HOTSPOT_$1_CPU_DEFINE=S390 + elif test "x$OPENJDK_$1_CPU" != x; then + HOTSPOT_$1_CPU_DEFINE=$(echo $OPENJDK_$1_CPU | tr a-z A-Z) fi AC_SUBST(HOTSPOT_$1_CPU_DEFINE)
--- a/corba/.hgtags Wed Sep 21 01:33:21 2016 -0700 +++ b/corba/.hgtags Wed Sep 21 08:38:21 2016 +0000 @@ -376,3 +376,6 @@ f7e1d5337c2e550fe553df7a3886bbed80292ecd jdk-9+131 1ab4b9399c4cba584f66c1c088188f2f565fbf9c jdk-9+132 2021bfedf1c478a4808a7711a6090682a12f4c0e jdk-9+133 +1a497f5ca0cfd88115cc7daa8af8a62b8741caf2 jdk-9+134 +094d0db606db976045f594dba47d4593b715cc81 jdk-9+135 +aa053a3faf266c12b4fd5272da431a3e08e4a3e3 jdk-9+136
--- a/hotspot/.hgtags Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/.hgtags Wed Sep 21 08:38:21 2016 +0000 @@ -536,3 +536,6 @@ 943bf73b49c33c2d7cbd796f6a4ae3c7a00ae932 jdk-9+131 713951c08aa26813375175c2ab6cc99ff2a56903 jdk-9+132 a25e0fb6033245ab075136e744d362ce765464cd jdk-9+133 +b8b694c6b4d2ab0939aed7adaf0eec1ac321a085 jdk-9+134 +3b1c4562953db47e36b237a500f368d5c9746d47 jdk-9+135 +a20da289f646ee44440695b81abc0548330e4ca7 jdk-9+136
--- a/hotspot/make/test/JtregNative.gmk Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/make/test/JtregNative.gmk Wed Sep 21 08:38:21 2016 +0000 @@ -44,6 +44,7 @@ $(HOTSPOT_TOPDIR)/test/native_sanity \ $(HOTSPOT_TOPDIR)/test/runtime/jni/8025979 \ $(HOTSPOT_TOPDIR)/test/runtime/jni/8033445 \ + $(HOTSPOT_TOPDIR)/test/runtime/jni/checked \ $(HOTSPOT_TOPDIR)/test/runtime/jni/ToStringInInterfaceTest \ $(HOTSPOT_TOPDIR)/test/runtime/modules/getModuleJNI \ $(HOTSPOT_TOPDIR)/test/runtime/SameObject \
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -326,7 +326,8 @@ } void InterpreterMacroAssembler::push_l(Register r) { - str(r, pre(esp, 2 * -wordSize)); + str(zr, pre(esp, -wordSize)); + str(r, pre(esp, -wordSize)); } void InterpreterMacroAssembler::pop_f(FloatRegister r) {
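The push_l rewrite above splits the two-slot long push so the unused upper slot is explicitly zeroed rather than left holding stale data. A minimal standalone sketch of the idea in plain C++ (the ExprStack type and method names are illustrative, not HotSpot code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Toy expression stack: one machine word per slot (illustrative only).
    struct ExprStack {
      std::vector<uint64_t> slots;

      // A 64-bit push occupies two slots. Mirroring the patched aarch64
      // push_l, the spare slot is written with an explicit zero instead of
      // being left uninitialized, so anything walking the stack later sees
      // a defined value.
      void push_long(int64_t v) {
        slots.push_back(0);                        // str(zr, pre(esp, -wordSize))
        slots.push_back(static_cast<uint64_t>(v)); // str(r,  pre(esp, -wordSize))
      }

      int64_t pop_long() {
        int64_t v = static_cast<int64_t>(slots.back());
        slots.pop_back();
        slots.pop_back(); // discard the zeroed filler slot
        return v;
      }
    };

    int main() {
      ExprStack s;
      s.push_long(-42);
      assert(s.pop_long() == -42);
      return 0;
    }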
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -2041,6 +2041,11 @@ __ verify_oop(r0); } + if (CheckJNICalls) { + // clear_pending_jni_exception_check + __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset())); + } + if (!is_critical_native) { // reset handle block __ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -1355,6 +1355,11 @@ // reset_last_Java_frame __ reset_last_Java_frame(true); + if (CheckJNICalls) { + // clear_pending_jni_exception_check + __ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset())); + } + // reset handle block __ ldr(t, Address(rthread, JavaThread::active_handles_offset())); __ str(zr, Address(t, JNIHandleBlock::top_offset_in_bytes()));
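The CheckJNICalls hunks in the two files above (and their sparc/x86 counterparts further down) store zero into the thread's pending_jni_exception_check_fn slot whenever a native method returns to Java, so a -Xcheck:jni "owed ExceptionCheck" warning cannot leak into the next JNI call. A hedged sketch of that bookkeeping pattern in plain C++ (FakeThread and the function names are illustrative):

    #include <cstdio>

    // Sketch of the checked-JNI bookkeeping these stubs maintain; the real
    // field is JavaThread::pending_jni_exception_check_fn. All names here
    // are illustrative.
    struct FakeThread {
      const char* pending_jni_exception_check_fn = nullptr;
    };

    // A checked JNI wrapper records that the caller still owes an
    // ExceptionCheck after a call that can raise an exception.
    void checked_call_that_may_throw(FakeThread* t) {
      t->pending_jni_exception_check_fn = "CallObjectMethod";
    }

    // On the native-to-Java return path the wrapper clears the marker,
    // which is what the 'str(zr, ...)' / 'movptr(..., NULL_WORD)' stores
    // above do, so a stale warning is not attributed to the next call.
    void return_to_java(FakeThread* t) {
      t->pending_jni_exception_check_fn = nullptr;
    }

    int main() {
      FakeThread t;
      checked_call_that_may_throw(&t);
      return_to_java(&t);
      std::printf("pending marker cleared: %s\n",
                  t.pending_jni_exception_check_fn == nullptr ? "yes" : "no");
      return 0;
    }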
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -57,10 +57,12 @@ #ifdef _LP64 // Stack slots are 2X larger in LP64 than in the 32 bit VM. +define_pd_global(intx, CompilerThreadStackSize, 1024); define_pd_global(intx, ThreadStackSize, 1024); define_pd_global(intx, VMThreadStackSize, 1024); #define DEFAULT_STACK_SHADOW_PAGES (20 DEBUG_ONLY(+2)) #else +define_pd_global(intx, CompilerThreadStackSize, 512); define_pd_global(intx, ThreadStackSize, 512); define_pd_global(intx, VMThreadStackSize, 512); #define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -359,7 +359,7 @@ #ifdef _LP64 stx(l, r1, offset); // store something more useful here - debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);) + stx(G0, r1, offset+Interpreter::stackElementSize); #else st(l, r1, offset); st(l->successor(), r1, offset + Interpreter::stackElementSize);
--- a/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -2765,6 +2765,11 @@ __ verify_oop(I0); } + if (CheckJNICalls) { + // clear_pending_jni_exception_check + __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset()); + } + if (!is_critical_native) { // reset handle block __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
--- a/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/sparc/vm/templateInterpreterGenerator_sparc.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -1487,6 +1487,11 @@ __ set(_thread_in_Java, G3_scratch); __ st(G3_scratch, thread_state); + if (CheckJNICalls) { + // clear_pending_jni_exception_check + __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset()); + } + // reset handle block __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch); __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/x86/vm/interp_masm_x86.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -611,7 +611,8 @@ void InterpreterMacroAssembler::push_l(Register r) { subptr(rsp, 2 * wordSize); - movq(Address(rsp, 0), r); + movptr(Address(rsp, Interpreter::expr_offset_in_bytes(0)), r ); + movptr(Address(rsp, Interpreter::expr_offset_in_bytes(1)), NULL_WORD ); } void InterpreterMacroAssembler::pop(TosState state) {
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -2236,6 +2236,11 @@ __ verify_oop(rax); } + if (CheckJNICalls) { + // clear_pending_jni_exception_check + __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD); + } + if (!is_critical_native) { // reset handle block __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -2589,6 +2589,11 @@ __ verify_oop(rax); } + if (CheckJNICalls) { + // clear_pending_jni_exception_check + __ movptr(Address(r15_thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD); + } + if (!is_critical_native) { // reset handle block __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -1169,6 +1169,11 @@ // reset_last_Java_frame __ reset_last_Java_frame(thread, true); + if (CheckJNICalls) { + // clear_pending_jni_exception_check + __ movptr(Address(thread, JavaThread::pending_jni_exception_check_fn_offset()), NULL_WORD); + } + // reset handle block __ movptr(t, Address(thread, JavaThread::active_handles_offset())); __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Wed Sep 21 08:38:21 2016 +0000 @@ -68,6 +68,7 @@ Type type = db.lookupType("InstanceKlass"); arrayKlasses = new MetadataField(type.getAddressField("_array_klasses"), 0); methods = type.getAddressField("_methods"); + defaultMethods = type.getAddressField("_default_methods"); methodOrdering = type.getAddressField("_method_ordering"); localInterfaces = type.getAddressField("_local_interfaces"); transitiveInterfaces = type.getAddressField("_transitive_interfaces"); @@ -128,6 +129,7 @@ private static MetadataField arrayKlasses; private static AddressField methods; + private static AddressField defaultMethods; private static AddressField methodOrdering; private static AddressField localInterfaces; private static AddressField transitiveInterfaces; @@ -335,6 +337,20 @@ // Accessors for declared fields public Klass getArrayKlasses() { return (Klass) arrayKlasses.getValue(this); } public MethodArray getMethods() { return new MethodArray(methods.getValue(getAddress())); } + + public MethodArray getDefaultMethods() { + if (defaultMethods != null) { + Address addr = defaultMethods.getValue(getAddress()); + if ((addr != null) && (addr.getAddressAt(0) != null)) { + return new MethodArray(addr); + } else { + return null; + } + } else { + return null; + } + } + public KlassArray getLocalInterfaces() { return new KlassArray(localInterfaces.getValue(getAddress())); } public KlassArray getTransitiveInterfaces() { return new KlassArray(transitiveInterfaces.getValue(getAddress())); } public int getJavaFieldsCount() { return (int) javaFieldsCount.getValue(this); }
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/GrowableArray.java Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/utilities/GrowableArray.java Wed Sep 21 08:38:21 2016 +0000 @@ -65,4 +65,7 @@ super(addr); virtualConstructor = v; } + public Address getData() { + return dataField.getValue(getAddress()); + } }
--- a/hotspot/src/os/aix/vm/os_aix.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/aix/vm/os_aix.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -847,7 +847,8 @@ return 0; } -bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { +bool os::create_thread(Thread* thread, ThreadType thr_type, + size_t req_stack_size) { assert(thread->osthread() == NULL, "caller responsible"); @@ -880,37 +881,12 @@ guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???"); // calculate stack size if it's not specified by caller - if (stack_size == 0) { - stack_size = os::Aix::default_stack_size(thr_type); - - switch (thr_type) { - case os::java_thread: - // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss. - assert(JavaThread::stack_size_at_create() > 0, "this should be set"); - stack_size = JavaThread::stack_size_at_create(); - break; - case os::compiler_thread: - if (CompilerThreadStackSize > 0) { - stack_size = (size_t)(CompilerThreadStackSize * K); - break; - } // else fall through: - // use VMThreadStackSize if CompilerThreadStackSize is not defined - case os::vm_thread: - case os::pgc_thread: - case os::cgc_thread: - case os::watcher_thread: - if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); - break; - } - } - - stack_size = MAX2(stack_size, os::Aix::min_stack_allowed); + size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size); pthread_attr_setstacksize(&attr, stack_size); pthread_t tid; int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread); - char buf[64]; if (ret == 0) { log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ", @@ -3593,32 +3569,11 @@ Aix::signal_sets_init(); Aix::install_signal_handlers(); - // Check minimum allowable stack size for thread creation and to initialize - // the java system classes, including StackOverflowError - depends on page - // size. Add two 4K pages for compiler2 recursion in main thread. - // Add in 4*BytesPerWord 4K pages to account for VM stack during - // class initialization depending on 32 or 64 bit VM. - os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed, - JavaThread::stack_guard_zone_size() + - JavaThread::stack_shadow_zone_size() + - (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K); - - os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::vm_page_size()); - - size_t threadStackSizeInBytes = ThreadStackSize * K; - if (threadStackSizeInBytes != 0 && - threadStackSizeInBytes < os::Aix::min_stack_allowed) { - tty->print_cr("\nThe stack size specified is too small, " - "Specify at least %dk", - os::Aix::min_stack_allowed / K); + // Check and sets minimum stack sizes against command line options + if (Posix::set_minimum_stack_sizes() == JNI_ERR) { return JNI_ERR; } - // Make the stack size a multiple of the page size so that - // the yellow/red zones can be guarded. - // Note that this can be 0, if no default stacksize was set. - JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size())); - if (UseNUMA) { UseNUMA = false; warning("NUMA optimizations are not available on this OS.");
--- a/hotspot/src/os/aix/vm/os_aix.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/aix/vm/os_aix.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2013, 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -140,14 +140,6 @@ // libpthread version string static void libpthread_init(); - // Minimum stack size a thread can be created with (allowing - // the VM to completely create the thread and enter user code) - static size_t min_stack_allowed; - - // Return default stack size or guard size for the specified thread type - static size_t default_stack_size(os::ThreadType thr_type); - static size_t default_guard_size(os::ThreadType thr_type); - // Function returns true if we run on OS/400 (pase), false if we run // on AIX. static bool on_pase() {
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -734,7 +734,8 @@ return 0; } -bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { +bool os::create_thread(Thread* thread, ThreadType thr_type, + size_t req_stack_size) { assert(thread->osthread() == NULL, "caller responsible"); // Allocate the OSThread object @@ -757,32 +758,7 @@ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); // calculate stack size if it's not specified by caller - if (stack_size == 0) { - stack_size = os::Bsd::default_stack_size(thr_type); - - switch (thr_type) { - case os::java_thread: - // Java threads use ThreadStackSize which default value can be - // changed with the flag -Xss - assert(JavaThread::stack_size_at_create() > 0, "this should be set"); - stack_size = JavaThread::stack_size_at_create(); - break; - case os::compiler_thread: - if (CompilerThreadStackSize > 0) { - stack_size = (size_t)(CompilerThreadStackSize * K); - break; - } // else fall through: - // use VMThreadStackSize if CompilerThreadStackSize is not defined - case os::vm_thread: - case os::pgc_thread: - case os::cgc_thread: - case os::watcher_thread: - if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); - break; - } - } - - stack_size = MAX2(stack_size, os::Bsd::min_stack_allowed); + size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size); pthread_attr_setstacksize(&attr, stack_size); ThreadState state; @@ -3502,32 +3478,11 @@ Bsd::signal_sets_init(); Bsd::install_signal_handlers(); - // Check minimum allowable stack size for thread creation and to initialize - // the java system classes, including StackOverflowError - depends on page - // size. Add two 4K pages for compiler2 recursion in main thread. - // Add in 4*BytesPerWord 4K pages to account for VM stack during - // class initialization depending on 32 or 64 bit VM. - os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed, - JavaThread::stack_guard_zone_size() + - JavaThread::stack_shadow_zone_size() + - (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K); - - os::Bsd::min_stack_allowed = align_size_up(os::Bsd::min_stack_allowed, os::vm_page_size()); - - size_t threadStackSizeInBytes = ThreadStackSize * K; - if (threadStackSizeInBytes != 0 && - threadStackSizeInBytes < os::Bsd::min_stack_allowed) { - tty->print_cr("\nThe stack size specified is too small, " - "Specify at least %dk", - os::Bsd::min_stack_allowed/ K); + // Check and sets minimum stack sizes against command line options + if (Posix::set_minimum_stack_sizes() == JNI_ERR) { return JNI_ERR; } - // Make the stack size a multiple of the page size so that - // the yellow/red zones can be guarded. - JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, - vm_page_size())); - if (MaxFDLimit) { // set the number of file descriptors to max. print out error // if getrlimit/setrlimit fails but continue regardless.
--- a/hotspot/src/os/bsd/vm/os_bsd.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/bsd/vm/os_bsd.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -120,14 +120,6 @@ static struct sigaction *get_chained_signal_action(int sig); static bool chained_handler(int sig, siginfo_t* siginfo, void* context); - // Minimum stack size a thread can be created with (allowing - // the VM to completely create the thread and enter user code) - static size_t min_stack_allowed; - - // Return default stack size or guard size for the specified thread type - static size_t default_stack_size(os::ThreadType thr_type); - static size_t default_guard_size(os::ThreadType thr_type); - // Real-time clock functions static void clock_init(void);
--- a/hotspot/src/os/linux/vm/os_linux.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -701,7 +701,7 @@ } bool os::create_thread(Thread* thread, ThreadType thr_type, - size_t stack_size) { + size_t req_stack_size) { assert(thread->osthread() == NULL, "caller responsible"); // Allocate the OSThread object @@ -723,34 +723,8 @@ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); - // stack size // calculate stack size if it's not specified by caller - if (stack_size == 0) { - stack_size = os::Linux::default_stack_size(thr_type); - - switch (thr_type) { - case os::java_thread: - // Java threads use ThreadStackSize which default value can be - // changed with the flag -Xss - assert(JavaThread::stack_size_at_create() > 0, "this should be set"); - stack_size = JavaThread::stack_size_at_create(); - break; - case os::compiler_thread: - if (CompilerThreadStackSize > 0) { - stack_size = (size_t)(CompilerThreadStackSize * K); - break; - } // else fall through: - // use VMThreadStackSize if CompilerThreadStackSize is not defined - case os::vm_thread: - case os::pgc_thread: - case os::cgc_thread: - case os::watcher_thread: - if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); - break; - } - } - - stack_size = MAX2(stack_size, os::Linux::min_stack_allowed); + size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size); pthread_attr_setstacksize(&attr, stack_size); // glibc guard page @@ -956,10 +930,9 @@ // bogus value for initial thread. void os::Linux::capture_initial_stack(size_t max_size) { // stack size is the easy part, get it from RLIMIT_STACK - size_t stack_size; struct rlimit rlim; getrlimit(RLIMIT_STACK, &rlim); - stack_size = rlim.rlim_cur; + size_t stack_size = rlim.rlim_cur; // 6308388: a bug in ld.so will relocate its own .data section to the // lower end of primordial stack; reduce ulimit -s value a little bit @@ -4793,32 +4766,10 @@ Linux::signal_sets_init(); Linux::install_signal_handlers(); - // Check minimum allowable stack size for thread creation and to initialize - // the java system classes, including StackOverflowError - depends on page - // size. Add two 4K pages for compiler2 recursion in main thread. - // Add in 4*BytesPerWord 4K pages to account for VM stack during - // class initialization depending on 32 or 64 bit VM. - os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed, - JavaThread::stack_guard_zone_size() + - JavaThread::stack_shadow_zone_size() + - (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K); - - os::Linux::min_stack_allowed = align_size_up(os::Linux::min_stack_allowed, os::vm_page_size()); - - size_t threadStackSizeInBytes = ThreadStackSize * K; - if (threadStackSizeInBytes != 0 && - threadStackSizeInBytes < os::Linux::min_stack_allowed) { - tty->print_cr("\nThe stack size specified is too small, " - "Specify at least " SIZE_FORMAT "k", - os::Linux::min_stack_allowed/ K); + // Check and sets minimum stack sizes against command line options + if (Posix::set_minimum_stack_sizes() == JNI_ERR) { return JNI_ERR; } - - // Make the stack size a multiple of the page size so that - // the yellow/red zones can be guarded. - JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, - vm_page_size())); - Linux::capture_initial_stack(JavaThread::stack_size_at_create()); #if defined(IA32)
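The Linux hunk also tidies capture_initial_stack(), which derives the primordial thread's stack size from the soft RLIMIT_STACK limit. A self-contained illustration of that system call (standard POSIX API, nothing HotSpot-specific):

    #include <cstdio>
    #include <sys/resource.h>

    // How capture_initial_stack() obtains the primordial thread's stack
    // size: the soft RLIMIT_STACK value (i.e. `ulimit -s`). The real code
    // additionally handles RLIM_INFINITY and trims for the ld.so quirk
    // mentioned in the hunk.
    int main() {
      struct rlimit rlim;
      if (getrlimit(RLIMIT_STACK, &rlim) != 0) {
        std::perror("getrlimit");
        return 1;
      }
      size_t stack_size = rlim.rlim_cur;
      std::printf("initial stack size: %zu bytes\n", stack_size);
      return 0;
    }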
--- a/hotspot/src/os/linux/vm/os_linux.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/linux/vm/os_linux.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -170,12 +170,8 @@ static void libpthread_init(); static bool libnuma_init(); static void* libnuma_dlsym(void* handle, const char* name); - // Minimum stack size a thread can be created with (allowing - // the VM to completely create the thread and enter user code) - static size_t min_stack_allowed; - // Return default stack size or guard size for the specified thread type - static size_t default_stack_size(os::ThreadType thr_type); + // Return default guard size for the specified thread type static size_t default_guard_size(os::ThreadType thr_type); static void capture_initial_stack(size_t max_size);
--- a/hotspot/src/os/posix/vm/os_posix.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/posix/vm/os_posix.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -1099,6 +1099,123 @@ return buf; } +// Check minimum allowable stack sizes for thread creation and to initialize +// the java system classes, including StackOverflowError - depends on page +// size. Add two 4K pages for compiler2 recursion in main thread. +// Add in 4*BytesPerWord 4K pages to account for VM stack during +// class initialization depending on 32 or 64 bit VM. +jint os::Posix::set_minimum_stack_sizes() { + _java_thread_min_stack_allowed = MAX2(_java_thread_min_stack_allowed, + JavaThread::stack_guard_zone_size() + + JavaThread::stack_shadow_zone_size() + + (4 * BytesPerWord COMPILER2_PRESENT(+ 2)) * 4 * K); + + _java_thread_min_stack_allowed = align_size_up(_java_thread_min_stack_allowed, vm_page_size()); + + size_t stack_size_in_bytes = ThreadStackSize * K; + if (stack_size_in_bytes != 0 && + stack_size_in_bytes < _java_thread_min_stack_allowed) { + // The '-Xss' and '-XX:ThreadStackSize=N' options both set + // ThreadStackSize so we go with "Java thread stack size" instead + // of "ThreadStackSize" to be more friendly. + tty->print_cr("\nThe Java thread stack size specified is too small. " + "Specify at least " SIZE_FORMAT "k", + _java_thread_min_stack_allowed / K); + return JNI_ERR; + } + +#ifdef SOLARIS + // For 64kbps there will be a 64kb page size, which makes + // the usable default stack size quite a bit less. Increase the + // stack for 64kb (or any > than 8kb) pages, this increases + // virtual memory fragmentation (since we're not creating the + // stack on a power of 2 boundary. The real fix for this + // should be to fix the guard page mechanism. + + if (vm_page_size() > 8*K) { + stack_size_in_bytes = (stack_size_in_bytes != 0) + ? stack_size_in_bytes + + JavaThread::stack_red_zone_size() + + JavaThread::stack_yellow_zone_size() + : 0; + ThreadStackSize = stack_size_in_bytes/K; + } +#endif // SOLARIS + + // Make the stack size a multiple of the page size so that + // the yellow/red zones can be guarded. + JavaThread::set_stack_size_at_create(round_to(stack_size_in_bytes, + vm_page_size())); + + _compiler_thread_min_stack_allowed = align_size_up(_compiler_thread_min_stack_allowed, vm_page_size()); + + stack_size_in_bytes = CompilerThreadStackSize * K; + if (stack_size_in_bytes != 0 && + stack_size_in_bytes < _compiler_thread_min_stack_allowed) { + tty->print_cr("\nThe CompilerThreadStackSize specified is too small. " + "Specify at least " SIZE_FORMAT "k", + _compiler_thread_min_stack_allowed / K); + return JNI_ERR; + } + + _vm_internal_thread_min_stack_allowed = align_size_up(_vm_internal_thread_min_stack_allowed, vm_page_size()); + + stack_size_in_bytes = VMThreadStackSize * K; + if (stack_size_in_bytes != 0 && + stack_size_in_bytes < _vm_internal_thread_min_stack_allowed) { + tty->print_cr("\nThe VMThreadStackSize specified is too small. " + "Specify at least " SIZE_FORMAT "k", + _vm_internal_thread_min_stack_allowed / K); + return JNI_ERR; + } + return JNI_OK; +} + +// Called when creating the thread. 
The minimum stack sizes have already been calculated. +size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) { + size_t stack_size; + if (req_stack_size == 0) { + stack_size = default_stack_size(thr_type); + } else { + stack_size = req_stack_size; + } + + switch (thr_type) { + case os::java_thread: + // Java threads use ThreadStackSize, whose default value can be + // changed with the flag -Xss + if (req_stack_size == 0 && JavaThread::stack_size_at_create() > 0) { + // no requested size and we have a more specific default value + stack_size = JavaThread::stack_size_at_create(); + } + stack_size = MAX2(stack_size, + _java_thread_min_stack_allowed); + break; + case os::compiler_thread: + if (req_stack_size == 0 && CompilerThreadStackSize > 0) { + // no requested size and we have a more specific default value + stack_size = (size_t)(CompilerThreadStackSize * K); + } + stack_size = MAX2(stack_size, + _compiler_thread_min_stack_allowed); + break; + case os::vm_thread: + case os::pgc_thread: + case os::cgc_thread: + case os::watcher_thread: + default: // presume the unknown thr_type is a VM internal + if (req_stack_size == 0 && VMThreadStackSize > 0) { + // no requested size and we have a more specific default value + stack_size = (size_t)(VMThreadStackSize * K); + } + + stack_size = MAX2(stack_size, + _vm_internal_thread_min_stack_allowed); + break; + } + + return stack_size; +} os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
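Condensed model of the selection order implemented by the new os::Posix::get_initial_stack_size() above: an explicit request wins, otherwise the type-specific -XX option, otherwise the platform default, and the per-type minimum is always enforced. The sketch below is illustrative C++ with stand-in globals, not the HotSpot declarations:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    enum ThreadType { java_thread, compiler_thread, vm_thread };

    const size_t K = 1024;
    size_t CompilerThreadStackSize = 4096 * K; // stand-in for -XX:CompilerThreadStackSize
    size_t java_min = 64 * K, compiler_min = 64 * K, vm_min = 64 * K;

    size_t default_stack_size(ThreadType t) {
      return t == compiler_thread ? 4 * K * K : 1 * K * K;
    }

    // Same precedence as the real function: explicit request, then the
    // type-specific option, then the default; the minimum is always
    // enforced. (For Java threads the real code consults
    // JavaThread::stack_size_at_create() at this point.)
    size_t get_initial_stack_size(ThreadType t, size_t req) {
      size_t sz = req ? req : default_stack_size(t);
      switch (t) {
        case java_thread:
          return std::max(sz, java_min);
        case compiler_thread:
          if (req == 0 && CompilerThreadStackSize > 0) sz = CompilerThreadStackSize;
          return std::max(sz, compiler_min);
        default: // VM-internal threads (vm/pgc/cgc/watcher)
          return std::max(sz, vm_min);
      }
    }

    int main() {
      std::printf("%zu\n", get_initial_stack_size(compiler_thread, 0)); // 4M
      return 0;
    }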
--- a/hotspot/src/os/posix/vm/os_posix.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/posix/vm/os_posix.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,18 @@ static void print_libversion_info(outputStream* st); static void print_load_average(outputStream* st); + // Minimum stack size a thread can be created with (allowing + // the VM to completely create the thread and enter user code) + static size_t _compiler_thread_min_stack_allowed; + static size_t _java_thread_min_stack_allowed; + static size_t _vm_internal_thread_min_stack_allowed; + public: + // Return default stack size for the specified thread type + static size_t default_stack_size(os::ThreadType thr_type); + // Check and sets minimum stack sizes + static jint set_minimum_stack_sizes(); + static size_t get_initial_stack_size(ThreadType thr_type, size_t req_stack_size); // Returns true if signal is valid. static bool is_valid_signal(int sig);
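set_minimum_stack_sizes(), declared above, applies the same validation to each stack option: round the per-type minimum up to a page boundary and reject a nonzero request below it, returning JNI_ERR. A minimal standalone sketch of that check (align_up and the 100k floor are illustrative):

    #include <cstdio>
    #include <unistd.h>

    // Round v up to the next multiple of unit (unit must be a power of
    // two, which a page size is).
    static size_t align_up(size_t v, size_t unit) {
      return (v + unit - 1) & ~(unit - 1);
    }

    int main() {
      size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      size_t min_allowed = align_up(100 * 1024, page); // illustrative 100k floor
      size_t requested   = 64 * 1024;                  // e.g. -Xss64k
      if (requested != 0 && requested < min_allowed) {
        std::printf("The Java thread stack size specified is too small. "
                    "Specify at least %zuk\n", min_allowed / 1024);
        return 1; // analogue of returning JNI_ERR
      }
      return 0;
    }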
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -917,8 +917,15 @@ return buf; } +// return default stack size for thr_type +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { + // default stack size when not specified by caller is 1M (2M for LP64) + size_t s = (BytesPerWord >> 2) * K * K; + return s; +} + bool os::create_thread(Thread* thread, ThreadType thr_type, - size_t stack_size) { + size_t req_stack_size) { // Allocate the OSThread object OSThread* osthread = new OSThread(NULL, NULL); if (osthread == NULL) { @@ -953,31 +960,8 @@ tty->print_cr("In create_thread, creating a %s thread\n", thrtyp); } - // Calculate stack size if it's not specified by caller. - if (stack_size == 0) { - // The default stack size 1M (2M for LP64). - stack_size = (BytesPerWord >> 2) * K * K; - - switch (thr_type) { - case os::java_thread: - // Java threads use ThreadStackSize which default value can be changed with the flag -Xss - if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create(); - break; - case os::compiler_thread: - if (CompilerThreadStackSize > 0) { - stack_size = (size_t)(CompilerThreadStackSize * K); - break; - } // else fall through: - // use VMThreadStackSize if CompilerThreadStackSize is not defined - case os::vm_thread: - case os::pgc_thread: - case os::cgc_thread: - case os::watcher_thread: - if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); - break; - } - } - stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed); + // calculate stack size if it's not specified by caller + size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size); // Initial state is ALLOCATED but not INITIALIZED osthread->set_state(ALLOCATED); @@ -4400,7 +4384,12 @@ // Constant minimum stack size allowed. It must be at least // the minimum of what the OS supports (thr_min_stack()), and // enough to allow the thread to get to user bytecode execution. - Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed); + Posix::_compiler_thread_min_stack_allowed = MAX2(thr_min_stack(), + Posix::_compiler_thread_min_stack_allowed); + Posix::_java_thread_min_stack_allowed = MAX2(thr_min_stack(), + Posix::_java_thread_min_stack_allowed); + Posix::_vm_internal_thread_min_stack_allowed = MAX2(thr_min_stack(), + Posix::_vm_internal_thread_min_stack_allowed); // dynamic lookup of functions that may not be available in our lowest // supported Solaris release @@ -4445,47 +4434,11 @@ log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page)); } - // Check minimum allowable stack size for thread creation and to initialize - // the java system classes, including StackOverflowError - depends on page - // size. Add two 4K pages for compiler2 recursion in main thread. - // Add in 4*BytesPerWord 4K pages to account for VM stack during - // class initialization depending on 32 or 64 bit VM. 
- os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed, - JavaThread::stack_guard_zone_size() + - JavaThread::stack_shadow_zone_size() + - (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K); - - os::Solaris::min_stack_allowed = align_size_up(os::Solaris::min_stack_allowed, os::vm_page_size()); - - size_t threadStackSizeInBytes = ThreadStackSize * K; - if (threadStackSizeInBytes != 0 && - threadStackSizeInBytes < os::Solaris::min_stack_allowed) { - tty->print_cr("\nThe stack size specified is too small, Specify at least %dk", - os::Solaris::min_stack_allowed/K); + // Check and sets minimum stack sizes against command line options + if (Posix::set_minimum_stack_sizes() == JNI_ERR) { return JNI_ERR; } - // For 64kbps there will be a 64kb page size, which makes - // the usable default stack size quite a bit less. Increase the - // stack for 64kb (or any > than 8kb) pages, this increases - // virtual memory fragmentation (since we're not creating the - // stack on a power of 2 boundary. The real fix for this - // should be to fix the guard page mechanism. - - if (vm_page_size() > 8*K) { - threadStackSizeInBytes = (threadStackSizeInBytes != 0) - ? threadStackSizeInBytes + - JavaThread::stack_red_zone_size() + - JavaThread::stack_yellow_zone_size() - : 0; - ThreadStackSize = threadStackSizeInBytes/K; - } - - // Make the stack size a multiple of the page size so that - // the yellow/red zones can be guarded. - JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, - vm_page_size())); - Solaris::libthread_init(); if (UseNUMA) {
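The new os::Posix::default_stack_size() for Solaris encodes its 1M/2M default compactly: with BytesPerWord equal to 4, (BytesPerWord >> 2) * K * K yields 1M; with 8 it yields 2M. A one-screen check of that arithmetic:

    #include <cstddef>
    #include <cstdio>

    // (BytesPerWord >> 2) * K * K: 4 >> 2 == 1 (1M on 32-bit),
    // 8 >> 2 == 2 (2M on 64-bit).
    int main() {
      const size_t K = 1024;
      for (size_t bytes_per_word : {4u, 8u}) {
        size_t s = (bytes_per_word >> 2) * K * K;
        std::printf("BytesPerWord=%zu -> default stack %zuM\n",
                    bytes_per_word, s / (K * K));
      }
      return 0;
    }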
--- a/hotspot/src/os/solaris/vm/os_solaris.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/solaris/vm/os_solaris.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -292,10 +292,6 @@ static jint _os_thread_limit; static volatile jint _os_thread_count; - // Minimum stack size a thread can be created with (allowing - // the VM to completely create the thread and enter user code) - - static size_t min_stack_allowed; // Stack overflow handling
--- a/hotspot/src/os/windows/vm/os_windows.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os/windows/vm/os_windows.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -4215,7 +4215,7 @@ min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size()); if (actual_reserve_size < min_stack_allowed) { - tty->print_cr("\nThe stack size specified is too small, " + tty->print_cr("\nThe Java thread stack size specified is too small. " "Specify at least %dk", min_stack_allowed / K); return JNI_ERR;
--- a/hotspot/src/os_cpu/aix_ppc/vm/globals_aix_ppc.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/aix_ppc/vm/globals_aix_ppc.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -33,10 +33,6 @@ define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default define_pd_global(intx, VMThreadStackSize, 2048); -// if we set CompilerThreadStackSize to a value different than 0, it will -// be used in os::create_thread(). Otherwise, due the strange logic in os::create_thread(), -// the stack size for compiler threads will default to VMThreadStackSize, although it -// is defined to 4M in os::Aix::default_stack_size()! define_pd_global(intx, CompilerThreadStackSize, 4096); // Allow extra space in DEBUG builds for asserts.
--- a/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -192,8 +192,10 @@ intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer()); // hack. frame topframe(csp, (address)0x8); - // return sender of current topframe which hopefully has pc != NULL. - return os::get_sender_for_C_frame(&topframe); + // Return sender of sender of current topframe which hopefully + // both have pc != NULL. + frame tmp = os::get_sender_for_C_frame(&topframe); + return os::get_sender_for_C_frame(&tmp); } // Utility functions @@ -533,23 +535,17 @@ //////////////////////////////////////////////////////////////////////////////// // thread stack -size_t os::Aix::min_stack_allowed = 128*K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 128 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K; // return default stack size for thr_type -size_t os::Aix::default_stack_size(os::ThreadType thr_type) { +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { // default stack size (compiler thread needs larger stack) - // Notice that the setting for compiler threads here have no impact - // because of the strange 'fallback logic' in os::create_thread(). - // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to - // specify a different stack size for compiler threads! size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); return s; } -size_t os::Aix::default_guard_size(os::ThreadType thr_type) { - return 2 * page_size(); -} - ///////////////////////////////////////////////////////////////////////////// // helper functions for fatal error handler
--- a/hotspot/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,9 +31,11 @@ // define_pd_global(bool, DontYieldALot, false); #ifdef AMD64 +define_pd_global(intx, CompilerThreadStackSize, 1024); define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default define_pd_global(intx, VMThreadStackSize, 1024); #else +define_pd_global(intx, CompilerThreadStackSize, 512); // ThreadStackSize 320 allows a couple of test cases to run while // keeping the number of threads that can be created high. System // default ThreadStackSize appears to be 512 which is too big. @@ -41,7 +43,6 @@ define_pd_global(intx, VMThreadStackSize, 512); #endif // AMD64 -define_pd_global(intx, CompilerThreadStackSize, 0); define_pd_global(size_t, JVMInvokeMethodSlack, 8192);
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -838,9 +838,13 @@ // thread stack #ifdef AMD64 -size_t os::Bsd::min_stack_allowed = 64 * K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K; #else -size_t os::Bsd::min_stack_allowed = (48 DEBUG_ONLY(+4))*K; +size_t os::Posix::_compiler_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K; +size_t os::Posix::_java_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K; #ifdef __GNUC__ #define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;}) @@ -849,7 +853,7 @@ #endif // AMD64 // return default stack size for thr_type -size_t os::Bsd::default_stack_size(os::ThreadType thr_type) { +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { // default stack size (compiler thread needs larger stack) #ifdef AMD64 size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); @@ -859,11 +863,6 @@ return s; } -size_t os::Bsd::default_guard_size(os::ThreadType thr_type) { - // Creating guard page is very expensive. Java thread has HotSpot - // guard page, only enable glibc guard page for non-Java threads. - return (thr_type == java_thread ? 0 : page_size()); -} // Java thread: //
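The deleted os::Bsd::default_guard_size() embodied a policy worth noting: creating a libc guard page is expensive, and Java threads already get HotSpot's red/yellow guard zones, so the pthread guard page is requested only for non-Java threads. A standalone sketch of that policy using the real pthread attribute API (apply_guard_policy is an illustrative helper):

    #include <cstdio>
    #include <pthread.h>

    // Guard-page policy the removed function expressed: a zero guard size
    // for Java threads (HotSpot supplies its own guard zones), one page
    // for everything else.
    static void apply_guard_policy(pthread_attr_t* attr, bool is_java_thread,
                                   size_t page_size) {
      pthread_attr_setguardsize(attr, is_java_thread ? 0 : page_size);
    }

    int main() {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      apply_guard_policy(&attr, /*is_java_thread=*/true, 4096);
      size_t guard = 0;
      pthread_attr_getguardsize(&attr, &guard);
      std::printf("guard size for a Java thread: %zu\n", guard);
      pthread_attr_destroy(&attr);
      return 0;
    }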
--- a/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -282,9 +282,11 @@ /////////////////////////////////////////////////////////////////////////////// // thread stack -size_t os::Bsd::min_stack_allowed = 64 * K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K; -size_t os::Bsd::default_stack_size(os::ThreadType thr_type) { +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { #ifdef _LP64 size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); #else @@ -293,12 +295,6 @@ return s; } -size_t os::Bsd::default_guard_size(os::ThreadType thr_type) { - // Only enable glibc guard pages for non-Java threads - // (Java threads have HotSpot guard pages) - return (thr_type == java_thread ? 0 : page_size()); -} - static void current_stack_region(address *bottom, size_t *size) { address stack_bottom; address stack_top;
--- a/hotspot/src/os_cpu/linux_aarch64/vm/globals_linux_aarch64.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/linux_aarch64/vm/globals_linux_aarch64.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -33,7 +33,7 @@ define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default define_pd_global(intx, VMThreadStackSize, 2048); -define_pd_global(intx, CompilerThreadStackSize, 0); +define_pd_global(intx, CompilerThreadStackSize, 2048); define_pd_global(uintx,JVMInvokeMethodSlack, 8192);
--- a/hotspot/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -473,10 +473,12 @@ //////////////////////////////////////////////////////////////////////////////// // thread stack -size_t os::Linux::min_stack_allowed = 64 * K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K; // return default stack size for thr_type -size_t os::Linux::default_stack_size(os::ThreadType thr_type) { +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { // default stack size (compiler thread needs larger stack) size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); return s;
--- a/hotspot/src/os_cpu/linux_ppc/vm/globals_linux_ppc.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/linux_ppc/vm/globals_linux_ppc.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -33,10 +33,6 @@ define_pd_global(intx, ThreadStackSize, 2048); // 0 => use system default define_pd_global(intx, VMThreadStackSize, 2048); -// if we set CompilerThreadStackSize to a value different than 0, it will -// be used in os::create_thread(). Otherwise, due the strange logic in os::create_thread(), -// the stack size for compiler threads will default to VMThreadStackSize, although it -// is defined to 4M in os::Linux::default_stack_size()! define_pd_global(intx, CompilerThreadStackSize, 4096); // Allow extra space in DEBUG builds for asserts.
--- a/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -205,8 +205,10 @@ intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer()); // hack. frame topframe(csp, (address)0x8); - // return sender of current topframe which hopefully has pc != NULL. - return os::get_sender_for_C_frame(&topframe); + // Return sender of sender of current topframe which hopefully + // both have pc != NULL. + frame tmp = os::get_sender_for_C_frame(&topframe); + return os::get_sender_for_C_frame(&tmp); } // Utility functions @@ -533,15 +535,13 @@ //////////////////////////////////////////////////////////////////////////////// // thread stack -size_t os::Linux::min_stack_allowed = 128*K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 128 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K; // return default stack size for thr_type -size_t os::Linux::default_stack_size(os::ThreadType thr_type) { +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { // default stack size (compiler thread needs larger stack) - // Notice that the setting for compiler threads here have no impact - // because of the strange 'fallback logic' in os::create_thread(). - // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to - // specify a different stack size for compiler threads! size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K); return s; }
--- a/hotspot/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ // define_pd_global(size_t, JVMInvokeMethodSlack, 12288); -define_pd_global(intx, CompilerThreadStackSize, 0); // Used on 64 bit platforms for UseCompressedOops base address define_pd_global(size_t, HeapBaseMinAddress, CONST64(4)*G);
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -726,10 +726,12 @@ /////////////////////////////////////////////////////////////////////////////// // thread stack -size_t os::Linux::min_stack_allowed = 128 * K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 128 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K; // return default stack size for thr_type -size_t os::Linux::default_stack_size(os::ThreadType thr_type) { +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { // default stack size (compiler thread needs larger stack) size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); return s;
--- a/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,9 +30,11 @@ define_pd_global(bool, DontYieldALot, false); #ifdef AMD64 +define_pd_global(intx, CompilerThreadStackSize, 1024); define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default define_pd_global(intx, VMThreadStackSize, 1024); #else +define_pd_global(intx, CompilerThreadStackSize, 512); // ThreadStackSize 320 allows a couple of test cases to run while // keeping the number of threads that can be created high. System // default ThreadStackSize appears to be 512 which is too big. @@ -40,8 +42,6 @@ define_pd_global(intx, VMThreadStackSize, 512); #endif // AMD64 -define_pd_global(intx, CompilerThreadStackSize, 0); - define_pd_global(size_t, JVMInvokeMethodSlack, 8192); // Used on 64 bit platforms for UseCompressedOops base address
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -676,13 +676,17 @@ // thread stack #ifdef AMD64 -size_t os::Linux::min_stack_allowed = 64 * K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K; #else -size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K; +size_t os::Posix::_compiler_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K; +size_t os::Posix::_java_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = (48 DEBUG_ONLY(+ 4)) * K; #endif // AMD64 // return default stack size for thr_type -size_t os::Linux::default_stack_size(os::ThreadType thr_type) { +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { // default stack size (compiler thread needs larger stack) #ifdef AMD64 size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -307,9 +307,11 @@ /////////////////////////////////////////////////////////////////////////////// // thread stack -size_t os::Linux::min_stack_allowed = 64 * K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K; -size_t os::Linux::default_stack_size(os::ThreadType thr_type) { +size_t os::Posix::default_stack_size(os::ThreadType thr_type) { #ifdef _LP64 size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); #else
--- a/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,6 @@ // define_pd_global(size_t, JVMInvokeMethodSlack, 12288); -define_pd_global(intx, CompilerThreadStackSize, 0); // Used on 64 bit platforms for UseCompressedOops base address #ifdef _LP64
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -84,9 +84,13 @@ // Minimum stack size for the VM. It's easier to document a constant // but it's different for x86 and sparc because the page sizes are different. #ifdef _LP64 -size_t os::Solaris::min_stack_allowed = 128*K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 128 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 128 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 128 * K; #else -size_t os::Solaris::min_stack_allowed = 96*K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 96 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 96 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 96 * K; #endif int os::Solaris::max_register_window_saves_before_flushing() {
--- a/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,10 +30,12 @@ define_pd_global(bool, DontYieldALot, true); // Determined in the design center #ifdef AMD64 +define_pd_global(intx, CompilerThreadStackSize, 1024); define_pd_global(intx, ThreadStackSize, 1024); // 0 => use system default define_pd_global(intx, VMThreadStackSize, 1024); define_pd_global(size_t, JVMInvokeMethodSlack, 8*K); #else +define_pd_global(intx, CompilerThreadStackSize, 512); // ThreadStackSize 320 allows a couple of test cases to run while // keeping the number of threads that can be created high. define_pd_global(intx, ThreadStackSize, 320); @@ -41,7 +43,6 @@ define_pd_global(size_t, JVMInvokeMethodSlack, 10*K); #endif // AMD64 -define_pd_global(intx, CompilerThreadStackSize, 0); // Used on 64 bit platforms for UseCompressedOops base address define_pd_global(size_t, HeapBaseMinAddress, 2*G);
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -86,15 +86,19 @@ #define MAX_PATH (2 * K) -// Minimum stack size for the VM. It's easier to document a constant value +// Minimum stack sizes for the VM. It's easier to document a constant value // but it's different for x86 and sparc because the page sizes are different. #ifdef AMD64 -size_t os::Solaris::min_stack_allowed = 224*K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 394 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 224 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 224 * K; #define REG_SP REG_RSP #define REG_PC REG_RIP #define REG_FP REG_RBP #else -size_t os::Solaris::min_stack_allowed = 64*K; +size_t os::Posix::_compiler_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_java_thread_min_stack_allowed = 64 * K; +size_t os::Posix::_vm_internal_thread_min_stack_allowed = 64 * K; #define REG_SP UESP #define REG_PC EIP #define REG_FP EBP
--- a/hotspot/src/share/vm/classfile/classLoader.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/classLoader.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -85,6 +85,7 @@ typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n); typedef jboolean (JNICALL *ZipInflateFully_t)(void *inBuf, jlong inLen, void *outBuf, jlong outLen, char **pmsg); typedef jint (JNICALL *Crc32_t)(jint crc, const jbyte *buf, jint len); +typedef void (JNICALL *FreeEntry_t)(jzfile *zip, jzentry *entry); static ZipOpen_t ZipOpen = NULL; static ZipClose_t ZipClose = NULL; @@ -95,6 +96,7 @@ static canonicalize_fn_t CanonicalizeEntry = NULL; static ZipInflateFully_t ZipInflateFully = NULL; static Crc32_t Crc32 = NULL; +static FreeEntry_t FreeEntry = NULL; // Entry points for jimage.dll for loading jimage file entries @@ -150,6 +152,7 @@ GrowableArray<char*>* ClassLoader::_boot_modules_array = NULL; GrowableArray<char*>* ClassLoader::_platform_modules_array = NULL; SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL; +int ClassLoader::_num_patch_mod_prefixes = 0; #endif // helper routines @@ -319,6 +322,20 @@ FREE_C_HEAP_ARRAY(char, _zip_name); } +bool ClassPathZipEntry::stream_exists(const char* name) { + // enable call to C land + JavaThread* thread = JavaThread::current(); + ThreadToNativeFromVM ttn(thread); + // check whether zip archive contains name + jint name_len, filesize; + jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len); + if (entry != NULL) { + (*FreeEntry)(_zip, entry); + return true; + } + return false; +} + u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) { // enable call to C land JavaThread* thread = JavaThread::current(); @@ -640,7 +657,7 @@ struct stat st; if (os::stat(path, &st) == 0) { - if ((st.st_mode & S_IFREG) != S_IFREG) { // is directory + if ((st.st_mode & S_IFMT) != S_IFREG) { // is not a regular file if (!os::dir_is_empty(path)) { tty->print_cr("Error: non-empty directory '%s'", path); exit_with_path_failure("CDS allows only empty directories in archived classpaths", NULL); @@ -693,8 +710,6 @@ GrowableArray<ModulePatchPath*>* patch_mod_args = Arguments::get_patch_mod_prefix(); int num_of_entries = patch_mod_args->length(); - assert(!DumpSharedSpaces, "DumpSharedSpaces not supported with --patch-module"); - assert(!UseSharedSpaces, "UseSharedSpaces not supported with --patch-module"); // Set up the boot loader's _patch_mod_entries list _patch_mod_entries = new (ResourceObj::C_HEAP, mtModule) GrowableArray<ModuleClassPathList*>(num_of_entries, true); @@ -851,7 +866,7 @@ bool is_boot_append, TRAPS) { JavaThread* thread = JavaThread::current(); ClassPathEntry* new_entry = NULL; - if ((st->st_mode & S_IFREG) == S_IFREG) { + if ((st->st_mode & S_IFMT) == S_IFREG) { ResourceMark rm(thread); // Regular file, should be a zip or jimage file // Canonicalized filename @@ -914,7 +929,7 @@ // check for a regular file struct stat st; if (os::stat(path, &st) == 0) { - if ((st.st_mode & S_IFREG) == S_IFREG) { + if ((st.st_mode & S_IFMT) == S_IFREG) { char canonical_path[JVM_MAXPATHLEN]; if (get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) { char* error_msg = NULL; @@ -1068,6 +1083,7 @@ GetNextEntry = CAST_TO_FN_PTR(GetNextEntry_t, os::dll_lookup(handle, "ZIP_GetNextEntry")); ZipInflateFully = CAST_TO_FN_PTR(ZipInflateFully_t, os::dll_lookup(handle, "ZIP_InflateFully")); Crc32 = CAST_TO_FN_PTR(Crc32_t, os::dll_lookup(handle, "ZIP_CRC32")); + FreeEntry = CAST_TO_FN_PTR(FreeEntry_t, os::dll_lookup(handle, 
"ZIP_FreeEntry")); // ZIP_Close is not exported on Windows in JDK5.0 so don't abort if ZIP_Close is NULL if (ZipOpen == NULL || FindEntry == NULL || ReadEntry == NULL || @@ -1358,7 +1374,7 @@ if (!Universe::is_module_initialized() && !ModuleEntryTable::javabase_defined() && mod_entry == NULL) { - mod_entry = ModuleEntryTable::javabase_module(); + mod_entry = ModuleEntryTable::javabase_moduleEntry(); } // The module must be a named module @@ -1395,6 +1411,57 @@ return NULL; } +#if INCLUDE_CDS +// The following function is only used during CDS dump time. +// It checks if a class can be found in the jar entries of the _patch_mod_entries. +// It does not support non-jar entries. +bool ClassLoader::is_in_patch_module(const char* const file_name) { + assert(DumpSharedSpaces, "dump time only"); + if (_patch_mod_entries == NULL) { + return false; + } + + int num_of_entries = _patch_mod_entries->length(); + char* class_module_name = NULL; + ResourceMark rm; + const char *pkg_name = package_from_name(file_name); + // Using the jimage to obtain the class' module name. + // The ModuleEntryTable cannot be used at this point during dump time + // because the module system hasn't been initialized yet. + if (pkg_name != NULL) { + JImageFile *jimage = _jrt_entry->jimage(); + class_module_name = (char*)(*JImagePackageToModule)(jimage, pkg_name); + } + + if (class_module_name == NULL) { + return false; + } + + // Loop through all the patch module entries looking for module + for (int i = 0; i < num_of_entries; i++) { + ModuleClassPathList* module_cpl = _patch_mod_entries->at(i); + Symbol* module_cpl_name = module_cpl->module_name(); + + if (strcmp(module_cpl_name->as_C_string(), class_module_name) == 0) { + // Class' module has been located, attempt to locate + // the class from the module's ClassPathEntry list. + ClassPathEntry* e = module_cpl->module_first_entry(); + while (e != NULL) { + if (e->is_jar_file()) { + if (e->stream_exists(file_name)) { + return true; + } else { + e = e->next(); + } + } + } + } + } + + return false; +} +#endif // INCLUDE_CDS + instanceKlassHandle ClassLoader::load_class(Symbol* name, bool search_append_only, TRAPS) { assert(name != NULL, "invariant"); assert(THREAD->is_Java_thread(), "must be a JavaThread"); @@ -1420,8 +1487,8 @@ // If DumpSharedSpaces is true boot loader visibility boundaries are set to: // - [jimage] + [_first_append_entry to _last_append_entry] (all path entries). - // No --patch-module entries or exploded module builds are included since CDS - // is not supported if --patch-module or exploded module builds are used. + // If a class is found in the --patch-module entries, the class will not be included in the + // CDS archive. Also, CDS is not supported if exploded module builds are used. // // If search_append_only is true, boot loader visibility boundaries are // set to be _first_append_entry to the end. This includes: @@ -1444,8 +1511,17 @@ // found within its module specification, the search should continue to Load Attempt #2. // Note: The --patch-module entries are never searched if the boot loader's // visibility boundary is limited to only searching the append entries. 
- if (_patch_mod_entries != NULL && !search_append_only && !DumpSharedSpaces) { - stream = search_module_entries(_patch_mod_entries, class_name, file_name, CHECK_NULL); + if (_patch_mod_entries != NULL && !search_append_only) { + if (!DumpSharedSpaces) { + stream = search_module_entries(_patch_mod_entries, class_name, file_name, CHECK_NULL); + } else { +#if INCLUDE_CDS + if (is_in_patch_module(file_name)) { + tty->print_cr("Preload Warning: Skip archiving class %s found in --patch-module entry", class_name); + return NULL; + } +#endif + } } // Load Attempt #2: [jimage | exploded build] @@ -1596,8 +1672,57 @@ } #if INCLUDE_CDS +// Capture all the --patch-module entries specified during CDS dump time. +// It also captures the non-existing path(s) and the required file(s) during inspecting +// the entries. +void ClassLoader::setup_patch_mod_path() { + assert(DumpSharedSpaces, "only used with -Xshare:dump"); + ResourceMark rm; + GrowableArray<ModulePatchPath*>* patch_mod_args = Arguments::get_patch_mod_prefix(); + if (patch_mod_args != NULL) { + int num_of_entries = patch_mod_args->length(); + for (int i = 0; i < num_of_entries; i++) { + const char* module_name = (patch_mod_args->at(i))->module_name(); + const char* module_path = (patch_mod_args->at(i))->path_string(); + int path_len = (int)strlen(module_path); + int name_len = (int)strlen(module_name); + int buf_len = name_len + path_len + 2; // add 2 for the '=' and NULL terminator + int end = 0; + char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal); + // Iterate over the module's class path entries + for (int start = 0; start < path_len; start = end) { + while (module_path[end] && module_path[end] != os::path_separator()[0]) { + end++; + } + strncpy(buf, &module_path[start], end - start); + buf[end - start] = '\0'; + struct stat st; + if (os::stat(buf, &st) != 0) { + // File not found + _shared_paths_misc_info->add_nonexist_path(buf); + } else { + if ((st.st_mode & S_IFMT) != S_IFREG) { // is not a regular file + vm_exit_during_initialization( + "--patch-module requires a regular file during dumping", buf); + } else { + _shared_paths_misc_info->add_required_file(buf); + } + } + while (module_path[end] == os::path_separator()[0]) { + end++; + } + }; + jio_snprintf(buf, buf_len, "%s=%s", module_name, module_path); + _shared_paths_misc_info->add_patch_mod_classpath((const char*)buf); + _num_patch_mod_prefixes++; + FREE_C_HEAP_ARRAY(char, buf); + } + } +} + void ClassLoader::initialize_shared_path() { if (DumpSharedSpaces) { + setup_patch_mod_path(); ClassLoaderExt::setup_search_paths(); _shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check() } @@ -1708,7 +1833,7 @@ if (jb_module == NULL) { vm_exit_during_initialization("Unable to create ModuleEntry for java.base"); } - ModuleEntryTable::set_javabase_module(jb_module); + ModuleEntryTable::set_javabase_moduleEntry(jb_module); } }
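Several hunks in this file change `(st.st_mode & S_IFREG) == S_IFREG` to `(st.st_mode & S_IFMT) == S_IFREG`. The file-type codes in `st_mode` are values of the S_IFMT field, not independent flag bits: POSIX defines S_IFREG as 0100000, but S_IFLNK (0120000) and S_IFSOCK (0140000) contain those same bits, so the old test also accepted symlinks and sockets. A small standalone version of the corrected check, equivalent to `S_ISREG`:

```cpp
#include <sys/stat.h>
#include <cstdio>

// Returns true only for regular files; masking with S_IFMT first extracts the
// complete type code before comparing it against S_IFREG.
static bool is_regular_file(const char* path) {
  struct stat st;
  if (stat(path, &st) != 0) {
    return false;  // could not stat the path
  }
  return (st.st_mode & S_IFMT) == S_IFREG;  // same as S_ISREG(st.st_mode)
}

int main(int argc, char** argv) {
  for (int i = 1; i < argc; i++) {
    std::printf("%s: %s\n", argv[i],
                is_regular_file(argv[i]) ? "regular file" : "not a regular file");
  }
  return 0;
}
```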
--- a/hotspot/src/share/vm/classfile/classLoader.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/classLoader.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -69,6 +69,7 @@ // Attempt to locate file_name through this class path entry. // Returns a class file parsing stream if successful. virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0; + virtual bool stream_exists(const char* name) = 0; // Debugging NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;) }; @@ -83,6 +84,7 @@ JImageFile* jimage() const { return NULL; } ClassPathDirEntry(const char* dir); ClassFileStream* open_stream(const char* name, TRAPS); + bool stream_exists(const char* name) { return false; } // Debugging NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);) }; @@ -126,6 +128,7 @@ ClassFileStream* open_stream(const char* name, TRAPS); void contents_do(void f(const char* name, void* context), void* context); bool is_multiple_versioned(TRAPS) NOT_CDS_RETURN_(false); + bool stream_exists(const char* name); // Debugging NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);) }; @@ -145,6 +148,7 @@ ClassPathImageEntry(JImageFile* jimage, const char* name); ~ClassPathImageEntry(); ClassFileStream* open_stream(const char* name, TRAPS); + bool stream_exists(const char* name) { return false; } // Debugging NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);) @@ -255,6 +259,7 @@ // Info used by CDS CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;) + CDS_ONLY(static int _num_patch_mod_prefixes;) // Initialization: // - setup the boot loader's system class path @@ -427,6 +432,9 @@ static void initialize_module_loader_map(JImageFile* jimage); static s2 classloader_type(Symbol* class_name, ClassPathEntry* e, int classpath_index, TRAPS); + static bool is_in_patch_module(const char* const file_name); + static void setup_patch_mod_path(); // Only when -Xshare:dump + static int num_patch_mod_prefixes() { return _num_patch_mod_prefixes; } #endif static void trace_class_path(const char* msg, const char* name = NULL);
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -773,6 +773,41 @@ InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, mirror, CHECK); } +// Set the java.lang.reflect.Module module field in the java_lang_Class mirror +void java_lang_Class::set_mirror_module_field(KlassHandle k, Handle mirror, Handle module, TRAPS) { + if (module.is_null()) { + // During startup, the module may be NULL only if java.base has not been defined yet. + // Put the class on the fixup_module_list to patch later when the java.lang.reflect.Module + // for java.base is known. + assert(!Universe::is_module_initialized(), "Incorrect java.lang.reflect.Module pre module system initialization"); + MutexLocker m1(Module_lock, THREAD); + // Keep list of classes needing java.base module fixup + if (!ModuleEntryTable::javabase_defined()) { + if (fixup_module_field_list() == NULL) { + GrowableArray<Klass*>* list = + new (ResourceObj::C_HEAP, mtModule) GrowableArray<Klass*>(500, true); + set_fixup_module_field_list(list); + } + k->class_loader_data()->inc_keep_alive(); + fixup_module_field_list()->push(k()); + } else { + // java.base was defined at some point between calling create_mirror() + // and obtaining the Module_lock, patch this particular class with java.base. + ModuleEntry *javabase_entry = ModuleEntryTable::javabase_moduleEntry(); + assert(javabase_entry != NULL && javabase_entry->module() != NULL, + "Setting class module field, java.base should be defined"); + Handle javabase_handle(THREAD, JNIHandles::resolve(javabase_entry->module())); + set_module(mirror(), javabase_handle()); + } + } else { + assert(Universe::is_module_initialized() || + (ModuleEntryTable::javabase_defined() && + (module() == JNIHandles::resolve(ModuleEntryTable::javabase_moduleEntry()->module()))), + "Incorrect java.lang.reflect.Module specification while creating mirror"); + set_module(mirror(), module()); + } +} + void java_lang_Class::create_mirror(KlassHandle k, Handle class_loader, Handle module, Handle protection_domain, TRAPS) { assert(k->java_mirror() == NULL, "should only assign mirror once"); @@ -835,25 +870,13 @@ set_class_loader(mirror(), class_loader()); // set the module field in the java_lang_Class instance - // This may be null during bootstrap but will get fixed up later on. - set_module(mirror(), module()); + set_mirror_module_field(k, mirror, module, THREAD); // Setup indirection from klass->mirror last // after any exceptions can happen during allocations. if (!k.is_null()) { k->set_java_mirror(mirror()); } - - // Keep list of classes needing java.base module fixup. - if (!ModuleEntryTable::javabase_defined()) { - if (fixup_module_field_list() == NULL) { - GrowableArray<Klass*>* list = - new (ResourceObj::C_HEAP, mtModule) GrowableArray<Klass*>(500, true); - set_fixup_module_field_list(list); - } - k->class_loader_data()->inc_keep_alive(); - fixup_module_field_list()->push(k()); - } } else { if (fixup_mirror_list() == NULL) { GrowableArray<Klass*>* list =
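The new set_mirror_module_field() defers classes created before java.base is defined: under Module_lock it either queues the class on the fixup list or, if java.base became defined while acquiring the lock, patches the mirror immediately. A generic sketch of this defer-then-patch idiom, using standard C++ names rather than the HotSpot types:

```cpp
// Sketch only: objects created before a dependency exists are queued under a
// lock, then patched in one pass once the dependency is published.
#include <mutex>
#include <vector>

struct Mirror { void* module = nullptr; };

class ModuleFixups {
  std::mutex lock_;
  std::vector<Mirror*> pending_;
  void* javabase_ = nullptr;           // null until "java.base" is defined

public:
  void set_module_field(Mirror* m, void* module) {
    if (module != nullptr) {           // normal case: module already known
      m->module = module;
      return;
    }
    std::lock_guard<std::mutex> g(lock_);
    if (javabase_ == nullptr) {
      pending_.push_back(m);           // defer until java.base is defined
    } else {
      m->module = javabase_;           // defined while we waited for the lock
    }
  }

  void define_javabase(void* module) { // patch everything queued so far
    std::lock_guard<std::mutex> g(lock_);
    javabase_ = module;
    for (Mirror* m : pending_) m->module = module;
    pending_.clear();
  }
};
```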
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -219,6 +219,7 @@ static void set_class_loader(oop java_class, oop class_loader); static void set_component_mirror(oop java_class, oop comp_mirror); static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS); + static void set_mirror_module_field(KlassHandle K, Handle mirror, Handle module, TRAPS); public: static void compute_offsets();
--- a/hotspot/src/share/vm/classfile/klassFactory.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/klassFactory.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -25,12 +25,85 @@ #include "precompiled.hpp" #include "classfile/classFileParser.hpp" #include "classfile/classFileStream.hpp" +#include "classfile/classLoader.hpp" #include "classfile/classLoaderData.hpp" +#include "classfile/classLoaderData.inline.hpp" #include "classfile/klassFactory.hpp" +#include "classfile/sharedClassUtil.hpp" +#include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "prims/jvmtiEnvBase.hpp" +#include "prims/jvmtiRedefineClasses.hpp" #include "trace/traceMacros.hpp" +// called during initial loading of a shared class +instanceKlassHandle KlassFactory::check_shared_class_file_load_hook( + instanceKlassHandle ik, + Symbol* class_name, + Handle class_loader, + Handle protection_domain, TRAPS) { +#if INCLUDE_CDS && INCLUDE_JVMTI + assert(ik.not_null(), "sanity"); + assert(ik()->is_shared(), "expecting a shared class"); + + if (JvmtiExport::should_post_class_file_load_hook()) { + assert(THREAD->is_Java_thread(), "must be JavaThread"); + + // Post the CFLH + JvmtiCachedClassFileData* cached_class_file = NULL; + JvmtiCachedClassFileData* archived_class_data = ik->get_archived_class_data(); + assert(archived_class_data != NULL, "shared class has no archived class data"); + unsigned char* ptr = + VM_RedefineClasses::get_cached_class_file_bytes(archived_class_data); + unsigned char* end_ptr = + ptr + VM_RedefineClasses::get_cached_class_file_len(archived_class_data); + unsigned char* old_ptr = ptr; + JvmtiExport::post_class_file_load_hook(class_name, + class_loader, + protection_domain, + &ptr, + &end_ptr, + &cached_class_file); + if (old_ptr != ptr) { + // JVMTI agent has modified class file data. + // Set new class file stream using JVMTI agent modified class file data. 
+ ClassLoaderData* loader_data = + ClassLoaderData::class_loader_data(class_loader()); + int path_index = ik->shared_classpath_index(); + SharedClassPathEntry* ent = + (SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index); + ClassFileStream* stream = new ClassFileStream(ptr, + end_ptr - ptr, + ent->_name, + ClassFileStream::verify); + ClassFileParser parser(stream, + class_name, + loader_data, + protection_domain, + NULL, + NULL, + ClassFileParser::BROADCAST, // publicity level + CHECK_NULL); + instanceKlassHandle new_ik = parser.create_instance_klass(true /* changed_by_loadhook */, + CHECK_NULL); + if (cached_class_file != NULL) { + new_ik->set_cached_class_file(cached_class_file); + } + + if (class_loader.is_null()) { + ResourceMark rm; + ClassLoader::add_package(class_name->as_C_string(), path_index, THREAD); + } + + return new_ik; + } + } +#endif + + return NULL; +} + + static ClassFileStream* check_class_file_load_hook(ClassFileStream* stream, Symbol* name, ClassLoaderData* loader_data, @@ -97,7 +170,6 @@ const InstanceKlass* host_klass, GrowableArray<Handle>* cp_patches, TRAPS) { - assert(stream != NULL, "invariant"); assert(loader_data != NULL, "invariant"); assert(THREAD->is_Java_thread(), "must be a JavaThread"); @@ -142,5 +214,27 @@ TRACE_KLASS_CREATION(result, parser, THREAD); +#if INCLUDE_CDS && INCLUDE_JVMTI + if (DumpSharedSpaces) { + assert(cached_class_file == NULL, "Sanity"); + // Archive the class stream data into the optional data section + JvmtiCachedClassFileData *p; + int len; + const unsigned char *bytes; + // event based tracing might set cached_class_file + if ((bytes = result->get_cached_class_file_bytes()) != NULL) { + len = result->get_cached_class_file_len(); + } else { + len = stream->length(); + bytes = stream->buffer(); + } + p = (JvmtiCachedClassFileData*)MetaspaceShared::optional_data_space_alloc( + offset_of(JvmtiCachedClassFileData, data) + len); + p->length = len; + memcpy(p->data, bytes, len); + result->set_archived_class_data(p); + } +#endif + return result; }
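check_shared_class_file_load_hook() posts the archived class bytes to JVMTI ClassFileLoadHook agents and re-parses the class only when an agent actually replaced the buffer; the dump-time hunk at the end archives each class's bytes so that this replay is possible later. A simplified sketch of the "post, compare pointers, re-parse if changed" pattern; the hook signature and types below are illustrative stand-ins, not the JVMTI API:

```cpp
#include <cstddef>
#include <cstdint>

struct Klass {};  // stand-in for the parsed class representation

// A hook may replace the buffer and length; it leaves them untouched if it is
// not interested. Illustrative signature only.
typedef void (*ClassFileLoadHook)(const uint8_t** ptr, size_t* len);

// Stand-in for the real class file parser.
static Klass* parse_class(const uint8_t* bytes, size_t len) {
  (void)bytes; (void)len;
  return new Klass();
}

// Returns a freshly parsed class if an agent modified the bytes, or nullptr
// when the archived (shared) version can be used unchanged.
Klass* maybe_reparse(const uint8_t* archived, size_t len, ClassFileLoadHook hook) {
  const uint8_t* ptr = archived;
  size_t new_len = len;
  hook(&ptr, &new_len);
  if (ptr != archived) {
    return parse_class(ptr, new_len);  // agent replaced the class bytes
  }
  return nullptr;
}
```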
--- a/hotspot/src/share/vm/classfile/klassFactory.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/klassFactory.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -75,6 +75,12 @@ const InstanceKlass* host_klass, GrowableArray<Handle>* cp_patches, TRAPS); + public: + static instanceKlassHandle check_shared_class_file_load_hook( + instanceKlassHandle ik, + Symbol* class_name, + Handle class_loader, + Handle protection_domain, TRAPS); }; #endif // SHARE_VM_CLASSFILE_KLASSFACTORY_HPP
--- a/hotspot/src/share/vm/classfile/moduleEntry.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/moduleEntry.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -92,7 +92,7 @@ // read java.base. If either of these conditions // hold, readability has been established. if (!this->is_named() || - (m == ModuleEntryTable::javabase_module())) { + (m == ModuleEntryTable::javabase_moduleEntry())) { return true; } @@ -358,16 +358,27 @@ } // Set java.lang.reflect.Module, version and location for java.base - ModuleEntry* jb_module = javabase_module(); + ModuleEntry* jb_module = javabase_moduleEntry(); assert(jb_module != NULL, "java.base ModuleEntry not defined"); - jb_module->set_module(boot_loader_data->add_handle(module_handle)); jb_module->set_version(version); jb_module->set_location(location); + // Once java.base's ModuleEntry _module field is set with the known + // java.lang.reflect.Module, java.base is considered "defined" to the VM. + jb_module->set_module(boot_loader_data->add_handle(module_handle)); + // Store pointer to the ModuleEntry for java.base in the java.lang.reflect.Module object. java_lang_reflect_Module::set_module_entry(module_handle(), jb_module); + + // Patch any previously loaded classes' module field with java.base's java.lang.reflect.Module. + patch_javabase_entries(module_handle); } +// Within java.lang.Class instances there is a java.lang.reflect.Module field +// that must be set with the defining module. During startup, prior to java.base's +// definition, classes needing their module field set are added to the fixup_module_list. +// Their module field is set once java.base's java.lang.reflect.Module is known to the VM. void ModuleEntryTable::patch_javabase_entries(Handle module_handle) { + assert(Module_lock->owned_by_self(), "should have the Module_lock"); if (module_handle.is_null()) { fatal("Unable to patch the module field of classes loaded prior to java.base's definition, invalid java.lang.reflect.Module"); } @@ -389,9 +400,7 @@ for (int i = 0; i < list_length; i++) { Klass* k = list->at(i); assert(k->is_klass(), "List should only hold classes"); - Thread* THREAD = Thread::current(); - KlassHandle kh(THREAD, k); - java_lang_Class::fixup_module_field(kh, module_handle); + java_lang_Class::fixup_module_field(KlassHandle(k), module_handle); k->class_loader_data()->dec_keep_alive(); }
--- a/hotspot/src/share/vm/classfile/moduleEntry.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/moduleEntry.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -78,11 +78,11 @@ _must_walk_reads = false; } - Symbol* name() const { return literal(); } - void set_name(Symbol* n) { set_literal(n); } + Symbol* name() const { return literal(); } + void set_name(Symbol* n) { set_literal(n); } - jobject module() const { return _module; } - void set_module(jobject j) { _module = j; } + jobject module() const { return _module; } + void set_module(jobject j) { _module = j; } // The shared ProtectionDomain reference is set once the VM loads a shared class // originated from the current Module. The referenced ProtectionDomain object is @@ -217,13 +217,13 @@ // Special handling for unnamed module, one per class loader's ModuleEntryTable void create_unnamed_module(ClassLoaderData* loader_data); - ModuleEntry* unnamed_module() { return _unnamed_module; } + ModuleEntry* unnamed_module() { return _unnamed_module; } // Special handling for java.base - static ModuleEntry* javabase_module() { return _javabase_module; } - static void set_javabase_module(ModuleEntry* java_base) { _javabase_module = java_base; } - static bool javabase_defined() { return ((_javabase_module != NULL) && - (_javabase_module->module() != NULL)); } + static ModuleEntry* javabase_moduleEntry() { return _javabase_module; } + static void set_javabase_moduleEntry(ModuleEntry* java_base) { _javabase_module = java_base; } + static bool javabase_defined() { return ((_javabase_module != NULL) && + (_javabase_module->module() != NULL)); } static void finalize_javabase(Handle module_handle, Symbol* version, Symbol* location); static void patch_javabase_entries(Handle module_handle);
--- a/hotspot/src/share/vm/classfile/modules.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/modules.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -206,7 +206,7 @@ assert(pkg_list->length() == 0 || package_table != NULL, "Bad package_table"); // Ensure java.base's ModuleEntry has been created - assert(ModuleEntryTable::javabase_module() != NULL, "No ModuleEntry for java.base"); + assert(ModuleEntryTable::javabase_moduleEntry() != NULL, "No ModuleEntry for java.base"); bool duplicate_javabase = false; { @@ -226,7 +226,7 @@ for (int x = 0; x < pkg_list->length(); x++) { // Some of java.base's packages were added early in bootstrapping, ignore duplicates. if (package_table->lookup_only(pkg_list->at(x)) == NULL) { - pkg = package_table->locked_create_entry_or_null(pkg_list->at(x), ModuleEntryTable::javabase_module()); + pkg = package_table->locked_create_entry_or_null(pkg_list->at(x), ModuleEntryTable::javabase_moduleEntry()); assert(pkg != NULL, "Unable to create a java.base package entry"); } // Unable to have a GrowableArray of TempNewSymbol. Must decrement the refcount of @@ -255,9 +255,6 @@ log_trace(modules)("define_javabase_module(): creation of package %s for module java.base", (pkg_list->at(x))->as_C_string()); } - - // Patch any previously loaded classes' module field with java.base's jlr.Module. - ModuleEntryTable::patch_javabase_entries(module_handle); } void Modules::define_module(jobject module, jstring version,
--- a/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -86,6 +86,9 @@ case REQUIRED: out->print("Expecting that file %s must exist and is not altered", path); break; + case PATCH_MOD: + out->print("Expecting --patch-module=%s", path); + break; default: ShouldNotReachHere(); } @@ -146,6 +149,9 @@ // But we want it to not exist -> fail return fail("File must not exist"); } + if ((st.st_mode & S_IFMT) != S_IFREG) { + return fail("Did not get a regular file as expected."); + } time_t timestamp; long filesize; @@ -161,7 +167,26 @@ } } break; - + case PATCH_MOD: + { + GrowableArray<ModulePatchPath*>* patch_mod_args = Arguments::get_patch_mod_prefix(); + if (patch_mod_args != NULL) { + int num_of_entries = patch_mod_args->length(); + for (int i = 0; i < num_of_entries; i++) { + const char* module_name = (patch_mod_args->at(i))->module_name(); + const char* path_string = (patch_mod_args->at(i))->path_string(); + size_t n = strlen(module_name); + // path contains the module name, followed by '=', and one or more entries. + // E.g.: "java.base=foo" or "java.naming=dir1:dir2:dir3" + if ((strncmp(module_name, path, n) != 0) || + (path[n] != '=') || + (strcmp(path + n + 1, path_string) != 0)) { + return fail("--patch-module mismatch, path not found in run time: ", path); + } + } + } + } + break; default: return fail("Corrupted archive file header"); }
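At run time the new PATCH_MOD case replays each recorded `module=path` string against the current --patch-module arguments: the module name must be a prefix, followed by '=', followed by an identical path list. The comparison, shown standalone (function and parameter names here are illustrative):

```cpp
#include <cstring>

// True when the recorded archive entry ("java.base=foo" or
// "java.naming=dir1:dir2:dir3") matches the run-time module name and path list.
static bool patch_mod_entry_matches(const char* recorded,
                                    const char* module_name,
                                    const char* path_string) {
  size_t n = std::strlen(module_name);
  return std::strncmp(recorded, module_name, n) == 0 &&   // module name prefix
         recorded[n] == '=' &&                            // separator
         std::strcmp(recorded + n + 1, path_string) == 0; // exact path list
}
```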
--- a/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -104,10 +104,28 @@ add_path(path, NON_EXIST); } + // The path must exist and have required size and modification time + void add_required_file(const char* path) { + add_path(path, REQUIRED); + + struct stat st; + if (os::stat(path, &st) != 0) { + assert(0, "sanity"); +#if INCLUDE_CDS + ClassLoader::exit_with_path_failure("failed to os::stat(%s)", path); // should not happen +#endif + } + write_time(st.st_mtime); + write_long(st.st_size); + } + // The path must exist, and must contain exactly <num_entries> files/dirs void add_boot_classpath(const char* path) { add_path(path, BOOT); } + void add_patch_mod_classpath(const char* path) { + add_path(path, PATCH_MOD); + } int write_jint(jint num) { write(&num, sizeof(num)); return 0; @@ -129,7 +147,8 @@ enum { BOOT = 1, NON_EXIST = 2, - REQUIRED = 3 + REQUIRED = 3, + PATCH_MOD = 4 }; virtual const char* type_name(int type) { @@ -137,6 +156,7 @@ case BOOT: return "BOOT"; case NON_EXIST: return "NON_EXIST"; case REQUIRED: return "REQUIRED"; + case PATCH_MOD: return "PATCH_MOD"; default: ShouldNotReachHere(); return "?"; } }
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -1210,16 +1210,12 @@ instanceKlassHandle SystemDictionary::load_shared_class( Symbol* class_name, Handle class_loader, TRAPS) { - // Don't load shared class when JvmtiExport::should_post_class_file_load_hook() - // is enabled since posting CFLH is not supported when loading shared class. - if (!JvmtiExport::should_post_class_file_load_hook()) { - instanceKlassHandle ik (THREAD, find_shared_class(class_name)); - // Make sure we only return the boot class for the NULL classloader. - if (ik.not_null() && - ik->is_shared_boot_class() && class_loader.is_null()) { - Handle protection_domain; - return load_shared_class(ik, class_loader, protection_domain, THREAD); - } + instanceKlassHandle ik (THREAD, find_shared_class(class_name)); + // Make sure we only return the boot class for the NULL classloader. + if (ik.not_null() && + ik->is_shared_boot_class() && class_loader.is_null()) { + Handle protection_domain; + return load_shared_class(ik, class_loader, protection_domain, THREAD); } return instanceKlassHandle(); } @@ -1303,11 +1299,6 @@ Handle class_loader, Handle protection_domain, TRAPS) { instanceKlassHandle nh = instanceKlassHandle(); // null Handle - if (JvmtiExport::should_post_class_file_load_hook()) { - // Don't load shared class when JvmtiExport::should_post_class_file_load_hook() - // is enabled since posting CFLH is not supported when loading shared class. - return nh; - } if (ik.not_null()) { Symbol* class_name = ik->name(); @@ -1358,6 +1349,14 @@ } } + instanceKlassHandle new_ik = KlassFactory::check_shared_class_file_load_hook( + ik, class_name, class_loader, protection_domain, CHECK_(nh)); + if (new_ik.not_null()) { + // The class is changed by CFLH. Return the new class. The shared class is + // not used. + return new_ik; + } + // Adjust methods to recover missing data. They need addresses for // interpreter entry points and their default native method address // must be reset.
--- a/hotspot/src/share/vm/classfile/verificationType.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/classfile/verificationType.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -95,7 +95,8 @@ Category2_2nd = (Category2_2ndFlag << 1 * BitsPerByte) | Primitive, // Primitive values (type discriminator stored in most-significant bytes) - Bogus = (ITEM_Bogus << 2 * BitsPerByte) | Category1, + // Bogus needs the " | Primitive". Else, is_reference(Bogus) returns TRUE. + Bogus = (ITEM_Bogus << 2 * BitsPerByte) | Primitive, Boolean = (ITEM_Boolean << 2 * BitsPerByte) | Category1, Byte = (ITEM_Byte << 2 * BitsPerByte) | Category1, Short = (ITEM_Short << 2 * BitsPerByte) | Category1,
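The Bogus fix above concerns how verification types are packed: an item tag in the high bytes plus a kind discriminator in the low bits. If the discriminator bit is omitted, a tag of zero makes the whole word look like a reference. A toy encoding in the same spirit that demonstrates the failure mode; the constants are simplified stand-ins, not HotSpot's actual layout:

```cpp
#include <cassert>
#include <cstdint>

enum : uint32_t {
  TypeMask  = 0x3,  // low bits select the kind of the packed word
  Reference = 0x0,
  Primitive = 0x1
};

// Pack an item tag into the high bytes and mark the word as a primitive.
constexpr uint32_t make_primitive(uint32_t item_tag) {
  return (item_tag << 16) | Primitive;
}

constexpr bool is_reference(uint32_t data) {
  return (data & TypeMask) == Reference;
}

int main() {
  const uint32_t kItemBogus = 0;             // a tag value of zero
  uint32_t bogus_wrong = kItemBogus << 16;   // kind bit forgotten
  uint32_t bogus_right = make_primitive(kItemBogus);
  assert(is_reference(bogus_wrong));   // misclassified as a (null) reference
  assert(!is_reference(bogus_right));  // correctly a primitive
  return 0;
}
```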
--- a/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/gc/cms/cmsOopClosures.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -258,16 +258,15 @@ // the closure ParMarkFromRootsClosure. class ParPushOrMarkClosure: public MetadataAwareOopClosure { private: - CMSCollector* _collector; - MemRegion _whole_span; - MemRegion _span; // local chunk - CMSBitMap* _bit_map; - OopTaskQueue* _work_queue; - CMSMarkStack* _overflow_stack; - HeapWord* const _finger; - HeapWord** const _global_finger_addr; - ParMarkFromRootsClosure* const - _parent; + CMSCollector* _collector; + MemRegion _whole_span; + MemRegion _span; // local chunk + CMSBitMap* _bit_map; + OopTaskQueue* _work_queue; + CMSMarkStack* _overflow_stack; + HeapWord* const _finger; + HeapWord* volatile* const _global_finger_addr; + ParMarkFromRootsClosure* const _parent; protected: DO_OOP_WORK_DEFN public: @@ -277,7 +276,7 @@ OopTaskQueue* work_queue, CMSMarkStack* mark_stack, HeapWord* finger, - HeapWord** global_finger_addr, + HeapWord* volatile* global_finger_addr, ParMarkFromRootsClosure* parent); virtual void do_oop(oop* p); virtual void do_oop(narrowOop* p);
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -3025,14 +3025,14 @@ // MT Concurrent Marking Task class CMSConcMarkingTask: public YieldingFlexibleGangTask { - CMSCollector* _collector; - uint _n_workers; // requested/desired # workers - bool _result; - CompactibleFreeListSpace* _cms_space; - char _pad_front[64]; // padding to ... - HeapWord* _global_finger; // ... avoid sharing cache line - char _pad_back[64]; - HeapWord* _restart_addr; + CMSCollector* _collector; + uint _n_workers; // requested/desired # workers + bool _result; + CompactibleFreeListSpace* _cms_space; + char _pad_front[64]; // padding to ... + HeapWord* volatile _global_finger; // ... avoid sharing cache line + char _pad_back[64]; + HeapWord* _restart_addr; // Exposed here for yielding support Mutex* const _bit_map_lock; @@ -3068,7 +3068,7 @@ OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } - HeapWord** global_finger_addr() { return &_global_finger; } + HeapWord* volatile* global_finger_addr() { return &_global_finger; } CMSConcMarkingTerminator* terminator() { return &_term; } @@ -6554,7 +6554,7 @@ // Note: the local finger doesn't advance while we drain // the stack below, but the global finger sure can and will. - HeapWord** gfa = _task->global_finger_addr(); + HeapWord* volatile* gfa = _task->global_finger_addr(); ParPushOrMarkClosure pushOrMarkClosure(_collector, _span, _bit_map, _work_queue, @@ -6721,7 +6721,7 @@ OopTaskQueue* work_queue, CMSMarkStack* overflow_stack, HeapWord* finger, - HeapWord** global_finger_addr, + HeapWord* volatile* global_finger_addr, ParMarkFromRootsClosure* parent) : MetadataAwareOopClosure(collector->ref_processor()), _collector(collector),
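In CMSConcMarkingTask above, _global_finger is both made volatile (every worker reads and CASes it) and fenced with _pad_front/_pad_back so that hot updates do not invalidate the cache line holding its neighbours. A minimal sketch of the same false-sharing padding idiom; the 64-byte line size is assumed, matching the 64-byte pad arrays in the hunk:

```cpp
#include <atomic>
#include <cstddef>

// Assumed cache line size, matching _pad_front[64]/_pad_back[64] above.
const size_t kCacheLine = 64;

struct alignas(kCacheLine) PaddedFinger {
  std::atomic<char*> value{nullptr};                 // CAS'ed by every worker
  char pad[kCacheLine - sizeof(std::atomic<char*>)]; // fill the rest of the line
};

struct MarkingTaskState {
  unsigned     n_workers;       // read-mostly configuration fields ...
  PaddedFinger global_finger;   // ... kept off the finger's cache line
  char*        restart_addr;
};
```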
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -724,12 +724,12 @@ // Support for parallelizing young gen rescan in CMS remark phase ParNewGeneration* _young_gen; - HeapWord** _top_addr; // ... Top of Eden - HeapWord** _end_addr; // ... End of Eden - Mutex* _eden_chunk_lock; - HeapWord** _eden_chunk_array; // ... Eden partitioning array - size_t _eden_chunk_index; // ... top (exclusive) of array - size_t _eden_chunk_capacity; // ... max entries in array + HeapWord* volatile* _top_addr; // ... Top of Eden + HeapWord** _end_addr; // ... End of Eden + Mutex* _eden_chunk_lock; + HeapWord** _eden_chunk_array; // ... Eden partitioning array + size_t _eden_chunk_index; // ... top (exclusive) of array + size_t _eden_chunk_capacity; // ... max entries in array // Support for parallelizing survivor space rescan HeapWord** _survivor_chunk_array;
--- a/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/gc/cms/parNewGeneration.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -1366,22 +1366,25 @@ return false; } assert(prefix != NULL && prefix != BUSY, "Error"); - size_t i = 1; oop cur = prefix; - while (i < objsFromOverflow && cur->klass_or_null() != NULL) { - i++; cur = cur->list_ptr_from_klass(); + for (size_t i = 1; i < objsFromOverflow; ++i) { + oop next = cur->list_ptr_from_klass(); + if (next == NULL) break; + cur = next; } + assert(cur != NULL, "Loop postcondition"); // Reattach remaining (suffix) to overflow list - if (cur->klass_or_null() == NULL) { + oop suffix = cur->list_ptr_from_klass(); + if (suffix == NULL) { // Write back the NULL in lieu of the BUSY we wrote // above and it is still the same value. if (_overflow_list == BUSY) { (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY); } } else { - assert(cur->klass_or_null() != (Klass*)(address)BUSY, "Error"); - oop suffix = cur->list_ptr_from_klass(); // suffix will be put back on global list + assert(suffix != BUSY, "Error"); + // suffix will be put back on global list cur->set_klass_to_list_ptr(NULL); // break off suffix // It's possible that the list is still in the empty(busy) state // we left it in a short while ago; in that case we may be @@ -1401,8 +1404,10 @@ // Too bad, someone else got in in between; we'll need to do a splice. // Find the last item of suffix list oop last = suffix; - while (last->klass_or_null() != NULL) { - last = last->list_ptr_from_klass(); + while (true) { + oop next = last->list_ptr_from_klass(); + if (next == NULL) break; + last = next; } // Atomically prepend suffix to current overflow list observed_overflow_list = _overflow_list;
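The rewritten loops above walk the lock-free overflow list via list_ptr_from_klass() and splice any unclaimed suffix back with Atomic::cmpxchg_ptr. A condensed sketch of the same take-prefix/splice-suffix protocol using std::atomic; the BUSY-sentinel handshake that the real code uses to coordinate concurrent claimants is elided here:

```cpp
#include <atomic>
#include <cstddef>

struct Node { Node* next; };

std::atomic<Node*> overflow_head{nullptr};

// Detach the whole list, keep up to `want` (>= 1) nodes, splice the rest back.
Node* take_from_overflow_list(size_t want) {
  Node* prefix = overflow_head.exchange(nullptr);  // detach everything
  if (prefix == nullptr) return nullptr;

  // Find the last node we keep.
  Node* cur = prefix;
  for (size_t i = 1; i < want && cur->next != nullptr; ++i) {
    cur = cur->next;
  }
  Node* suffix = cur->next;
  cur->next = nullptr;                             // break off the suffix

  if (suffix != nullptr) {
    Node* last = suffix;                           // find the suffix's tail
    while (last->next != nullptr) last = last->next;
    // Prepend the suffix onto the (possibly changed) global list with a CAS loop.
    Node* observed = overflow_head.load();
    do {
      last->next = observed;
    } while (!overflow_head.compare_exchange_weak(observed, suffix));
  }
  return prefix;                                   // caller owns up to `want` nodes
}
```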
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -1479,7 +1479,7 @@ "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)", capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio); - expand(expand_bytes); + expand(expand_bytes, _workers); // No expansion, now see if we want to shrink } else if (capacity_after_gc > maximum_desired_capacity) { @@ -1599,7 +1599,7 @@ word_size * HeapWordSize); - if (expand(expand_bytes)) { + if (expand(expand_bytes, _workers)) { _hrm.verify_optional(); _verifier->verify_region_sets_optional(); return attempt_allocation_at_safepoint(word_size, @@ -1609,7 +1609,7 @@ return NULL; } -bool G1CollectedHeap::expand(size_t expand_bytes, double* expand_time_ms) { +bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) { size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); aligned_expand_bytes = align_size_up(aligned_expand_bytes, HeapRegion::GrainBytes); @@ -1626,7 +1626,7 @@ uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes); assert(regions_to_expand > 0, "Must expand by at least one region"); - uint expanded_by = _hrm.expand_by(regions_to_expand); + uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers); if (expand_time_ms != NULL) { *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS; } @@ -1927,7 +1927,7 @@ _cmThread = _cm->cmThread(); // Now expand into the initial heap size. - if (!expand(init_byte_size)) { + if (!expand(init_byte_size, _workers)) { vm_shutdown_during_initialization("Failed to allocate initial heap."); return JNI_ENOMEM; } @@ -2474,8 +2474,16 @@ } jlong G1CollectedHeap::millis_since_last_gc() { - // assert(false, "NYI"); - return 0; + // See the notes in GenCollectedHeap::millis_since_last_gc() + // for more information about the implementation. + jlong ret_val = (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - + _g1_policy->collection_pause_end_millis(); + if (ret_val < 0) { + log_warning(gc)("millis_since_last_gc() would return : " JLONG_FORMAT + ". returning zero instead.", ret_val); + return 0; + } + return ret_val; } void G1CollectedHeap::prepare_for_verify() { @@ -3165,7 +3173,6 @@ assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table."); - _cm->note_start_of_gc(); // We call this after finalize_cset() to // ensure that the CSet has been finalized. _cm->verify_no_cset_oops(); @@ -3241,7 +3248,7 @@ // No need for an ergo logging here, // expansion_amount() does this when it returns a value > 0. double expand_ms; - if (!expand(expand_bytes, &expand_ms)) { + if (!expand(expand_bytes, _workers, &expand_ms)) { // We failed to expand the heap. Cannot do anything about it. } g1_policy()->phase_times()->record_expand_heap_time(expand_ms); @@ -3251,7 +3258,6 @@ // We redo the verification but now wrt to the new CSet which // has just got initialized after the previous CSet was freed. _cm->verify_no_cset_oops(); - _cm->note_end_of_gc(); // This timing is only used by the ergonomics to handle our pause target. // It is unclear why this should not include the full pause. We will
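The newly implemented millis_since_last_gc() derives its result from the monotonic clock (os::javaTimeNanos() scaled to milliseconds) minus the collection pause end recorded by the policy, logging and clamping a negative difference. The arithmetic, shown standalone with std::chrono in place of the HotSpot time source:

```cpp
#include <chrono>
#include <cstdint>

// Monotonic "now" in milliseconds, analogous to os::javaTimeNanos()/NANOSECS_PER_MILLISEC.
static int64_t now_millis() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
}

int64_t millis_since_last_gc(int64_t last_pause_end_millis) {
  int64_t ret = now_millis() - last_pause_end_millis;
  return ret < 0 ? 0 : ret;  // clock reads can race the recorded end time
}
```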
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp Wed Sep 21 08:38:21 2016 +0000 @@ -557,7 +557,7 @@ // Returns true if the heap was expanded by the requested amount; // false otherwise. // (Rounds up to a HeapRegion boundary.) - bool expand(size_t expand_bytes, double* expand_time_ms = NULL); + bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL); // Returns the PLAB statistics for a given destination. inline G1EvacStats* alloc_buffer_stats(InCSetState dest);
--- a/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -32,6 +32,7 @@ #include "gc/g1/heapRegionSet.hpp" #include "logging/logStream.hpp" #include "utilities/debug.hpp" +#include "utilities/quickSort.hpp" G1CollectorState* G1CollectionSet::collector_state() { return _g1->collector_state(); @@ -396,6 +397,16 @@ return time_remaining_ms; } +static int compare_region_idx(const uint a, const uint b) { + if (a > b) { + return 1; + } else if (a == b) { + return 0; + } else { + return -1; + } +} + void G1CollectionSet::finalize_old_part(double time_remaining_ms) { double non_young_start_time_sec = os::elapsedTime(); double predicted_old_time_ms = 0.0; @@ -493,6 +504,8 @@ double non_young_end_time_sec = os::elapsedTime(); phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0); + + QuickSort::sort<uint>(_collection_set_regions, (int)_collection_set_cur_length, compare_region_idx, true); } #ifdef ASSERT
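finalize_old_part() now ends by sorting the collection set into increasing region-index order. QuickSort::sort takes a three-way comparator returning negative, zero, or positive; the explicit branches below avoid the classic subtraction trick, which cannot yield a negative result for unsigned indices. The comparator from the hunk, standalone:

```cpp
#include <cstdint>

// Three-way comparison of region indices; explicit branches instead of
// subtraction because the operands are unsigned.
static int compare_region_idx(const uint32_t a, const uint32_t b) {
  if (a > b) {
    return 1;
  } else if (a == b) {
    return 0;
  } else {
    return -1;
  }
}
```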
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Wed Sep 21 01:33:21 2016 -0700 +++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.cpp Wed Sep 21 08:38:21 2016 +0000 @@ -133,129 +133,184 @@ } G1CMMarkStack::G1CMMarkStack() : - _reserved_space(), + _max_chunk_capacity(0), _base(NULL), - _capacity(0), - _saved_index((size_t)AllBits), + _chunk_capacity(0), + _out_of_memory(false), _should_expand(false) { set_empty(); } bool G1CMMarkStack::resize(size_t new_capacity) { assert(is_empty(), "Only resize when stack is empty."); - assert(new_capacity <= MarkStackSizeMax, - "Trying to resize stack to " SIZE_FORMAT " elements when the maximum is " SIZE_FORMAT, new_capacity, MarkStackSizeMax); - - size_t reservation_size = ReservedSpace::allocation_align_size_up(new_capacity * sizeof(oop)); - - ReservedSpace rs(reservation_size); - if (!rs.is_reserved()) { - log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " elements and size " SIZE_FORMAT "B.", new_capacity, reservation_size); + assert(new_capacity <= _max_chunk_capacity, + "Trying to resize stack to " SIZE_FORMAT " chunks when the maximum is " SIZE_FORMAT, new_capacity, _max_chunk_capacity); + + OopChunk* new_base = MmapArrayAllocator<OopChunk, mtGC>::allocate_or_null(new_capacity); + + if (new_base == NULL) { + log_warning(gc)("Failed to reserve memory for new overflow mark stack with " SIZE_FORMAT " chunks and size " SIZE_FORMAT "B.", new_capacity, new_capacity * sizeof(OopChunk)); return false; } - - VirtualSpace vs; - - if (!vs.initialize(rs, rs.size())) { - rs.release(); - log_warning(gc)("Failed to commit memory for new overflow mark stack of size " SIZE_FORMAT "B.", rs.size()); - return false; + // Release old mapping. + if (_base != NULL) { + MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity); } - assert(vs.committed_size() == rs.size(), "Failed to commit all of the mark stack."); - - // Release old mapping. - _reserved_space.release(); - - // Save new mapping for future unmapping. 
- _reserved_space = rs; - - MemTracker::record_virtual_memory_type((address)_reserved_space.base(), mtGC); - - _base = (oop*) vs.low(); - _capacity = new_capacity; + _base = new_base; + _chunk_capacity = new_capacity; set_empty(); _should_expand = false; return true; } -bool G1CMMarkStack::allocate(size_t capacity) { - return resize(capacity); +size_t G1CMMarkStack::capacity_alignment() { + return (size_t)lcm(os::vm_allocation_granularity(), sizeof(OopChunk)) / sizeof(void*); +} + +bool G1CMMarkStack::initialize(size_t initial_capacity, size_t max_capacity) { + guarantee(_max_chunk_capacity == 0, "G1CMMarkStack already initialized."); + + size_t const OopChunkSizeInVoidStar = sizeof(OopChunk) / sizeof(void*); + + _max_chunk_capacity = (size_t)align_size_up(max_capacity, capacity_alignment()) / OopChunkSizeInVoidStar; + size_t initial_chunk_capacity = (size_t)align_size_up(initial_capacity, capacity_alignment()) / OopChunkSizeInVoidStar; + + guarantee(initial_chunk_capacity <= _max_chunk_capacity, + "Maximum chunk capacity " SIZE_FORMAT " smaller than initial capacity " SIZE_FORMAT, + _max_chunk_capacity, + initial_chunk_capacity); + + log_debug(gc)("Initialize mark stack with " SIZE_FORMAT " chunks, maximum " SIZE_FORMAT, + initial_chunk_capacity, _max_chunk_capacity); + + return resize(initial_chunk_capacity); } void G1CMMarkStack::expand() { // Clear expansion flag _should_expand = false; - if (_capacity == MarkStackSizeMax) { - log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " elements.", _capacity); + if (_chunk_capacity == _max_chunk_capacity) { + log_debug(gc)("Can not expand overflow mark stack further, already at maximum capacity of " SIZE_FORMAT " chunks.", _chunk_capacity); return; } - size_t old_capacity = _capacity; + size_t old_capacity = _chunk_capacity; // Double capacity if possible - size_t new_capacity = MIN2(old_capacity * 2, MarkStackSizeMax); + size_t new_capacity = MIN2(old_capacity * 2, _max_chunk_capacity); if (resize(new_capacity)) { - log_debug(gc)("Expanded marking stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " elements", + log_debug(gc)("Expanded mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks", old_capacity, new_capacity); } else { - log_warning(gc)("Failed to expand marking stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " elements", + log_warning(gc)("Failed to expand mark stack capacity from " SIZE_FORMAT " to " SIZE_FORMAT " chunks", old_capacity, new_capacity); } } G1CMMarkStack::~G1CMMarkStack() { if (_base != NULL) { - _base = NULL; - _reserved_space.release(); - } -} - -void G1CMMarkStack::par_push_arr(oop* buffer, size_t n) { - MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); - size_t start = _index; - size_t next_index = start + n; - if (next_index > _capacity) { - _overflow = true; - return; - } - // Otherwise. 
- _index = next_index; - for (size_t i = 0; i < n; i++) { - size_t ind = start + i; - assert(ind < _capacity, "By overflow test above."); - _base[ind] = buffer[i]; + MmapArrayAllocator<OopChunk, mtGC>::free(_base, _chunk_capacity); } } -bool G1CMMarkStack::par_pop_arr(oop* buffer, size_t max, size_t* n) { - MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); - size_t index = _index; - if (index == 0) { - *n = 0; +void G1CMMarkStack::add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem) { + elem->next = *list; + *list = elem; +} + +void G1CMMarkStack::add_chunk_to_chunk_list(OopChunk* elem) { + MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag); + add_chunk_to_list(&_chunk_list, elem); + _chunks_in_chunk_list++; +} + +void G1CMMarkStack::add_chunk_to_free_list(OopChunk* elem) { + MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag); + add_chunk_to_list(&_free_list, elem); +} + +G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_list(OopChunk* volatile* list) { + OopChunk* result = *list; + if (result != NULL) { + *list = (*list)->next; + } + return result; +} + +G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_chunk_list() { + MutexLockerEx x(MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag); + OopChunk* result = remove_chunk_from_list(&_chunk_list); + if (result != NULL) { + _chunks_in_chunk_list--; + } + return result; +} + +G1CMMarkStack::OopChunk* G1CMMarkStack::remove_chunk_from_free_list() { + MutexLockerEx x(MarkStackFreeList_lock, Mutex::_no_safepoint_check_flag); + return remove_chunk_from_list(&_free_list); +} + +G1CMMarkStack::OopChunk* G1CMMarkStack::allocate_new_chunk() { + // This dirty read of _hwm is okay because we only ever increase the _hwm in parallel code. + // Further this limits _hwm to a value of _chunk_capacity + #threads, avoiding + // wraparound of _hwm. + if (_hwm >= _chunk_capacity) { + return NULL; + } + + size_t cur_idx = Atomic::add(1, &_hwm) - 1; + if (cur_idx >= _chunk_capacity) { + return NULL; + } + + OopChunk* result = ::new (&_base[cur_idx]) OopChunk; + result->next = NULL; + return result; +} + +bool G1CMMarkStack::par_push_chunk(oop* ptr_arr) { + // Get a new chunk. + OopChunk* new_chunk = remove_chunk_from_free_list(); + + if (new_chunk == NULL) { + // Did not get a chunk from the free list. Allocate from backing memory. 
+ new_chunk = allocate_new_chunk(); + } + + if (new_chunk == NULL) { + _out_of_memory = true; return false; - } else { - size_t k = MIN2(max, index); - size_t new_ind = index - k; - for (size_t j = 0; j < k; j++) { - buffer[j] = _base[new_ind + j]; - } - _index = new_ind; - *n = k; - return true; } + + Copy::conjoint_memory_atomic(ptr_arr, new_chunk->data, OopsPerChunk * sizeof(oop)); + + add_chunk_to_chunk_list(new_chunk); + + return true; } -void G1CMMarkStack::note_start_of_gc() { - assert(_saved_index == (size_t)AllBits, "note_start_of_gc()/end_of_gc() calls bracketed incorrectly"); - _saved_index = _index; +bool G1CMMarkStack::par_pop_chunk(oop* ptr_arr) { + OopChunk* cur = remove_chunk_from_chunk_list(); + + if (cur == NULL) { + return false; + } + + Copy::conjoint_memory_atomic(cur->data, ptr_arr, OopsPerChunk * sizeof(oop)); + + add_chunk_to_free_list(cur); + return true; } -void G1CMMarkStack::note_end_of_gc() { - guarantee(!stack_modified(), "Saved index " SIZE_FORMAT " must be the same as " SIZE_FORMAT, _saved_index, _index); - - _saved_index = (size_t)AllBits; +void G1CMMarkStack::set_empty() { + _chunks_in_chunk_list = 0; + _hwm = 0; + clear_out_of_memory(); + _chunk_list = NULL; + _free_list = NULL; } G1CMRootRegions::G1CMRootRegions() : @@ -483,9 +538,8 @@ } } - if (!_global_mark_stack.allocate(MarkStackSize)) { + if (!_global_mark_stack.initialize(MarkStackSize, MarkStackSizeMax)) { vm_exit_during_initialization("Failed to allocate initial concurrent mark overflow mark stack."); - return; } _tasks = NEW_C_HEAP_ARRAY(G1CMTask*, _max_worker_id, mtGC); @@ -1695,10 +1749,10 @@ // oop closures will set the has_overflown flag if we overflow the // global marking stack. - assert(_global_mark_stack.overflow() || _global_mark_stack.is_empty(), - "mark stack should be empty (unless it overflowed)"); - - if (_global_mark_stack.overflow()) { + assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(), + "Mark stack should be empty (unless it is out of memory)"); + + if (_global_mark_stack.is_out_of_memory()) { // This should have been done already when we tried to push an // entry on to the global mark stack. But let's do it again. set_has_overflown(); @@ -1904,7 +1958,8 @@ assert(_g1h->is_in_g1_reserved(finger), "invariant"); HeapRegion* curr_region = _g1h->heap_region_containing(finger); - + // Make sure that the reads below do not float before loading curr_region. + OrderAccess::loadload(); // Above heap_region_containing may return NULL as we always scan claim // until the end of the heap. In this case, just jump to the next region. HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; @@ -2342,49 +2397,54 @@ } void G1CMTask::move_entries_to_global_stack() { - // local array where we'll store the entries that will be popped - // from the local queue - oop buffer[global_stack_transfer_size]; - - int n = 0; + // Local array where we'll store the entries that will be popped + // from the local queue. 
+ oop buffer[G1CMMarkStack::OopsPerChunk]; + + size_t n = 0; oop obj; - while (n < global_stack_transfer_size && _task_queue->pop_local(obj)) { + while (n < G1CMMarkStack::OopsPerChunk && _task_queue->pop_local(obj)) { buffer[n] = obj; ++n; } + if (n < G1CMMarkStack::OopsPerChunk) { + buffer[n] = NULL; + } if (n > 0) { - // we popped at least one entry from the local queue - - if (!_cm->mark_stack_push(buffer, n)) { + if (!_cm->mark_stack_push(buffer)) { set_has_aborted(); } } - // this operation was quite expensive, so decrease the limits + // This operation was quite expensive, so decrease the limits. decrease_limits(); } -void G1CMTask::get_entries_from_global_stack() { - // local array where we'll store the entries that will be popped +bool G1CMTask::get_entries_from_global_stack() { + // Local array where we'll store the entries that will be popped // from the global stack. - oop buffer[global_stack_transfer_size]; - size_t n; - _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n); - assert(n <= global_stack_transfer_size, - "we should not pop more than the given limit"); - if (n > 0) { - // yes, we did actually pop at least one entry - for (size_t i = 0; i < n; ++i) { - bool success = _task_queue->push(buffer[i]); - // We only call this when the local queue is empty or under a - // given target limit. So, we do not expect this push to fail. - assert(success, "invariant"); + oop buffer[G1CMMarkStack::OopsPerChunk]; + + if (!_cm->mark_stack_pop(buffer)) { + return false; + } + + // We did actually pop at least one entry. + for (size_t i = 0; i < G1CMMarkStack::OopsPerChunk; ++i) { + oop elem = buffer[i]; + if (elem == NULL) { + break; } + bool success = _task_queue->push(elem); + // We only call this when the local queue is empty or under a + // given target limit. So, we do not expect this push to fail. + assert(success, "invariant"); } - // this operation was quite expensive, so decrease the limits + // This operation was quite expensive, so decrease the limits decrease_limits(); + return true; } void G1CMTask::drain_local_queue(bool partially) { @@ -2428,20 +2488,21 @@ // Decide what the target size is, depending whether we're going to // drain it partially (so that other tasks can steal if they run out - // of things to do) or totally (at the very end). Notice that, - // because we move entries from the global stack in chunks or - // because another task might be doing the same, we might in fact - // drop below the target. But, this is not a problem. - size_t target_size; + // of things to do) or totally (at the very end). + // Notice that when draining the global mark stack partially, due to the racyness + // of the mark stack size update we might in fact drop below the target. But, + // this is not a problem. + // In case of total draining, we simply process until the global mark stack is + // totally empty, disregarding the size counter. if (partially) { - target_size = _cm->partial_mark_stack_size_target(); + size_t const target_size = _cm->partial_mark_stack_size_target(); + while (!has_aborted() && _cm->mark_stack_size() > target_size) { + if (get_entries_from_global_stack()) { + drain_local_queue(partially); + } + } } else { - target_size = 0; - } - - if (_cm->mark_stack_size() > target_size) { - while (!has_aborted() && _cm->mark_stack_size() > target_size) { - get_entries_from_global_stack(); + while (!has_aborted() && get_entries_from_global_stack()) { drain_local_queue(partially); } }
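The G1CMMarkStack rewrite above replaces the oop-at-a-time array with fixed-size chunks: backing memory is handed out through an atomically bumped high-water mark, empty chunks recycle through a free list, and filled chunks link into a chunk list, each list guarded by its own lock. A condensed sketch under simplified assumptions (std::mutex in place of the MarkStackFreeList/MarkStackChunkList locks, a caller-provided backing array, illustrative capacities):

```cpp
#include <atomic>
#include <cstddef>
#include <cstring>
#include <mutex>

typedef void* oop;
const size_t OopsPerChunk = 1024 - 1;  // one slot of the 1024 pays for `next`

struct OopChunk {
  OopChunk* next;
  oop data[OopsPerChunk];
};

class ChunkedMarkStack {
  OopChunk* _base;                  // backing array of chunks
  size_t _capacity;
  std::atomic<size_t> _hwm{0};      // chunks below _hwm have been handed out
  OopChunk* _free_list = nullptr;   // empty chunks ready for reuse
  OopChunk* _chunk_list = nullptr;  // chunks currently holding data
  std::mutex _free_lock, _chunk_lock;

  OopChunk* allocate_new_chunk() {
    size_t idx = _hwm.fetch_add(1);        // overshoot is bounded by #threads
    if (idx >= _capacity) return nullptr;  // backing memory exhausted
    return &_base[idx];
  }

public:
  ChunkedMarkStack(OopChunk* base, size_t capacity)
    : _base(base), _capacity(capacity) {}

  bool par_push_chunk(oop* buf) {          // buf holds OopsPerChunk entries
    OopChunk* c;
    {
      std::lock_guard<std::mutex> g(_free_lock);
      c = _free_list;
      if (c != nullptr) _free_list = c->next;
    }
    if (c == nullptr) c = allocate_new_chunk();
    if (c == nullptr) return false;        // caller flags out-of-memory
    std::memcpy(c->data, buf, sizeof(c->data));
    std::lock_guard<std::mutex> g(_chunk_lock);
    c->next = _chunk_list;
    _chunk_list = c;
    return true;
  }

  bool par_pop_chunk(oop* buf) {
    OopChunk* c;
    {
      std::lock_guard<std::mutex> g(_chunk_lock);
      c = _chunk_list;
      if (c == nullptr) return false;
      _chunk_list = c->next;
    }
    std::memcpy(buf, c->data, sizeof(c->data));
    std::lock_guard<std::mutex> g(_free_lock);
    c->next = _free_list;                  // recycle the emptied chunk
    _free_list = c;
    return true;
  }
};
```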
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.hpp	Wed Sep 21 08:38:21 2016 +0000
@@ -149,42 +149,98 @@
 //
 // Stores oops in a huge buffer in virtual memory that is always fully committed.
 // Resizing may only happen during a STW pause when the stack is empty.
+//
+// Memory is allocated on a "chunk" basis, i.e. a set of oops. For this, the mark
+// stack memory is split into evenly sized chunks of oops. Users can only
+// add or remove entries on that basis.
+// Chunks are filled in increasing address order. Chunks that are not completely
+// filled are terminated by a NULL element.
+//
+// Every chunk has a header containing a single pointer element used for memory
+// management. This wastes some space, but is negligible (< .1% with current sizing).
+//
+// Memory management is done using a mix of tracking a high water mark indicating
+// that all chunks at a lower address are valid chunks, and a singly linked free
+// list connecting all empty chunks.
 class G1CMMarkStack VALUE_OBJ_CLASS_SPEC {
-  ReservedSpace _reserved_space; // Space currently reserved for the mark stack.
+public:
+  // Number of oops that can fit in a single chunk.
+  static const size_t OopsPerChunk = 1024 - 1 /* One reference for the next pointer */;
+private:
+  struct OopChunk {
+    OopChunk* next;
+    oop data[OopsPerChunk];
+  };
+
+  size_t _max_chunk_capacity;     // Maximum number of OopChunk elements on the stack.
+
+  OopChunk* _base;                // Bottom address of allocated memory area.
+  size_t _chunk_capacity;         // Current maximum number of OopChunk elements.

-  oop* _base;                    // Bottom address of allocated memory area.
-  size_t _capacity;              // Maximum number of elements.
-  size_t _index;                 // One more than last occupied index.
+  char _pad0[DEFAULT_CACHE_LINE_SIZE];
+  OopChunk* volatile _free_list;  // Linked list of free chunks that can be allocated by users.
+  char _pad1[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*)];
+  OopChunk* volatile _chunk_list; // List of chunks currently containing data.
+  volatile size_t _chunks_in_chunk_list;
+  char _pad2[DEFAULT_CACHE_LINE_SIZE - sizeof(OopChunk*) - sizeof(size_t)];
+
+  volatile size_t _hwm;           // High water mark within the reserved space.
+  char _pad4[DEFAULT_CACHE_LINE_SIZE - sizeof(size_t)];
+
+  // Allocate a new chunk from the reserved memory, using the high water mark. Returns
+  // NULL if out of memory.
+  OopChunk* allocate_new_chunk();

-  size_t _saved_index;           // Value of _index saved at start of GC to detect mark stack modifications during that time.
+  volatile bool _out_of_memory;

-  bool _overflow;
+  // Atomically add the given chunk to the list.
+  void add_chunk_to_list(OopChunk* volatile* list, OopChunk* elem);
+  // Atomically remove and return a chunk from the given list. Returns NULL if the
+  // list is empty.
+  OopChunk* remove_chunk_from_list(OopChunk* volatile* list);
+
+  void add_chunk_to_chunk_list(OopChunk* elem);
+  void add_chunk_to_free_list(OopChunk* elem);
+
+  OopChunk* remove_chunk_from_chunk_list();
+  OopChunk* remove_chunk_from_free_list();
+
   bool _should_expand;

   // Resizes the mark stack to the given new capacity. Releases any previous
   // memory if successful.
   bool resize(size_t new_capacity);

-  bool stack_modified() const { return _index != _saved_index; }
 public:
   G1CMMarkStack();
   ~G1CMMarkStack();

-  bool allocate(size_t capacity);
+  // Alignment and minimum capacity of this mark stack in number of oops.
+  static size_t capacity_alignment();
+
+  // Allocate and initialize the mark stack with the given number of oops.
+  bool initialize(size_t initial_capacity, size_t max_capacity);

-  // Pushes the first "n" elements of the given buffer on the stack.
-  void par_push_arr(oop* buffer, size_t n);
+  // Pushes the given buffer containing at most OopsPerChunk elements on the mark
+  // stack. If fewer than OopsPerChunk elements are to be pushed, the array must
+  // be terminated with a NULL.
+  // Returns whether the buffer contents were successfully pushed to the global mark
+  // stack.
+  bool par_push_chunk(oop* buffer);

-  // Moves up to max elements from the stack into the given buffer. Returns
-  // the number of elements pushed, and false if the array has been empty.
-  // Returns true if the buffer contains at least one element.
-  bool par_pop_arr(oop* buffer, size_t max, size_t* n);
+  // Pops a chunk from this mark stack, copying its contents into the given buffer.
+  // This chunk may contain up to OopsPerChunk elements. If there are fewer, the
+  // last element in the array is a NULL pointer.
+  bool par_pop_chunk(oop* buffer);

-  bool is_empty() const { return _index == 0; }
-  size_t capacity() const  { return _capacity; }
+  // Return whether the chunk list is empty. Racy due to unsynchronized access to
+  // _chunk_list.
+  bool is_empty() const { return _chunk_list == NULL; }

-  bool overflow() const { return _overflow; }
-  void clear_overflow() { _overflow = false; }
+  size_t capacity() const  { return _chunk_capacity; }
+
+  bool is_out_of_memory() const { return _out_of_memory; }
+  void clear_out_of_memory() { _out_of_memory = false; }

   bool should_expand() const { return _should_expand; }
   void set_should_expand(bool value) { _should_expand = value; }
@@ -192,20 +248,15 @@
   // Expand the stack, typically in response to an overflow condition
   void expand();

-  size_t size() const { return _index; }
-
-  void set_empty() { _index = 0; clear_overflow(); }
-
-  // Record the current index.
-  void note_start_of_gc();
+  // Return the approximate number of oops on this mark stack. Racy due to
+  // unsynchronized access to _chunks_in_chunk_list.
+  size_t size() const { return _chunks_in_chunk_list * OopsPerChunk; }

-  // Make sure that we have not added any entries to the stack during GC.
-  void note_end_of_gc();
+  void set_empty();

-  // Apply fn to each oop in the mark stack, up to the bound recorded
-  // via one of the above "note" functions. The mark stack must not
+  // Apply Fn to every oop on the mark stack. The mark stack must not
   // be modified while iterating.
-  template<typename Fn> void iterate(Fn fn);
+  template<typename Fn> void iterate(Fn fn) const PRODUCT_RETURN;
 };

 // Root Regions are regions that are not empty at the beginning of a
@@ -278,7 +329,6 @@
   friend class G1CMDrainMarkingStackClosure;
   friend class G1CMBitMapClosure;
   friend class G1CMConcurrentMarkingTask;
-  friend class G1CMMarkStack;
   friend class G1CMRemarkTask;
   friend class G1CMTask;

@@ -479,22 +529,20 @@
 public:
   // Manipulation of the global mark stack.
   // The push and pop operations are used by tasks for transfers
-  // between task-local queues and the global mark stack, and use
-  // locking for concurrency safety.
-  bool mark_stack_push(oop* arr, size_t n) {
-    _global_mark_stack.par_push_arr(arr, n);
-    if (_global_mark_stack.overflow()) {
+  // between task-local queues and the global mark stack.
+  bool mark_stack_push(oop* arr) {
+    if (!_global_mark_stack.par_push_chunk(arr)) {
       set_has_overflown();
       return false;
     }
     return true;
   }
-  void mark_stack_pop(oop* arr, size_t max, size_t* n) {
-    _global_mark_stack.par_pop_arr(arr, max, n);
+  bool mark_stack_pop(oop* arr) {
+    return _global_mark_stack.par_pop_chunk(arr);
   }
   size_t mark_stack_size() { return _global_mark_stack.size(); }
   size_t partial_mark_stack_size_target() { return _global_mark_stack.capacity()/3; }
-  bool mark_stack_overflow() { return _global_mark_stack.overflow(); }
+  bool mark_stack_overflow() { return _global_mark_stack.is_out_of_memory(); }
   bool mark_stack_empty() { return _global_mark_stack.is_empty(); }

   G1CMRootRegions* root_regions() { return &_root_regions; }
@@ -599,16 +647,6 @@
   // read-only, so use this carefully!
   void clearRangePrevBitmap(MemRegion mr);

-  // Notify data structures that a GC has started.
-  void note_start_of_gc() {
-    _global_mark_stack.note_start_of_gc();
-  }
-
-  // Notify data structures that a GC is finished.
-  void note_end_of_gc() {
-    _global_mark_stack.note_end_of_gc();
-  }
-
   // Verify that there are no CSet oops on the stacks (taskqueues /
   // global mark stack) and fingers (global / per-task).
   // If marking is not in progress, it's a no-op.
@@ -670,10 +708,7 @@
     // references reaches this limit
     refs_reached_period = 384,
     // Initial value for the hash seed, used in the work stealing code
-    init_hash_seed = 17,
-    // How many entries will be transferred between global stack and
-    // local queues at once.
-    global_stack_transfer_size = 1024
+    init_hash_seed = 17
   };

   uint _worker_id;
@@ -858,9 +893,10 @@
   // It pushes an object on the local queue.
   inline void push(oop obj);

-  // These two move entries to/from the global stack.
+  // Move entries to the global stack.
   void move_entries_to_global_stack();
-  void get_entries_from_global_stack();
+  // Move entries from the global stack, returning true if we were successful in doing so.
+  bool get_entries_from_global_stack();

   // It pops and scans objects from the local queue. If partially is
   // true, then it stops when the queue size is of a given limit. If
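The header above declares add_chunk_to_list and remove_chunk_from_list as atomic but does not show their bodies. One conventional way to implement such a singly linked LIFO is a CAS loop (a Treiber stack); the following standalone C++11 sketch uses std::atomic purely as a stand-in for whatever HotSpot primitives the actual implementation uses, and deliberately ignores the ABA problem that a production pop has to deal with:

#include <atomic>
#include <cstddef>

struct Chunk {
  Chunk* next;
  // oop payload omitted for brevity
};

// Illustrative lock-free push, in the spirit of add_chunk_to_list().
void push_chunk(std::atomic<Chunk*>& list, Chunk* elem) {
  Chunk* old_head = list.load(std::memory_order_relaxed);
  do {
    elem->next = old_head;  // link in front of the currently observed head
  } while (!list.compare_exchange_weak(old_head, elem,
                                       std::memory_order_release,
                                       std::memory_order_relaxed));
}

// Illustrative lock-free pop, in the spirit of remove_chunk_from_list().
// Note: susceptible to ABA if chunks are recycled concurrently.
Chunk* pop_chunk(std::atomic<Chunk*>& list) {
  Chunk* old_head = list.load(std::memory_order_acquire);
  while (old_head != NULL &&
         !list.compare_exchange_weak(old_head, old_head->next,
                                     std::memory_order_acquire,
                                     std::memory_order_acquire)) {
    // old_head was refreshed by the failed CAS; retry
  }
  return old_head;  // NULL if the list was empty
}

The free list and the chunk list heads are kept on separate cache lines (the _pad fields above) precisely because producers and consumers update these two pointers concurrently.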
--- a/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.inline.hpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ConcurrentMark.inline.hpp	Wed Sep 21 08:38:21 2016 +0000
@@ -89,14 +89,28 @@

 #undef check_mark

+#ifndef PRODUCT
 template<typename Fn>
-inline void G1CMMarkStack::iterate(Fn fn) {
+inline void G1CMMarkStack::iterate(Fn fn) const {
   assert_at_safepoint(true);
-  assert(!stack_modified(), "Saved index " SIZE_FORMAT " must be the same as " SIZE_FORMAT, _saved_index, _index);
-  for (size_t i = 0; i < _index; ++i) {
-    fn(_base[i]);
+
+  size_t num_chunks = 0;
+
+  OopChunk* cur = _chunk_list;
+  while (cur != NULL) {
+    guarantee(num_chunks <= _chunks_in_chunk_list, "Found " SIZE_FORMAT " oop chunks which is more than there should be", num_chunks);
+
+    for (size_t i = 0; i < OopsPerChunk; ++i) {
+      if (cur->data[i] == NULL) {
+        break;
+      }
+      fn(cur->data[i]);
+    }
+    cur = cur->next;
+    num_chunks++;
   }
 }
+#endif

 // It scans an object and visits its children.
 inline void G1CMTask::scan_object(oop obj) { process_grey_object<true>(obj); }
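Since iterate() is now chunk-aware, each chunk is scanned only up to its NULL terminator. The traversal shape, reduced to a standalone sketch (Entry, CHUNK_SIZE, Chunk and iterate_chunks are illustrative stand-ins, not HotSpot names):

#include <cstddef>

typedef void* Entry;
static const size_t CHUNK_SIZE = 1023;

struct Chunk {
  Chunk* next;
  Entry data[CHUNK_SIZE];
};

// Mirrors the shape of G1CMMarkStack::iterate(Fn): walk the chunk list,
// applying fn to every entry up to each chunk's NULL terminator.
template <typename Fn>
void iterate_chunks(const Chunk* head, Fn fn) {
  for (const Chunk* cur = head; cur != NULL; cur = cur->next) {
    for (size_t i = 0; i < CHUNK_SIZE && cur->data[i] != NULL; ++i) {
      fn(cur->data[i]);
    }
  }
}

The real version additionally guards against list corruption by checking the number of traversed chunks against _chunks_in_chunk_list, as the hunk above shows.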
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp	Wed Sep 21 08:38:21 2016 +0000
@@ -66,7 +66,8 @@
   _phase_times(new G1GCPhaseTimes(ParallelGCThreads)),
   _tenuring_threshold(MaxTenuringThreshold),
   _max_survivor_regions(0),
-  _survivors_age_table(true) { }
+  _survivors_age_table(true),
+  _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC) { }

 G1DefaultPolicy::~G1DefaultPolicy() {
   delete _ihop_control;
@@ -575,6 +576,8 @@

   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

+  _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+
   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
   if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp	Wed Sep 21 08:38:21 2016 +0000
@@ -64,6 +64,8 @@

   double _full_collection_start_sec;

+  jlong _collection_pause_end_millis;
+
   uint _young_list_target_length;
   uint _young_list_fixed_length;

@@ -237,6 +239,8 @@

   double reclaimable_bytes_perc(size_t reclaimable_bytes) const;

+  jlong collection_pause_end_millis() { return _collection_pause_end_millis; }
+
 private:
   // Sets up marking if proper conditions are met.
   void maybe_start_marking();
--- a/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1MarkSweep.cpp	Wed Sep 21 08:38:21 2016 +0000
@@ -132,9 +132,16 @@
   MarkingCodeBlobClosure follow_code_closure(&GenMarkSweep::follow_root_closure, !CodeBlobToOopClosure::FixRelocations);
   {
     G1RootProcessor root_processor(g1h, 1);
-    root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
-                                        &GenMarkSweep::follow_cld_closure,
-                                        &follow_code_closure);
+    if (ClassUnloading) {
+      root_processor.process_strong_roots(&GenMarkSweep::follow_root_closure,
+                                          &GenMarkSweep::follow_cld_closure,
+                                          &follow_code_closure);
+    } else {
+      root_processor.process_all_roots_no_string_table(
+                                          &GenMarkSweep::follow_root_closure,
+                                          &GenMarkSweep::follow_cld_closure,
+                                          &follow_code_closure);
+    }
   }

   {
@@ -157,7 +164,7 @@
   // This is the point where the entire marking should have completed.
   assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");

-  {
+  if (ClassUnloading) {
     GCTraceTime(Debug, gc, phases) trace("Class Unloading", gc_timer());

     // Unload classes and purge the SystemDictionary.
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Wed Sep 21 08:38:21 2016 +0000
@@ -34,7 +34,6 @@
 class G1ConcurrentMark;
 class DirtyCardToOopClosure;
 class G1CMBitMap;
-class G1CMMarkStack;
 class G1ParScanThreadState;
 class G1CMTask;
 class ReferenceProcessor;
--- a/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.cpp	Wed Sep 21 08:38:21 2016 +0000
@@ -24,8 +24,10 @@

 #include "precompiled.hpp"
 #include "gc/g1/g1PageBasedVirtualSpace.hpp"
+#include "gc/shared/workgroup.hpp"
 #include "oops/markOop.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.inline.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/bitMap.inline.hpp"
@@ -177,7 +179,7 @@
   guarantee(start_page < end_page,
             "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);

-  os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page));
+  os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page), _page_size);
 }

 bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
@@ -198,9 +200,6 @@
   }
   _committed.set_range(start_page, end_page);

-  if (AlwaysPreTouch) {
-    pretouch_internal(start_page, end_page);
-  }
   return zero_filled;
 }

@@ -227,6 +226,53 @@
   _committed.clear_range(start_page, end_page);
 }

+class G1PretouchTask : public AbstractGangTask {
+private:
+  char* volatile _cur_addr;
+  char* const _start_addr;
+  char* const _end_addr;
+  size_t const _page_size;
+public:
+  G1PretouchTask(char* start_address, char* end_address, size_t page_size) :
+    AbstractGangTask("G1 PreTouch",
+                     Universe::is_fully_initialized() ? GCId::current_raw() :
+                                                        // During VM initialization there is
+                                                        // no GC cycle that this task can be
+                                                        // associated with.
+                                                        GCId::undefined()),
+    _cur_addr(start_address),
+    _start_addr(start_address),
+    _end_addr(end_address),
+    _page_size(page_size) {
+  }
+
+  virtual void work(uint worker_id) {
+    size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
+    while (true) {
+      char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
+      if (touch_addr < _start_addr || touch_addr >= _end_addr) {
+        break;
+      }
+      char* end_addr = touch_addr + MIN2(actual_chunk_size, pointer_delta(_end_addr, touch_addr, sizeof(char)));
+      os::pretouch_memory(touch_addr, end_addr, _page_size);
+    }
+  }
+
+  static size_t chunk_size() { return PreTouchParallelChunkSize; }
+};
+
+void G1PageBasedVirtualSpace::pretouch(size_t start_page, size_t size_in_pages, WorkGang* pretouch_gang) {
+  guarantee(pretouch_gang != NULL, "No pretouch gang specified.");
+
+  size_t num_chunks = MAX2((size_t)1, size_in_pages * _page_size / MAX2(G1PretouchTask::chunk_size(), _page_size));
+
+  uint num_workers = MIN2((uint)num_chunks, pretouch_gang->active_workers());
+  G1PretouchTask cl(page_start(start_page), bounded_end_addr(start_page + size_in_pages), _page_size);
+  log_debug(gc, heap)("Running %s with %u workers for " SIZE_FORMAT " work units pre-touching " SIZE_FORMAT "B.",
+                      cl.name(), num_workers, num_chunks, size_in_pages * _page_size);
+  pretouch_gang->run_task(&cl, num_workers);
+}
+
 bool G1PageBasedVirtualSpace::contains(const void* p) const {
   return _low_boundary <= (const char*) p  && (const char*) p < _high_boundary;
 }
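G1PretouchTask parallelizes pre-touching by having each worker atomically bump a shared cursor to claim the next chunk of the address range, so no explicit per-worker partitioning is needed. The claiming pattern, as a standalone sketch (std::atomic and the RangeClaimer name are stand-ins; the actual code uses HotSpot's Atomic::add_ptr):

#include <atomic>
#include <cstddef>

// Shared cursor over [start, end): each claim() hands out the base address of
// the next chunk_size-byte chunk, or NULL once the range is exhausted.
// Mirrors the loop in G1PretouchTask::work(); all names are illustrative.
struct RangeClaimer {
  std::atomic<char*> cur;
  char* const start;
  char* const end;
  const size_t chunk_size;

  RangeClaimer(char* s, char* e, size_t cs)
    : cur(s), start(s), end(e), chunk_size(cs) {}

  char* claim() {
    // fetch_add returns the previous cursor value, i.e. the base of the
    // chunk this thread just claimed.
    char* base = cur.fetch_add(chunk_size);
    return (base >= start && base < end) ? base : NULL;
  }
};

Each worker loops on claim(), pre-touching [base, min(base + chunk_size, end)) until NULL is returned; because every claim is a single atomic add, no two workers ever receive the same chunk.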
--- a/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.hpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1PageBasedVirtualSpace.hpp	Wed Sep 21 08:38:21 2016 +0000
@@ -30,6 +30,8 @@
 #include "memory/virtualspace.hpp"
 #include "utilities/bitMap.hpp"

+class WorkGang;
+
 // Virtual space management helper for a virtual space with an OS page allocation
 // granularity.
 // (De-)Allocation requests are always OS page aligned by passing a page index
@@ -117,6 +119,8 @@
   // Uncommit the given area of pages starting at start being size_in_pages large.
   void uncommit(size_t start_page, size_t size_in_pages);

+  void pretouch(size_t start_page, size_t size_in_pages, WorkGang* pretouch_gang = NULL);
+
   // Initialize the given reserved space with the given base address and the size
   // actually used.
   // Prefer to commit in page_size chunks.
--- a/hotspot/src/share/vm/gc/g1/g1Policy.hpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1Policy.hpp	Wed Sep 21 08:38:21 2016 +0000
@@ -119,6 +119,8 @@
   virtual void record_full_collection_start() = 0;
   virtual void record_full_collection_end() = 0;

+  virtual jlong collection_pause_end_millis() = 0;
+
   // Must currently be called while the world is stopped.
   virtual void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) = 0;
--- a/hotspot/src/share/vm/gc/g1/g1RegionToSpaceMapper.cpp	Wed Sep 21 01:33:21 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1RegionToSpaceMapper.cpp	Wed Sep 21 08:38:21 2016 +0000
@@ -66,8 +66,12 @@
     guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
   }

-  virtual void commit_regions(uint start_idx, size_t num_regions) {
-    bool zero_filled = _storage.commit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
+  virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
+    size_t const start_page = (size_t)start_idx * _pages_per_region;
+    bool zero_filled = _storage.commit(start_page, num_regions * _pages_per_region);
+    if (AlwaysPreTouch) {
+      _storage.pretouch(start_page, num_regions * _pages_per_region, pretouch_gang);
+    }
     _commit_map.set_range(start_idx, start_idx + num_regions);
     fire_on_commit(start_idx, num_regions, zero_filled);
   }
@@ -110,19 +114,38 @@
     _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
   }

-  virtual void commit_regions(uint start_idx, size_t num_regions) {
+  virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
+    size_t const NoPage = ~(size_t)0;
+
+    size_t first_committed = NoPage;
+    size_t num_committed = 0;
+
+    bool all_zero_filled = true;
+
     for (uint i = start_idx; i < start_idx + num_regions; i++) {
       assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
       size_t idx = region_idx_to_page_idx(i);
       uint old_refcount = _refcounts.get_by_index(idx);
+
       bool zero_filled = false;
       if (old_refcount == 0) {
+        if (first_committed == NoPage) {
+          first_committed = idx;
+          num_committed = 1;
+        } else {
+          num_committed++;
+        }
         zero_filled = _storage.commit(idx, 1);
       }
+      all_zero_filled &= zero_filled;
+
       _refcounts.set_by_index(idx, old_refcount + 1);
       _commit_map.set_bit(i);
-      fire_on_commit(i, 1, zero_filled);
     }
+    if (AlwaysPreTouch && num_committed > 0) {
+      _storage.pretouch(first_committed, num_committed, pretouch_gang);
+    }
+    fire_on_commit(start_idx, num_regions, all_zero_filled);
   }

   virtual void uncommit_regions(uint start_idx, size_t num_regions) {