changeset 26278:0340b27c88c2
Merge
field | value |
---|---|
author | duke |
date | Wed, 05 Jul 2017 19:59:00 +0200 |
parents | 639910c83675 44122c02db6b |
children | c36c00926937 |
files | jdk/make/CopyIntoClasses.gmk jdk/make/ModulesXml.gmk jdk/make/bundle/JDK-Info.plist jdk/make/bundle/JRE-Info.plist jdk/make/src/classes/build/tools/module/GenerateModulesXml.java jdk/src/demo/aix/jvmti/hprof/porting_aix.c jdk/src/demo/aix/jvmti/hprof/porting_aix.h jdk/src/demo/share/jpda/com/sun/tools/example/README jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/AccessWatchpointSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/AmbiguousMethodException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/BreakpointSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/ChildSession.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/EvaluationException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/EventRequestSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/EventRequestSpecList.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/ExceptionSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/ExecutionManager.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/FrameIndexOutOfBoundsException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/InputListener.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/JDIEventSource.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/LineBreakpointSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/LineNotFoundException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/MalformedMemberNameException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/MethodBreakpointSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/MethodNotFoundException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/ModificationWatchpointSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/NoSessionException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/NoThreadException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/OutputListener.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/ParseException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/PatternReferenceTypeSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/ReferenceTypeSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/Session.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/SessionListener.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/SourceNameReferenceTypeSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/SpecErrorEvent.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/SpecEvent.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/SpecListener.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/ThreadGroupIterator.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/ThreadInfo.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/ThreadIterator.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/Utils.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/VMLaunchFailureException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/VMNotInterruptedException.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/bdi/WatchpointSpec.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/AbstractEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/AccessWatchpointEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/ClassPrepareEventSet.java 
jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/ClassUnloadEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/ExceptionEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/JDIAdapter.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/JDIListener.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/LocatableEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/LocationTriggerEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/ModificationWatchpointEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/ThreadDeathEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/ThreadStartEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/VMDeathEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/VMDisconnectEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/VMStartEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/event/WatchpointEventSet.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/expr/Expr.jj jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/ApplicationTool.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/ClassManager.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/ClassTreeTool.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/CommandInterpreter.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/CommandTool.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/ContextListener.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/ContextManager.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/CurrentFrameChangedEvent.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/Environment.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/GUI.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/Icons.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/JDBFileFilter.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/JDBMenuBar.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/JDBToolBar.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/LaunchTool.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/MonitorListModel.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/MonitorTool.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/OutputSink.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/SearchPath.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/SingleLeafTreeSelectionModel.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/SourceListener.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/SourceManager.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/SourceModel.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/SourceTool.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/SourceTreeTool.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/SourcepathChangedEvent.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/StackTraceTool.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/ThreadTreeTool.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/TypeScript.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/TypeScriptOutputListener.java jdk/src/demo/share/jpda/com/sun/tools/example/debug/gui/TypeScriptWriter.java jdk/src/demo/share/jpda/com/sun/tools/example/doc/index.html 
jdk/src/demo/share/jpda/com/sun/tools/example/doc/javadt.html jdk/src/demo/share/jpda/com/sun/tools/example/doc/jdb.html jdk/src/demo/share/jpda/com/sun/tools/example/doc/trace.html jdk/src/demo/share/jpda/com/sun/tools/example/trace/EventThread.java jdk/src/demo/share/jpda/com/sun/tools/example/trace/StreamRedirectThread.java jdk/src/demo/share/jpda/com/sun/tools/example/trace/Trace.java jdk/src/demo/share/jvmti/hprof/README.txt jdk/src/demo/share/jvmti/hprof/debug_malloc.c jdk/src/demo/share/jvmti/hprof/debug_malloc.h jdk/src/demo/share/jvmti/hprof/hprof.h jdk/src/demo/share/jvmti/hprof/hprof_b_spec.h jdk/src/demo/share/jvmti/hprof/hprof_blocks.c jdk/src/demo/share/jvmti/hprof/hprof_blocks.h jdk/src/demo/share/jvmti/hprof/hprof_check.c jdk/src/demo/share/jvmti/hprof/hprof_check.h jdk/src/demo/share/jvmti/hprof/hprof_class.c jdk/src/demo/share/jvmti/hprof/hprof_class.h jdk/src/demo/share/jvmti/hprof/hprof_cpu.c jdk/src/demo/share/jvmti/hprof/hprof_cpu.h jdk/src/demo/share/jvmti/hprof/hprof_error.c jdk/src/demo/share/jvmti/hprof/hprof_error.h jdk/src/demo/share/jvmti/hprof/hprof_event.c jdk/src/demo/share/jvmti/hprof/hprof_event.h jdk/src/demo/share/jvmti/hprof/hprof_frame.c jdk/src/demo/share/jvmti/hprof/hprof_frame.h jdk/src/demo/share/jvmti/hprof/hprof_init.c jdk/src/demo/share/jvmti/hprof/hprof_init.h jdk/src/demo/share/jvmti/hprof/hprof_io.c jdk/src/demo/share/jvmti/hprof/hprof_io.h jdk/src/demo/share/jvmti/hprof/hprof_ioname.c jdk/src/demo/share/jvmti/hprof/hprof_ioname.h jdk/src/demo/share/jvmti/hprof/hprof_listener.c jdk/src/demo/share/jvmti/hprof/hprof_listener.h jdk/src/demo/share/jvmti/hprof/hprof_loader.c jdk/src/demo/share/jvmti/hprof/hprof_loader.h jdk/src/demo/share/jvmti/hprof/hprof_md.h jdk/src/demo/share/jvmti/hprof/hprof_monitor.c jdk/src/demo/share/jvmti/hprof/hprof_monitor.h jdk/src/demo/share/jvmti/hprof/hprof_object.c jdk/src/demo/share/jvmti/hprof/hprof_object.h jdk/src/demo/share/jvmti/hprof/hprof_reference.c jdk/src/demo/share/jvmti/hprof/hprof_reference.h jdk/src/demo/share/jvmti/hprof/hprof_site.c jdk/src/demo/share/jvmti/hprof/hprof_site.h jdk/src/demo/share/jvmti/hprof/hprof_stack.c jdk/src/demo/share/jvmti/hprof/hprof_stack.h jdk/src/demo/share/jvmti/hprof/hprof_string.c jdk/src/demo/share/jvmti/hprof/hprof_string.h jdk/src/demo/share/jvmti/hprof/hprof_table.c jdk/src/demo/share/jvmti/hprof/hprof_table.h jdk/src/demo/share/jvmti/hprof/hprof_tag.c jdk/src/demo/share/jvmti/hprof/hprof_tag.h jdk/src/demo/share/jvmti/hprof/hprof_tls.c jdk/src/demo/share/jvmti/hprof/hprof_tls.h jdk/src/demo/share/jvmti/hprof/hprof_trace.c jdk/src/demo/share/jvmti/hprof/hprof_trace.h jdk/src/demo/share/jvmti/hprof/hprof_tracker.c jdk/src/demo/share/jvmti/hprof/hprof_tracker.h jdk/src/demo/share/jvmti/hprof/hprof_util.c jdk/src/demo/share/jvmti/hprof/hprof_util.h jdk/src/demo/share/jvmti/hprof/jvm.hprof.txt jdk/src/demo/share/jvmti/hprof/manual.html jdk/src/demo/share/jvmti/hprof/sample.makefile.txt jdk/src/demo/unix/jvmti/hprof/hprof_md.c jdk/src/demo/windows/jvmti/hprof/hprof_md.c jdk/src/jdk.attach/aix/classes/sun/tools/attach/AixAttachProvider.java jdk/src/jdk.attach/aix/classes/sun/tools/attach/AixVirtualMachine.java jdk/src/jdk.attach/aix/native/libattach/AixVirtualMachine.c jdk/src/jdk.attach/unix/classes/sun/tools/attach/BsdAttachProvider.java jdk/src/jdk.attach/unix/classes/sun/tools/attach/BsdVirtualMachine.java jdk/src/jdk.attach/unix/classes/sun/tools/attach/LinuxAttachProvider.java jdk/src/jdk.attach/unix/classes/sun/tools/attach/LinuxVirtualMachine.java 
jdk/src/jdk.attach/unix/classes/sun/tools/attach/SolarisAttachProvider.java jdk/src/jdk.attach/unix/classes/sun/tools/attach/SolarisVirtualMachine.java jdk/src/jdk.attach/unix/native/libattach/BsdVirtualMachine.c jdk/src/jdk.attach/unix/native/libattach/LinuxVirtualMachine.c jdk/src/jdk.attach/unix/native/libattach/SolarisVirtualMachine.c jdk/src/jdk.attach/windows/classes/sun/tools/attach/WindowsAttachProvider.java jdk/src/jdk.attach/windows/classes/sun/tools/attach/WindowsVirtualMachine.java jdk/src/jdk.attach/windows/native/libattach/WindowsAttachProvider.c jdk/src/jdk.attach/windows/native/libattach/WindowsVirtualMachine.c jdk/test/com/sun/net/httpserver/SimpleSSLContext.java jdk/test/com/sun/net/httpserver/testkeys jdk/test/java/lang/instrument/ilib/ClassDump.java jdk/test/java/lang/instrument/ilib/ClassReaderWriter.java jdk/test/java/lang/instrument/ilib/Info.java jdk/test/java/lang/instrument/ilib/Inject.java jdk/test/java/lang/instrument/ilib/InjectBytecodes.java jdk/test/java/lang/instrument/ilib/Injector.java jdk/test/java/lang/instrument/ilib/Options.java jdk/test/java/lang/instrument/ilib/RuntimeConstants.java make/common/SetupJava.gmk |
diffstat | 665 files changed, 40830 insertions(+), 50453 deletions(-) |
--- a/.hgtags-top-repo Fri Aug 29 11:57:50 2014 -0700 +++ b/.hgtags-top-repo Wed Jul 05 19:59:00 2017 +0200 @@ -270,3 +270,4 @@ aefd8899a8d6615fb34ba99b2e38996a7145baa8 jdk9-b25 d3ec8d048e6c3c46b6e0ee011cc551ad386dfba5 jdk9-b26 ba5645f2735b41ed085d07ba20fa7b322afff318 jdk9-b27 +ea2f7981236f3812436958748ab3d26e80a35130 jdk9-b28
--- a/Makefile Fri Aug 29 11:57:50 2014 -0700 +++ b/Makefile Wed Jul 05 19:59:00 2017 +0200 @@ -136,10 +136,12 @@ $(info . make docs # Create all docs) $(info . make docs-javadoc # Create just javadocs, depends on less than full docs) $(info . make profiles # Create complete j2re compact profile images) - $(info . make bootcycle-images # Build images twice, second time with newly build JDK) + $(info . make bootcycle-images # Build images twice, second time with newly built JDK) $(info . make install # Install the generated images locally) $(info . make clean # Remove all files generated by make, but not those) - $(info . # generated by configure) + $(info . # generated by configure. Do not run clean and other) + $(info . # targets together as that might behave in an) + $(info . # unexpected way.) $(info . make dist-clean # Remove all files, including configuration) $(info . make help # Give some help on using make) $(info . make test # Run tests, default is all tests (see TEST below))
--- a/common/autoconf/basics.m4 Fri Aug 29 11:57:50 2014 -0700 +++ b/common/autoconf/basics.m4 Wed Jul 05 19:59:00 2017 +0200 @@ -849,7 +849,12 @@ if test -f $DELETEDIR/TestIfFindSupportsDelete; then # No, it does not. rm $DELETEDIR/TestIfFindSupportsDelete - FIND_DELETE="-exec rm \{\} \+" + if test "x$OPENJDK_TARGET_OS" = "xaix"; then + # AIX 'find' is buggy if called with '-exec {} \+' and an empty file list + FIND_DELETE="-print | xargs rm" + else + FIND_DELETE="-exec rm \{\} \+" + fi AC_MSG_RESULT([no]) else AC_MSG_RESULT([yes])
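The AIX workaround above swaps the `-exec rm {} +` action for a pipe into xargs. Since the pipe lives inside the FIND_DELETE variable, a plain expansion is not enough in straight shell, so this sketch re-parses the command with eval; `./build` and `*.o` are made-up placeholders, not paths from the build:

```sh
# Minimal sketch of the two delete strategies selected above.
if test "x$OPENJDK_TARGET_OS" = "xaix"; then
  FIND_DELETE="-print | xargs rm"    # avoids AIX find's empty-file-list bug
else
  FIND_DELETE="-exec rm \{\} \+"
fi
# eval re-parses the line so the embedded pipe (or the escaped {} +) works:
eval find ./build -type f -name "'*.o'" $FIND_DELETE
```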
--- a/common/autoconf/boot-jdk.m4 Fri Aug 29 11:57:50 2014 -0700 +++ b/common/autoconf/boot-jdk.m4 Wed Jul 05 19:59:00 2017 +0200 @@ -370,18 +370,27 @@ # Maximum amount of heap memory. # Maximum stack size. + JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2` if test "x$BUILD_NUM_BITS" = x32; then - JVM_MAX_HEAP=1100M + if test "$JVM_MAX_HEAP" -gt "1100"; then + JVM_MAX_HEAP=1100 + elif test "$JVM_MAX_HEAP" -lt "512"; then + JVM_MAX_HEAP=512 + fi STACK_SIZE=768 else # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit # pointers are used. Apparently, we need to increase the heap and stack # space for the jvm. More specifically, when running javac to build huge # jdk batch - JVM_MAX_HEAP=1600M + if test "$JVM_MAX_HEAP" -gt "1600"; then + JVM_MAX_HEAP=1600 + elif test "$JVM_MAX_HEAP" -lt "512"; then + JVM_MAX_HEAP=512 + fi STACK_SIZE=1536 fi - ADD_JVM_ARG_IF_OK([-Xmx$JVM_MAX_HEAP],boot_jdk_jvmargs_big,[$JAVA]) + ADD_JVM_ARG_IF_OK([-Xmx${JVM_MAX_HEAP}M],boot_jdk_jvmargs_big,[$JAVA]) ADD_JVM_ARG_IF_OK([-XX:ThreadStackSize=$STACK_SIZE],boot_jdk_jvmargs_big,[$JAVA]) AC_MSG_RESULT([$boot_jdk_jvmargs_big])
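The boot JDK's big-workload heap is no longer a fixed value: it becomes half of the probed memory, clamped to [512, 1100] MB for 32-bit builds and [512, 1600] MB for 64-bit builds, and only then passed as `-Xmx${JVM_MAX_HEAP}M`. A standalone sketch of the rule, with MEMORY_SIZE and BUILD_NUM_BITS standing in for the configure-probed values:

```sh
# Sketch of the new clamping rule; all values are in MB.
MEMORY_SIZE=8192        # assumed: probed physical memory
BUILD_NUM_BITS=64       # assumed: probed build word size
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`
if test "x$BUILD_NUM_BITS" = x32; then
  if test "$JVM_MAX_HEAP" -gt 1100; then JVM_MAX_HEAP=1100; fi
else
  if test "$JVM_MAX_HEAP" -gt 1600; then JVM_MAX_HEAP=1600; fi
fi
if test "$JVM_MAX_HEAP" -lt 512; then JVM_MAX_HEAP=512; fi
echo "boot JDK big-workload heap: -Xmx${JVM_MAX_HEAP}M"   # here: -Xmx1600M
```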
--- a/common/autoconf/build-performance.m4 Fri Aug 29 11:57:50 2014 -0700 +++ b/common/autoconf/build-performance.m4 Wed Jul 05 19:59:00 2017 +0200 @@ -131,8 +131,8 @@ if test "x$with_jobs" = x; then # Number of jobs was not specified, calculate. AC_MSG_CHECKING([for appropriate number of jobs to run in parallel]) - # Approximate memory in GB, rounding up a bit. - memory_gb=`expr $MEMORY_SIZE / 1100` + # Approximate memory in GB. + memory_gb=`expr $MEMORY_SIZE / 1024` # Pick the lowest of memory in gb and number of cores. if test "$memory_gb" -lt "$NUM_CORES"; then JOBS="$memory_gb" @@ -291,16 +291,11 @@ AC_MSG_ERROR([Could not execute server java: $SJAVAC_SERVER_JAVA]) fi else - SJAVAC_SERVER_JAVA="" - # Hotspot specific options. - ADD_JVM_ARG_IF_OK([-verbosegc],SJAVAC_SERVER_JAVA,[$JAVA]) - # JRockit specific options. - ADD_JVM_ARG_IF_OK([-Xverbose:gc],SJAVAC_SERVER_JAVA,[$JAVA]) - SJAVAC_SERVER_JAVA="$JAVA $SJAVAC_SERVER_JAVA" + SJAVAC_SERVER_JAVA="$JAVA" fi AC_SUBST(SJAVAC_SERVER_JAVA) - if test "$MEMORY_SIZE" -gt "2500"; then + if test "$MEMORY_SIZE" -gt "3000"; then ADD_JVM_ARG_IF_OK([-d64],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA]) if test "$JVM_ARG_OK" = true; then JVM_64BIT=true @@ -308,34 +303,33 @@ fi fi + MX_VALUE=`expr $MEMORY_SIZE / 2` if test "$JVM_64BIT" = true; then - if test "$MEMORY_SIZE" -gt "17000"; then - ADD_JVM_ARG_IF_OK([-Xms10G -Xmx10G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA]) + # Set ms lower than mx since more than one instance of the server might + # get launched at the same time before they figure out which instance won. + MS_VALUE=512 + if test "$MX_VALUE" -gt "2048"; then + MX_VALUE=2048 fi - if test "$MEMORY_SIZE" -gt "10000" && test "$JVM_ARG_OK" = false; then - ADD_JVM_ARG_IF_OK([-Xms6G -Xmx6G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA]) - fi - if test "$MEMORY_SIZE" -gt "5000" && test "$JVM_ARG_OK" = false; then - ADD_JVM_ARG_IF_OK([-Xms1G -Xmx3G],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA]) - fi - if test "$MEMORY_SIZE" -gt "3800" && test "$JVM_ARG_OK" = false; then - ADD_JVM_ARG_IF_OK([-Xms1G -Xmx2500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA]) + else + MS_VALUE=256 + if test "$MX_VALUE" -gt "1500"; then + MX_VALUE=1500 fi fi - if test "$MEMORY_SIZE" -gt "2500" && test "$JVM_ARG_OK" = false; then - ADD_JVM_ARG_IF_OK([-Xms1000M -Xmx1500M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA]) - fi - if test "$MEMORY_SIZE" -gt "1000" && test "$JVM_ARG_OK" = false; then - ADD_JVM_ARG_IF_OK([-Xms400M -Xmx1100M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA]) + if test "$MX_VALUE" -lt "512"; then + MX_VALUE=512 fi - if test "$JVM_ARG_OK" = false; then - ADD_JVM_ARG_IF_OK([-Xms256M -Xmx512M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA]) - fi + ADD_JVM_ARG_IF_OK([-Xms${MS_VALUE}M -Xmx${MX_VALUE}M],SJAVAC_SERVER_JAVA,[$SJAVAC_SERVER_JAVA]) - AC_MSG_CHECKING([whether to use sjavac]) AC_ARG_ENABLE([sjavac], [AS_HELP_STRING([--enable-sjavac], [use sjavac to do fast incremental compiles @<:@disabled@:>@])], [ENABLE_SJAVAC="${enableval}"], [ENABLE_SJAVAC='no']) + if test "x$JVM_ARG_OK" = "xfalse"; then + AC_MSG_WARN([Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac]) + ENABLE_SJAVAC=no; + fi + AC_MSG_CHECKING([whether to use sjavac]) AC_MSG_RESULT([$ENABLE_SJAVAC]) AC_SUBST(ENABLE_SJAVAC)
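build-performance.m4 gets the same treatment: the old ladder of hard-coded -Xms/-Xmx probes for the sjavac server collapses into one computed pair, with -Xms kept deliberately below -Xmx because several server candidates may launch before one wins. A sketch under assumed probe results:

```sh
# Sketch: sjavac server heap sizing after this change (values in MB).
MEMORY_SIZE=4096        # assumed: probed physical memory
JVM_64BIT=true          # assumed: set earlier by the -d64 probe
MX_VALUE=`expr $MEMORY_SIZE / 2`
if test "$JVM_64BIT" = true; then
  MS_VALUE=512          # low -Xms: several candidate servers may start at once
  if test "$MX_VALUE" -gt 2048; then MX_VALUE=2048; fi
else
  MS_VALUE=256
  if test "$MX_VALUE" -gt 1500; then MX_VALUE=1500; fi
fi
if test "$MX_VALUE" -lt 512; then MX_VALUE=512; fi
echo "-Xms${MS_VALUE}M -Xmx${MX_VALUE}M"   # here: -Xms512M -Xmx2048M
```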
--- a/common/autoconf/configure.ac Fri Aug 29 11:57:50 2014 -0700 +++ b/common/autoconf/configure.ac Wed Jul 05 19:59:00 2017 +0200 @@ -142,7 +142,6 @@ ############################################################################### BOOTJDK_SETUP_BOOT_JDK -BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS ############################################################################### # @@ -233,6 +232,9 @@ BPERF_SETUP_BUILD_MEMORY BPERF_SETUP_BUILD_JOBS +# Setup arguments for the boot jdk (after cores and memory have been setup) +BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS + # Setup smart javac (after cores and memory have been setup) BPERF_SETUP_SMART_JAVAC
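The reorder matters because BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS now reads MEMORY_SIZE, which is only set by BPERF_SETUP_BUILD_MEMORY. Run in the old order, the heap arithmetic would see an empty variable, roughly as in this sketch (names mirror the configure code):

```sh
# Sketch of the failure the move avoids: deriving the heap before the
# memory probe has run leaves MEMORY_SIZE empty and expr errors out.
MEMORY_SIZE=""                          # probe has not run yet
JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2`    # expr: syntax error, result empty
echo "heap='${JVM_MAX_HEAP}'"           # prints heap=''
```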
--- a/common/autoconf/flags.m4 Fri Aug 29 11:57:50 2014 -0700 +++ b/common/autoconf/flags.m4 Wed Jul 05 19:59:00 2017 +0200 @@ -342,17 +342,15 @@ # no adjustment ;; fastdebug ) - # Add compile time bounds checks. - CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" - CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" + # no adjustment ;; slowdebug ) - # Add runtime bounds checks and symbol info. - CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1" - CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1" + # Add runtime stack smashing and undefined behavior checks + CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1" + CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1" if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" - CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" + CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIsOR_FLAG" fi ;; esac
--- a/common/autoconf/generated-configure.sh Fri Aug 29 11:57:50 2014 -0700 +++ b/common/autoconf/generated-configure.sh Wed Jul 05 19:59:00 2017 +0200 @@ -634,6 +634,10 @@ SJAVAC_SERVER_DIR ENABLE_SJAVAC SJAVAC_SERVER_JAVA +JAVA_TOOL_FLAGS_SMALL +JAVA_FLAGS_SMALL +JAVA_FLAGS_BIG +JAVA_FLAGS JOBS MEMORY_SIZE NUM_CORES @@ -805,10 +809,6 @@ JAXP_TOPDIR CORBA_TOPDIR LANGTOOLS_TOPDIR -JAVA_TOOL_FLAGS_SMALL -JAVA_FLAGS_SMALL -JAVA_FLAGS_BIG -JAVA_FLAGS JAVAC_FLAGS BOOT_JDK_SOURCETARGET JARSIGNER @@ -1064,7 +1064,6 @@ with_user_release_suffix with_build_number with_boot_jdk -with_boot_jdk_jvmargs with_add_source_root with_override_source_root with_adds_and_overrides @@ -1106,6 +1105,7 @@ with_num_cores with_memory_size with_jobs +with_boot_jdk_jvmargs with_sjavac_server_java enable_sjavac enable_precompiled_headers @@ -1904,10 +1904,6 @@ number is not set.[username_builddateb00] --with-build-number Set build number value for build [b00] --with-boot-jdk path to Boot JDK (used to bootstrap build) [probed] - --with-boot-jdk-jvmargs specify JVM arguments to be passed to all java - invocations of boot JDK, overriding the default - values, e.g --with-boot-jdk-jvmargs="-Xmx8G - -enableassertions" --with-add-source-root for each and every source directory, look in this additional source root for the same directory; if it exists and have files in it, include it in the build @@ -1979,6 +1975,10 @@ --with-memory-size=1024 [probed] --with-jobs number of parallel jobs to let make run [calculated based on cores and memory] + --with-boot-jdk-jvmargs specify JVM arguments to be passed to all java + invocations of boot JDK, overriding the default + values, e.g --with-boot-jdk-jvmargs="-Xmx8G + -enableassertions" --with-sjavac-server-java use this java binary for running the sjavac background server [Boot JDK java] @@ -4321,7 +4321,7 @@ #CUSTOM_AUTOCONF_INCLUDE # Do not change or remove the following line, it is needed for consistency checks: -DATE_WHEN_GENERATED=1408448519 +DATE_WHEN_GENERATED=1409311712 ############################################################################### # @@ -17284,7 +17284,12 @@ if test -f $DELETEDIR/TestIfFindSupportsDelete; then # No, it does not. rm $DELETEDIR/TestIfFindSupportsDelete - FIND_DELETE="-exec rm \{\} \+" + if test "x$OPENJDK_TARGET_OS" = "xaix"; then + # AIX 'find' is buggy if called with '-exec {} \+' and an empty file list + FIND_DELETE="-print | xargs rm" + else + FIND_DELETE="-exec rm \{\} \+" + fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } else @@ -26315,197 +26320,6 @@ - ############################################################################## - # - # Specify jvm options for anything that is run with the Boot JDK. - # Not all JVM:s accept the same arguments on the command line. - # - -# Check whether --with-boot-jdk-jvmargs was given. -if test "${with_boot_jdk_jvmargs+set}" = set; then : - withval=$with_boot_jdk_jvmargs; -fi - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command " >&5 -$as_echo_n "checking flags for boot jdk java command ... " >&6; } - - # Disable special log output when a debug build is used as Boot JDK... 
- - $ECHO "Check if jvm arg is ok: -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" >&5 - $ECHO "Command: $JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version" >&5 - OUTPUT=`$JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - # Apply user provided options. - - $ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5 - $ECHO "Command: $JAVA $with_boot_jdk_jvmargs -version" >&5 - OUTPUT=`$JAVA $with_boot_jdk_jvmargs -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs="$boot_jdk_jvmargs $with_boot_jdk_jvmargs" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs" >&5 -$as_echo "$boot_jdk_jvmargs" >&6; } - - # For now, general JAVA_FLAGS are the same as the boot jdk jvmargs - JAVA_FLAGS=$boot_jdk_jvmargs - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5 -$as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; } - - # Starting amount of heap memory. - - $ECHO "Check if jvm arg is ok: -Xms64M" >&5 - $ECHO "Command: $JAVA -Xms64M -version" >&5 - OUTPUT=`$JAVA -Xms64M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xms64M" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - # Maximum amount of heap memory. - # Maximum stack size. - if test "x$BUILD_NUM_BITS" = x32; then - JVM_MAX_HEAP=1100M - STACK_SIZE=768 - else - # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit - # pointers are used. Apparently, we need to increase the heap and stack - # space for the jvm. 
More specifically, when running javac to build huge - # jdk batch - JVM_MAX_HEAP=1600M - STACK_SIZE=1536 - fi - - $ECHO "Check if jvm arg is ok: -Xmx$JVM_MAX_HEAP" >&5 - $ECHO "Command: $JAVA -Xmx$JVM_MAX_HEAP -version" >&5 - OUTPUT=`$JAVA -Xmx$JVM_MAX_HEAP -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xmx$JVM_MAX_HEAP" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - $ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5 - $ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5 - OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:ThreadStackSize=$STACK_SIZE" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_big" >&5 -$as_echo "$boot_jdk_jvmargs_big" >&6; } - - JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for small workloads" >&5 -$as_echo_n "checking flags for boot jdk java command for small workloads... " >&6; } - - # Use serial gc for small short lived tools if possible - - $ECHO "Check if jvm arg is ok: -XX:+UseSerialGC" >&5 - $ECHO "Command: $JAVA -XX:+UseSerialGC -version" >&5 - OUTPUT=`$JAVA -XX:+UseSerialGC -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -XX:+UseSerialGC" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - $ECHO "Check if jvm arg is ok: -Xms32M" >&5 - $ECHO "Command: $JAVA -Xms32M -version" >&5 - OUTPUT=`$JAVA -Xms32M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xms32M" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - $ECHO "Check if jvm arg is ok: -Xmx512M" >&5 - $ECHO "Command: $JAVA -Xmx512M -version" >&5 - OUTPUT=`$JAVA -Xmx512M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xmx512M" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_small" >&5 -$as_echo "$boot_jdk_jvmargs_small" >&6; } - - JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small - - - JAVA_TOOL_FLAGS_SMALL="" - for f in $JAVA_FLAGS_SMALL; do - JAVA_TOOL_FLAGS_SMALL="$JAVA_TOOL_FLAGS_SMALL -J$f" - done - - - ############################################################################### # # Configure the sources to use. We can add or override individual directories. @@ -42515,17 +42329,15 @@ # no adjustment ;; fastdebug ) - # Add compile time bounds checks. 
- CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" - CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" + # no adjustment ;; slowdebug ) - # Add runtime bounds checks and symbol info. - CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1" - CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1" + # Add runtime stack smashing and undefined behavior checks + CFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1" + CXXFLAGS_DEBUG_OPTIONS="-fstack-protector-all --param ssp-buffer-size=1" if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" - CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" + CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIsOR_FLAG" fi ;; esac @@ -49884,8 +49696,8 @@ # Number of jobs was not specified, calculate. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for appropriate number of jobs to run in parallel" >&5 $as_echo_n "checking for appropriate number of jobs to run in parallel... " >&6; } - # Approximate memory in GB, rounding up a bit. - memory_gb=`expr $MEMORY_SIZE / 1100` + # Approximate memory in GB. + memory_gb=`expr $MEMORY_SIZE / 1024` # Pick the lowest of memory in gb and number of cores. if test "$memory_gb" -lt "$NUM_CORES"; then JOBS="$memory_gb" @@ -49911,6 +49723,208 @@ +# Setup arguments for the boot jdk (after cores and memory have been setup) + + ############################################################################## + # + # Specify jvm options for anything that is run with the Boot JDK. + # Not all JVM:s accept the same arguments on the command line. + # + +# Check whether --with-boot-jdk-jvmargs was given. +if test "${with_boot_jdk_jvmargs+set}" = set; then : + withval=$with_boot_jdk_jvmargs; +fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command " >&5 +$as_echo_n "checking flags for boot jdk java command ... " >&6; } + + # Disable special log output when a debug build is used as Boot JDK... + + $ECHO "Check if jvm arg is ok: -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" >&5 + $ECHO "Command: $JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version" >&5 + OUTPUT=`$JAVA -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs="$boot_jdk_jvmargs -XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + # Apply user provided options. 
+ + $ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5 + $ECHO "Command: $JAVA $with_boot_jdk_jvmargs -version" >&5 + OUTPUT=`$JAVA $with_boot_jdk_jvmargs -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs="$boot_jdk_jvmargs $with_boot_jdk_jvmargs" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs" >&5 +$as_echo "$boot_jdk_jvmargs" >&6; } + + # For now, general JAVA_FLAGS are the same as the boot jdk jvmargs + JAVA_FLAGS=$boot_jdk_jvmargs + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5 +$as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; } + + # Starting amount of heap memory. + + $ECHO "Check if jvm arg is ok: -Xms64M" >&5 + $ECHO "Command: $JAVA -Xms64M -version" >&5 + OUTPUT=`$JAVA -Xms64M -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xms64M" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + # Maximum amount of heap memory. + # Maximum stack size. + JVM_MAX_HEAP=`expr $MEMORY_SIZE / 2` + if test "x$BUILD_NUM_BITS" = x32; then + if test "$JVM_MAX_HEAP" -gt "1100"; then + JVM_MAX_HEAP=1100 + elif test "$JVM_MAX_HEAP" -lt "512"; then + JVM_MAX_HEAP=512 + fi + STACK_SIZE=768 + else + # Running Javac on a JVM on a 64-bit machine, takes more space since 64-bit + # pointers are used. Apparently, we need to increase the heap and stack + # space for the jvm. More specifically, when running javac to build huge + # jdk batch + if test "$JVM_MAX_HEAP" -gt "1600"; then + JVM_MAX_HEAP=1600 + elif test "$JVM_MAX_HEAP" -lt "512"; then + JVM_MAX_HEAP=512 + fi + STACK_SIZE=1536 + fi + + $ECHO "Check if jvm arg is ok: -Xmx${JVM_MAX_HEAP}M" >&5 + $ECHO "Command: $JAVA -Xmx${JVM_MAX_HEAP}M -version" >&5 + OUTPUT=`$JAVA -Xmx${JVM_MAX_HEAP}M -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -Xmx${JVM_MAX_HEAP}M" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + $ECHO "Check if jvm arg is ok: -XX:ThreadStackSize=$STACK_SIZE" >&5 + $ECHO "Command: $JAVA -XX:ThreadStackSize=$STACK_SIZE -version" >&5 + OUTPUT=`$JAVA -XX:ThreadStackSize=$STACK_SIZE -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_big="$boot_jdk_jvmargs_big -XX:ThreadStackSize=$STACK_SIZE" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_big" >&5 +$as_echo "$boot_jdk_jvmargs_big" >&6; } + + JAVA_FLAGS_BIG=$boot_jdk_jvmargs_big + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for small workloads" >&5 +$as_echo_n "checking flags for boot jdk java command for small workloads... 
" >&6; } + + # Use serial gc for small short lived tools if possible + + $ECHO "Check if jvm arg is ok: -XX:+UseSerialGC" >&5 + $ECHO "Command: $JAVA -XX:+UseSerialGC -version" >&5 + OUTPUT=`$JAVA -XX:+UseSerialGC -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -XX:+UseSerialGC" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + $ECHO "Check if jvm arg is ok: -Xms32M" >&5 + $ECHO "Command: $JAVA -Xms32M -version" >&5 + OUTPUT=`$JAVA -Xms32M -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xms32M" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + $ECHO "Check if jvm arg is ok: -Xmx512M" >&5 + $ECHO "Command: $JAVA -Xmx512M -version" >&5 + OUTPUT=`$JAVA -Xmx512M -version 2>&1` + FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` + FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` + if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then + boot_jdk_jvmargs_small="$boot_jdk_jvmargs_small -Xmx512M" + JVM_ARG_OK=true + else + $ECHO "Arg failed:" >&5 + $ECHO "$OUTPUT" >&5 + JVM_ARG_OK=false + fi + + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $boot_jdk_jvmargs_small" >&5 +$as_echo "$boot_jdk_jvmargs_small" >&6; } + + JAVA_FLAGS_SMALL=$boot_jdk_jvmargs_small + + + JAVA_TOOL_FLAGS_SMALL="" + for f in $JAVA_FLAGS_SMALL; do + JAVA_TOOL_FLAGS_SMALL="$JAVA_TOOL_FLAGS_SMALL -J$f" + done + + + # Setup smart javac (after cores and memory have been setup) @@ -49927,44 +49941,11 @@ as_fn_error $? "Could not execute server java: $SJAVAC_SERVER_JAVA" "$LINENO" 5 fi else - SJAVAC_SERVER_JAVA="" - # Hotspot specific options. - - $ECHO "Check if jvm arg is ok: -verbosegc" >&5 - $ECHO "Command: $JAVA -verbosegc -version" >&5 - OUTPUT=`$JAVA -verbosegc -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -verbosegc" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - # JRockit specific options. 
- - $ECHO "Check if jvm arg is ok: -Xverbose:gc" >&5 - $ECHO "Command: $JAVA -Xverbose:gc -version" >&5 - OUTPUT=`$JAVA -Xverbose:gc -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xverbose:gc" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - SJAVAC_SERVER_JAVA="$JAVA $SJAVAC_SERVER_JAVA" - fi - - - if test "$MEMORY_SIZE" -gt "2500"; then + SJAVAC_SERVER_JAVA="$JAVA" + fi + + + if test "$MEMORY_SIZE" -gt "3000"; then $ECHO "Check if jvm arg is ok: -d64" >&5 $ECHO "Command: $SJAVAC_SERVER_JAVA -d64 -version" >&5 @@ -49986,67 +49967,31 @@ fi fi + MX_VALUE=`expr $MEMORY_SIZE / 2` if test "$JVM_64BIT" = true; then - if test "$MEMORY_SIZE" -gt "17000"; then - - $ECHO "Check if jvm arg is ok: -Xms10G -Xmx10G" >&5 - $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms10G -Xmx10G -version" >&5 - OUTPUT=`$SJAVAC_SERVER_JAVA -Xms10G -Xmx10G -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms10G -Xmx10G" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - fi - if test "$MEMORY_SIZE" -gt "10000" && test "$JVM_ARG_OK" = false; then - - $ECHO "Check if jvm arg is ok: -Xms6G -Xmx6G" >&5 - $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms6G -Xmx6G -version" >&5 - OUTPUT=`$SJAVAC_SERVER_JAVA -Xms6G -Xmx6G -version 2>&1` + # Set ms lower than mx since more than one instance of the server might + # get launched at the same time before they figure out which instance won. 
+ MS_VALUE=512 + if test "$MX_VALUE" -gt "2048"; then + MX_VALUE=2048 + fi + else + MS_VALUE=256 + if test "$MX_VALUE" -gt "1500"; then + MX_VALUE=1500 + fi + fi + if test "$MX_VALUE" -lt "512"; then + MX_VALUE=512 + fi + + $ECHO "Check if jvm arg is ok: -Xms${MS_VALUE}M -Xmx${MX_VALUE}M" >&5 + $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms${MS_VALUE}M -Xmx${MX_VALUE}M -version" >&5 + OUTPUT=`$SJAVAC_SERVER_JAVA -Xms${MS_VALUE}M -Xmx${MX_VALUE}M -version 2>&1` FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms6G -Xmx6G" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - fi - if test "$MEMORY_SIZE" -gt "5000" && test "$JVM_ARG_OK" = false; then - - $ECHO "Check if jvm arg is ok: -Xms1G -Xmx3G" >&5 - $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms1G -Xmx3G -version" >&5 - OUTPUT=`$SJAVAC_SERVER_JAVA -Xms1G -Xmx3G -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms1G -Xmx3G" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - fi - if test "$MEMORY_SIZE" -gt "3800" && test "$JVM_ARG_OK" = false; then - - $ECHO "Check if jvm arg is ok: -Xms1G -Xmx2500M" >&5 - $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms1G -Xmx2500M -version" >&5 - OUTPUT=`$SJAVAC_SERVER_JAVA -Xms1G -Xmx2500M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms1G -Xmx2500M" + SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms${MS_VALUE}M -Xmx${MX_VALUE}M" JVM_ARG_OK=true else $ECHO "Arg failed:" >&5 @@ -50054,62 +49999,7 @@ JVM_ARG_OK=false fi - fi - fi - if test "$MEMORY_SIZE" -gt "2500" && test "$JVM_ARG_OK" = false; then - - $ECHO "Check if jvm arg is ok: -Xms1000M -Xmx1500M" >&5 - $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms1000M -Xmx1500M -version" >&5 - OUTPUT=`$SJAVAC_SERVER_JAVA -Xms1000M -Xmx1500M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms1000M -Xmx1500M" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - fi - if test "$MEMORY_SIZE" -gt "1000" && test "$JVM_ARG_OK" = false; then - - $ECHO "Check if jvm arg is ok: -Xms400M -Xmx1100M" >&5 - $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms400M -Xmx1100M -version" >&5 - OUTPUT=`$SJAVAC_SERVER_JAVA -Xms400M -Xmx1100M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms400M -Xmx1100M" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - fi - if test "$JVM_ARG_OK" = false; then - - $ECHO "Check if jvm arg is ok: -Xms256M -Xmx512M" >&5 - $ECHO "Command: $SJAVAC_SERVER_JAVA -Xms256M -Xmx512M -version" >&5 - OUTPUT=`$SJAVAC_SERVER_JAVA -Xms256M -Xmx512M -version 2>&1` - FOUND_WARN=`$ECHO "$OUTPUT" | grep -i warn` - FOUND_VERSION=`$ECHO $OUTPUT | grep " version \""` - if test 
"x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then - SJAVAC_SERVER_JAVA="$SJAVAC_SERVER_JAVA -Xms256M -Xmx512M" - JVM_ARG_OK=true - else - $ECHO "Arg failed:" >&5 - $ECHO "$OUTPUT" >&5 - JVM_ARG_OK=false - fi - - fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use sjavac" >&5 -$as_echo_n "checking whether to use sjavac... " >&6; } + # Check whether --enable-sjavac was given. if test "${enable_sjavac+set}" = set; then : enableval=$enable_sjavac; ENABLE_SJAVAC="${enableval}" @@ -50117,6 +50007,13 @@ ENABLE_SJAVAC='no' fi + if test "x$JVM_ARG_OK" = "xfalse"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac" >&5 +$as_echo "$as_me: WARNING: Could not set -Xms${MS_VALUE}M -Xmx${MX_VALUE}M, disabling sjavac" >&2;} + ENABLE_SJAVAC=no; + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use sjavac" >&5 +$as_echo_n "checking whether to use sjavac... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ENABLE_SJAVAC" >&5 $as_echo "$ENABLE_SJAVAC" >&6; }
--- a/common/bin/hgforest.sh Fri Aug 29 11:57:50 2014 -0700 +++ b/common/bin/hgforest.sh Wed Jul 05 19:59:00 2017 +0200 @@ -77,6 +77,11 @@ shift done +# debug mode +if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then + global_opts="${global_opts} --debug" +fi + # silence standard output? if [ ${qflag} = "true" ] ; then global_opts="${global_opts} -q" @@ -89,14 +94,26 @@ fi # Make sure we have a command. -if [ $# -lt 1 -o -z "${1:-}" ] ; then - echo "ERROR: No command to hg supplied!" - usage +if [ ${#} -lt 1 -o -z "${1:-}" ] ; then + echo "ERROR: No command to hg supplied!" > ${status_output} + usage > ${status_output} fi -command="$1"; shift +# grab command +command="${1}"; shift + +if [ ${vflag} = "true" ] ; then + echo "# Mercurial command: ${command}" > ${status_output} +fi + + +# capture command options and arguments (if any) command_args="${@:-}" +if [ ${vflag} = "true" ] ; then + echo "# Mercurial command arguments: ${command_args}" > ${status_output} +fi + # Clean out the temporary directory that stores the pid files. tmp=/tmp/forest.$$ rm -f -r ${tmp} @@ -104,7 +121,8 @@ if [ "${HGFOREST_DEBUG:-false}" = "true" ] ; then - echo "DEBUG: temp files are in: ${tmp}" + # ignores redirection. + echo "DEBUG: temp files are in: ${tmp}" >&2 fi # Check if we can use fifos for monitoring sub-process completion. @@ -377,21 +395,33 @@ fi fi done + + if [ ${have_fifos} = "true" ]; then + # done with the fifo + exec 3>&- + fi fi # Wait for all subprocesses to complete wait # Terminate with exit 0 only if all subprocesses were successful +# Terminate with highest exit code of subprocesses ec=0 if [ -d ${tmp} ]; then rcfiles="`(ls -a ${tmp}/*.pid.rc 2> /dev/null) || echo ''`" for rc in ${rcfiles} ; do exit_code=`cat ${rc} | tr -d ' \n\r'` if [ "${exit_code}" != "0" ] ; then + if [ ${exit_code} -gt 1 ]; then + # mercurial exit codes greater than "1" signal errors. repo="`echo ${rc} | sed -e 's@^'${tmp}'@@' -e 's@/*\([^/]*\)\.pid\.rc$@\1@' -e 's@_@/@g'`" echo "WARNING: ${repo} exited abnormally (${exit_code})" > ${status_output} - ec=1 + fi + if [ ${exit_code} -gt ${ec} ]; then + # assume that larger exit codes are more significant + ec=${exit_code} + fi fi done fi
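Two behavioral changes land here: debug tracing (HGFOREST_DEBUG=true adds --debug to hg, and -v echoes the command and its arguments), and the script's exit status becoming the highest exit code of any per-repository hg subprocess, with warnings only for codes above 1 (hg uses 1 for benign outcomes such as `hg incoming` finding nothing). A hypothetical invocation:

```sh
# Hypothetical run; the script path matches this repository's layout.
HGFOREST_DEBUG=true sh common/bin/hgforest.sh -v pull -u
status=$?
if [ ${status} -gt 1 ]; then
  echo "at least one repository failed (highest hg exit code: ${status})"
fi
```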
--- a/corba/.hgtags Fri Aug 29 11:57:50 2014 -0700 +++ b/corba/.hgtags Wed Jul 05 19:59:00 2017 +0200 @@ -270,3 +270,4 @@ da08cca6b97f41b7081a3e176dcb400af6e4bb26 jdk9-b25 6c777df597bbf5abba3488d44c401edfe73c74af jdk9-b26 7e06bf1dcb0907b80ddf59315426ce9ce775e56d jdk9-b27 +a00b04ef067e39f50b9a0fea6f1904e35d632a73 jdk9-b28
--- a/corba/make/GensrcCorba.gmk Fri Aug 29 11:57:50 2014 -0700 +++ b/corba/make/GensrcCorba.gmk Wed Jul 05 19:59:00 2017 +0200 @@ -50,8 +50,9 @@ INCLUDES := com/sun/tools/corba/se/idl, \ EXCLUDE_FILES := ResourceBundleUtil.java)) +# Force the language to english for predictable source code generation. TOOL_IDLJ_CMD := $(JAVA) -cp $(CORBA_OUTPUTDIR)/idlj_classes \ - com.sun.tools.corba.se.idl.toJavaPortable.Compile + -Duser.language=en com.sun.tools.corba.se.idl.toJavaPortable.Compile ################################################################################
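Forcing user.language=en keeps idlj's generated sources identical across machines whose default locale would otherwise leak into the output, per the comment added above. The same pattern applies to any source-generating Java tool; the class path and input file below are hypothetical placeholders:

```sh
# Sketch: pin the tool's locale for reproducible generated code.
# 'build/idlj_classes' and 'demo.idl' are placeholders, not build paths.
java -cp build/idlj_classes -Duser.language=en \
    com.sun.tools.corba.se.idl.toJavaPortable.Compile demo.idl
```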
--- a/get_source.sh Fri Aug 29 11:57:50 2014 -0700 +++ b/get_source.sh Wed Jul 05 19:59:00 2017 +0200 @@ -67,7 +67,7 @@ error "Could not locate Mercurial command" fi -hgversion="`hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`" +hgversion="`LANGUAGE=en hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`" if [ "x${hgversion}" = "x" ] ; then error "Could not determine Mercurial version of $hgwhere" fi
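The LANGUAGE=en prefix exists because the sed expression matches Mercurial's English version banner; under a non-English locale the banner may be translated, hgversion comes back empty, and the script fails with a misleading "could not determine version" error. A quick comparison (output wording is illustrative):

```sh
# Sketch: localized vs. forced-English banner.
hg --version | head -1               # may be translated by the user's locale
LANGUAGE=en hg --version | head -1   # "Mercurial Distributed SCM (version ...)"
```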
--- a/hotspot/.hgtags Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/.hgtags Wed Jul 05 19:59:00 2017 +0200 @@ -430,3 +430,4 @@ 6de94e8693240cec8aae11f6b42f43433456a733 jdk9-b25 48b95a073d752d6891cc0d1d2836b321ecf3ce0c jdk9-b26 f95347244306affc32ce3056f27ceff7b2100810 jdk9-b27 +657294869d7ff063e055f5492cab7ce5612ca851 jdk9-b28
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Wed Jul 05 19:59:00 2017 +0200 @@ -45,8 +45,8 @@ public class G1CollectedHeap extends SharedHeap { // HeapRegionSeq _seq; static private long hrsFieldOffset; - // MemRegion _g1_committed; - static private long g1CommittedFieldOffset; + // MemRegion _g1_reserved; + static private long g1ReservedFieldOffset; // size_t _summary_bytes_used; static private CIntegerField summaryBytesUsedField; // G1MonitoringSupport* _g1mm; @@ -68,7 +68,6 @@ Type type = db.lookupType("G1CollectedHeap"); hrsFieldOffset = type.getField("_hrs").getOffset(); - g1CommittedFieldOffset = type.getField("_g1_committed").getOffset(); summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used"); g1mmField = type.getAddressField("_g1mm"); oldSetFieldOffset = type.getField("_old_set").getOffset(); @@ -76,9 +75,7 @@ } public long capacity() { - Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset); - MemRegion g1Committed = new MemRegion(g1CommittedAddr); - return g1Committed.byteSize(); + return hrs().capacity(); } public long used() {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1HeapRegionTable.java Wed Jul 05 19:59:00 2017 +0200 @@ -93,19 +93,35 @@ private class HeapRegionIterator implements Iterator<HeapRegion> { private long index; private long length; + private HeapRegion next; - @Override - public boolean hasNext() { return index < length; } + public HeapRegion positionToNext() { + HeapRegion result = next; + while (index < length && at(index) == null) { + index++; + } + if (index < length) { + next = at(index); + index++; // restart search at next element + } else { + next = null; + } + return result; + } @Override - public HeapRegion next() { return at(index++); } + public boolean hasNext() { return next != null; } + + @Override + public HeapRegion next() { return positionToNext(); } @Override - public void remove() { /* not supported */ } + public void remove() { /* not supported */ } - HeapRegionIterator(long committedLength) { + HeapRegionIterator(long totalLength) { index = 0; - length = committedLength; + length = totalLength; + positionToNext(); } }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java Wed Jul 05 19:59:00 2017 +0200 @@ -43,7 +43,7 @@ // G1HeapRegionTable _regions static private long regionsFieldOffset; // uint _committed_length - static private CIntegerField committedLengthField; + static private CIntegerField numCommittedField; static { VM.registerVMInitializedObserver(new Observer() { @@ -57,7 +57,7 @@ Type type = db.lookupType("HeapRegionSeq"); regionsFieldOffset = type.getField("_regions").getOffset(); - committedLengthField = type.getCIntegerField("_committed_length"); + numCommittedField = type.getCIntegerField("_num_committed"); } private G1HeapRegionTable regions() { @@ -66,16 +66,20 @@ regionsAddr); } + public long capacity() { + return length() * HeapRegion.grainBytes(); + } + public long length() { return regions().length(); } public long committedLength() { - return committedLengthField.getValue(addr); + return numCommittedField.getValue(addr); } public Iterator<HeapRegion> heapRegionIterator() { - return regions().heapRegionIterator(committedLength()); + return regions().heapRegionIterator(length()); } public HeapRegionSeq(Address addr) {
--- a/hotspot/make/bsd/makefiles/gcc.make Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/make/bsd/makefiles/gcc.make Wed Jul 05 19:59:00 2017 +0200 @@ -508,13 +508,9 @@ ifeq ($(USE_CLANG),) # Enable bounds checking. - # _FORTIFY_SOURCE appears in GCC 4.0+ ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1" - # compile time size bounds checks - FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 - - # and runtime size bounds checks and paranoid stack smashing checks. - DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1 + # stack smashing checks. + DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1 endif endif
--- a/hotspot/make/excludeSrc.make Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/make/excludeSrc.make Wed Jul 05 19:59:00 2017 +0200 @@ -70,7 +70,8 @@ CXXFLAGS += -DINCLUDE_CDS=0 CFLAGS += -DINCLUDE_CDS=0 - Src_Files_EXCLUDE += filemap.cpp metaspaceShared.cpp + Src_Files_EXCLUDE += filemap.cpp metaspaceShared*.cpp sharedPathsMiscInfo.cpp \ + systemDictionaryShared.cpp classLoaderExt.cpp sharedClassUtil.cpp endif ifeq ($(INCLUDE_ALL_GCS), false)
--- a/hotspot/make/jprt.properties Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/make/jprt.properties Wed Jul 05 19:59:00 2017 +0200 @@ -374,6 +374,7 @@ ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_compiler}, \ ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_gc}, \ ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime}, \ + ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_runtime_closed}, \ ${jprt.make.rule.test.targets.standard.reg.group:GROUP=hotspot_serviceability} jprt.make.rule.test.targets.embedded = \
--- a/hotspot/make/linux/makefiles/gcc.make Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/make/linux/makefiles/gcc.make Wed Jul 05 19:59:00 2017 +0200 @@ -365,16 +365,13 @@ ifeq ($(USE_CLANG),) # Enable bounds checking. - # _FORTIFY_SOURCE appears in GCC 4.0+ ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1" - # compile time size bounds checks - FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 - - # and runtime size bounds checks and paranoid stack smashing checks. - DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1 + # stack smashing checks. + DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1 endif endif + # If we are building HEADLESS, pass on to VM # so it can set the java.awt.headless property ifdef HEADLESS
--- a/hotspot/make/solaris/makefiles/gcc.make Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/make/solaris/makefiles/gcc.make Wed Jul 05 19:59:00 2017 +0200 @@ -240,11 +240,7 @@ endif # Enable bounds checking. -# _FORTIFY_SOURCE appears in GCC 4.0+ ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1" - # compile time size bounds checks - FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 - - # and runtime size bounds checks and paranoid stack smashing checks. - DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1 + # stack smashing checks. + DEBUG_CFLAGS += -fstack-protector-all --param ssp-buffer-size=1 endif
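The bsd, linux, and solaris gcc makefiles all drop the fastdebug _FORTIFY_SOURCE defines (matching the flags.m4 hunk earlier) and keep only stack-protector hardening for debug builds. In isolation, the surviving flags look like this; guarded.c is a placeholder:

```sh
# Sketch: what debug objects are now compiled with. ssp-buffer-size=1
# lowers GCC's canary threshold so practically every frame with a local
# buffer gets a stack-smashing guard.
gcc -c -fstack-protector-all --param ssp-buffer-size=1 -o guarded.o guarded.c
```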
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -731,7 +731,7 @@ if (method->is_static()) object = method->constants()->pool_holder()->java_mirror(); else - object = (oop) locals[0]; + object = (oop) (void*)locals[0]; monitor->set_obj(object); }
--- a/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -26,6 +26,8 @@ #ifndef CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP #define CPU_ZERO_VM_FRAME_ZERO_INLINE_HPP +#include "code/codeCache.hpp" + // Constructors inline frame::frame() {
--- a/hotspot/src/os/linux/vm/os_linux.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -2246,7 +2246,7 @@ const siginfo_t* si = (const siginfo_t*)siginfo; os::Posix::print_siginfo_brief(st, si); - +#if INCLUDE_CDS if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) && UseSharedSpaces) { FileMapInfo* mapinfo = FileMapInfo::current_info(); @@ -2256,6 +2256,7 @@ " possible disk/network problem."); } } +#endif st->cr(); }
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Wed Jul 05 19:59:00 2017 +0200
@@ -66,129 +66,92 @@
 }
 
 int VM_Version::platform_features(int features) {
-  // getisax(2), SI_ARCHITECTURE_32, and SI_ARCHITECTURE_64 are
-  // supported on Solaris 10 and later.
-  if (os::Solaris::supports_getisax()) {
+  assert(os::Solaris::supports_getisax(), "getisax() must be available");
 
-    // Check 32-bit architecture.
-    do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
+  // Check 32-bit architecture.
+  do_sysinfo(SI_ARCHITECTURE_32, "sparc", &features, v8_instructions_m);
 
-    // Check 64-bit architecture.
-    do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
+  // Check 64-bit architecture.
+  do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
 
-    // Extract valid instruction set extensions.
-    uint_t avs[2];
-    uint_t avn = os::Solaris::getisax(avs, 2);
-    assert(avn <= 2, "should return two or less av's");
-    uint_t av = avs[0];
+  // Extract valid instruction set extensions.
+  uint_t avs[2];
+  uint_t avn = os::Solaris::getisax(avs, 2);
+  assert(avn <= 2, "should return two or less av's");
+  uint_t av = avs[0];
 
 #ifndef PRODUCT
-    if (PrintMiscellaneous && Verbose) {
-      tty->print("getisax(2) returned: " PTR32_FORMAT, av);
-      if (avn > 1) {
-        tty->print(", " PTR32_FORMAT, avs[1]);
-      }
-      tty->cr();
+  if (PrintMiscellaneous && Verbose) {
+    tty->print("getisax(2) returned: " PTR32_FORMAT, av);
+    if (avn > 1) {
+      tty->print(", " PTR32_FORMAT, avs[1]);
     }
+    tty->cr();
+  }
 #endif
 
-    if (av & AV_SPARC_MUL32)  features |= hardware_mul32_m;
-    if (av & AV_SPARC_DIV32)  features |= hardware_div32_m;
-    if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
-    if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
-    if (av & AV_SPARC_POPC)   features |= hardware_popc_m;
-    if (av & AV_SPARC_VIS)    features |= vis1_instructions_m;
-    if (av & AV_SPARC_VIS2)   features |= vis2_instructions_m;
-    if (avn > 1) {
-      uint_t av2 = avs[1];
+  if (av & AV_SPARC_MUL32)  features |= hardware_mul32_m;
+  if (av & AV_SPARC_DIV32)  features |= hardware_div32_m;
+  if (av & AV_SPARC_FSMULD) features |= hardware_fsmuld_m;
+  if (av & AV_SPARC_V8PLUS) features |= v9_instructions_m;
+  if (av & AV_SPARC_POPC)   features |= hardware_popc_m;
+  if (av & AV_SPARC_VIS)    features |= vis1_instructions_m;
+  if (av & AV_SPARC_VIS2)   features |= vis2_instructions_m;
+  if (avn > 1) {
+    uint_t av2 = avs[1];
 #ifndef AV2_SPARC_SPARC5
 #define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
 #endif
-      if (av2 & AV2_SPARC_SPARC5)       features |= sparc5_instructions_m;
-    }
+    if (av2 & AV2_SPARC_SPARC5)       features |= sparc5_instructions_m;
+  }
 
-    // Next values are not defined before Solaris 10
-    // but Solaris 8 is used for jdk6 update builds.
+  // We only build on Solaris 10 and up, but some of the values below
+  // are not defined on all versions of Solaris 10, so we define them,
+  // if necessary.
 #ifndef AV_SPARC_ASI_BLK_INIT
 #define AV_SPARC_ASI_BLK_INIT 0x0080  /* ASI_BLK_INIT_xxx ASI */
 #endif
-    if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
+  if (av & AV_SPARC_ASI_BLK_INIT) features |= blk_init_instructions_m;
 
 #ifndef AV_SPARC_FMAF
 #define AV_SPARC_FMAF 0x0100          /* Fused Multiply-Add */
 #endif
-    if (av & AV_SPARC_FMAF)         features |= fmaf_instructions_m;
+  if (av & AV_SPARC_FMAF)         features |= fmaf_instructions_m;
 
 #ifndef AV_SPARC_FMAU
-#define AV_SPARC_FMAU 0x0200          /* Unfused Multiply-Add */
+#define AV_SPARC_FMAU 0x0200          /* Unfused Multiply-Add */
 #endif
-    if (av & AV_SPARC_FMAU)         features |= fmau_instructions_m;
+  if (av & AV_SPARC_FMAU)         features |= fmau_instructions_m;
 
 #ifndef AV_SPARC_VIS3
-#define AV_SPARC_VIS3 0x0400          /* VIS3 instruction set extensions */
+#define AV_SPARC_VIS3 0x0400          /* VIS3 instruction set extensions */
 #endif
-    if (av & AV_SPARC_VIS3)         features |= vis3_instructions_m;
+  if (av & AV_SPARC_VIS3)         features |= vis3_instructions_m;
 
 #ifndef AV_SPARC_CBCOND
 #define AV_SPARC_CBCOND 0x10000000    /* compare and branch instrs supported */
 #endif
-    if (av & AV_SPARC_CBCOND)       features |= cbcond_instructions_m;
+  if (av & AV_SPARC_CBCOND)       features |= cbcond_instructions_m;
 
 #ifndef AV_SPARC_AES
 #define AV_SPARC_AES 0x00020000       /* aes instrs supported */
 #endif
-    if (av & AV_SPARC_AES)          features |= aes_instructions_m;
+  if (av & AV_SPARC_AES)          features |= aes_instructions_m;
 
 #ifndef AV_SPARC_SHA1
 #define AV_SPARC_SHA1 0x00400000      /* sha1 instruction supported */
 #endif
-    if (av & AV_SPARC_SHA1)         features |= sha1_instruction_m;
+  if (av & AV_SPARC_SHA1)         features |= sha1_instruction_m;
 
 #ifndef AV_SPARC_SHA256
 #define AV_SPARC_SHA256 0x00800000    /* sha256 instruction supported */
 #endif
-    if (av & AV_SPARC_SHA256)       features |= sha256_instruction_m;
+  if (av & AV_SPARC_SHA256)       features |= sha256_instruction_m;
 
 #ifndef AV_SPARC_SHA512
 #define AV_SPARC_SHA512 0x01000000    /* sha512 instruction supported */
 #endif
-    if (av & AV_SPARC_SHA512)       features |= sha512_instruction_m;
-
-  } else {
-    // getisax(2) failed, use the old legacy code.
-#ifndef PRODUCT
-    if (PrintMiscellaneous && Verbose)
-      tty->print_cr("getisax(2) is not supported.");
-#endif
-
-    char   tmp;
-    size_t bufsize = sysinfo(SI_ISALIST, &tmp, 1);
-    char*  buf     = (char*) os::malloc(bufsize, mtInternal);
-
-    if (buf != NULL) {
-      if (sysinfo(SI_ISALIST, buf, bufsize) == bufsize) {
-        // Figure out what kind of sparc we have
-        char *sparc_string = strstr(buf, "sparc");
-        if (sparc_string != NULL) {              features |= v8_instructions_m;
-          if (sparc_string[5] == 'v') {
-            if (sparc_string[6] == '8') {
-              if (sparc_string[7] == '-') {      features |= hardware_mul32_m;
-                                                 features |= hardware_div32_m;
-              } else if (sparc_string[7] == 'p') features |= generic_v9_m;
-              else                               features |= generic_v8_m;
-            } else if (sparc_string[6] == '9')   features |= generic_v9_m;
-          }
-        }
-
-        // Check for visualization instructions
-        char *vis = strstr(buf, "vis");
-        if (vis != NULL) {                       features |= vis1_instructions_m;
-          if (vis[3] == '2')                     features |= vis2_instructions_m;
-        }
-      }
-      os::free(buf);
-    }
-  }
+  if (av & AV_SPARC_SHA512)       features |= sha512_instruction_m;
 
   // Determine the machine type.
   do_sysinfo(SI_MACHINE, "sun4v", &features, sun4v_m);
 
@@ -203,27 +166,7 @@
       kstat_named_t* knm = (kstat_named_t *)ksp->ks_data;
       for (int i = 0; i < ksp->ks_ndata; i++) {
         if (strcmp((const char*)&(knm[i].name),"implementation") == 0) {
-#ifndef KSTAT_DATA_STRING
-#define KSTAT_DATA_STRING 9
-#endif
-          if (knm[i].data_type == KSTAT_DATA_CHAR) {
-            // VM is running on Solaris 8 which does not have value.str.
-            implementation = &(knm[i].value.c[0]);
-          } else if (knm[i].data_type == KSTAT_DATA_STRING) {
-            // VM is running on Solaris 10.
-#ifndef KSTAT_NAMED_STR_PTR
-            // Solaris 8 was used to build VM, define the structure it misses.
-            struct str_t {
-              union {
-                char *ptr;     /* NULL-term string */
-                char __pad[8]; /* 64-bit padding */
-              } addr;
-              uint32_t len;    /* # bytes for strlen + '\0' */
-            };
-#define KSTAT_NAMED_STR_PTR(knptr) (( (str_t*)&((knptr)->value) )->addr.ptr)
-#endif
-            implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
-          }
+          implementation = KSTAT_NAMED_STR_PTR(&knm[i]);
 #ifndef PRODUCT
           if (PrintMiscellaneous && Verbose) {
             tty->print_cr("cpu_info.implementation: %s", implementation);
@@ -234,6 +177,7 @@
           for (int i = 0; impl[i] != 0; i++)
             impl[i] = (char)toupper((uint)impl[i]);
 
+
           if (strstr(impl, "SPARC64") != NULL) {
             features |= sparc64_family_m;
           } else if (strstr(impl, "SPARC-M") != NULL) {
@@ -248,8 +192,10 @@
           if (strstr(impl, "SPARC") == NULL) {
 #ifndef PRODUCT
             // kstat on Solaris 8 virtual machines (branded zones)
-            // returns "(unsupported)" implementation.
-            warning("kstat cpu_info implementation = '%s', should contain SPARC", impl);
+            // returns "(unsupported)" implementation. Solaris 8 is not
+            // supported anymore, but include this check to be on the
+            // safe side.
+            warning("kstat cpu_info implementation = '%s', assume generic SPARC", impl);
 #endif
             implementation = "SPARC";
           }
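The rewritten platform_features() now relies unconditionally on getisax(2), since the legacy SI_ISALIST fallback only mattered on pre-Solaris-10 systems. For readers unfamiliar with the interface, a minimal stand-alone probe, assuming a Solaris 10 or later SPARC host (this snippet is illustrative only and is not part of the changeset):

    // Query hardware capability bits via getisax(2) from <sys/auxv.h>.
    #include <sys/types.h>
    #include <sys/auxv.h>
    #include <stdio.h>

    int main(void) {
      uint32_t av[2] = { 0, 0 };
      uint_t n = getisax(av, 2);   // fills av[] with AV_SPARC_* capability bits
      if (n > 0 && (av[0] & AV_SPARC_VIS)) {
        printf("VIS 1 instructions available\n");
      }
      return 0;
    }

The HotSpot code above does exactly this through the os::Solaris::getisax() wrapper, then maps each AV_SPARC_* bit to an internal feature mask.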
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Wed Jul 05 19:59:00 2017 +0200
@@ -546,13 +546,18 @@
     // normal bytecode execution.
     thread->clear_exception_oop_and_pc();
 
+    Handle original_exception(thread, exception());
+
     continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
     // If an exception was thrown during exception dispatch, the exception oop may have changed
     thread->set_exception_oop(exception());
     thread->set_exception_pc(pc);
 
     // the exception cache is used only by non-implicit exceptions
-    if (continuation != NULL) {
+    // Update the exception cache only if no other exception occurred
+    // during the computation of the compiled exception handler.
+    if (continuation != NULL && original_exception() == exception()) {
       nm->add_handler_for_exception_and_pc(exception, pc, continuation);
     }
   }
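The fix above snapshots the pending exception oop in a Handle before calling compute_compiled_exc_handler(), which can itself raise a new exception; caching the handler against the wrong exception would poison the nmethod's exception cache. A stand-alone toy model of the guard, with hypothetical names standing in for the VM types (this is a sketch of the pattern, not the VM code):

    #include <iostream>
    #include <string>

    // Toy model: a lookup that may clobber the "pending exception" slot.
    struct Thread { std::string pending_exception; };

    std::string compute_handler(Thread& t) {
      t.pending_exception = "OutOfMemoryError";  // simulate a secondary exception
      return "handler@0x42";
    }

    int main() {
      Thread t{"ArithmeticException"};
      std::string original = t.pending_exception;     // snapshot, like the Handle
      std::string continuation = compute_handler(t);
      if (!continuation.empty() && original == t.pending_exception) {
        std::cout << "cache updated\n";
      } else {
        std::cout << "skip cache update: exception changed during dispatch\n";
      }
    }

In the real code the Handle is what makes the snapshot GC-safe across the call.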
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 19:59:00 2017 +0200
@@ -31,6 +31,9 @@
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -60,6 +63,7 @@
 #include "services/threadService.hpp"
 #include "utilities/array.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
 
 // We generally try to create the oops directly when parsing, rather than
 // allocating temporary data structures and copying the bytes twice. A
@@ -3786,7 +3790,15 @@
   instanceKlassHandle nullHandle;
 
   // Figure out whether we can skip format checking (matching classic VM behavior)
-  _need_verify = Verifier::should_verify_for(class_loader(), verify);
+  if (DumpSharedSpaces) {
+    // verify == true means it's a 'remote' class (i.e., non-boot class)
+    // Verification decision is based on BytecodeVerificationRemote flag
+    // for those classes.
+    _need_verify = (verify) ? BytecodeVerificationRemote :
+                              BytecodeVerificationLocal;
+  } else {
+    _need_verify = Verifier::should_verify_for(class_loader(), verify);
+  }
 
   // Set the verify flag in stream
   cfs->set_verify(_need_verify);
@@ -3805,6 +3817,18 @@
   u2 minor_version = cfs->get_u2_fast();
   u2 major_version = cfs->get_u2_fast();
 
+  if (DumpSharedSpaces && major_version < JAVA_1_5_VERSION) {
+    ResourceMark rm;
+    warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
+            major_version, minor_version, name->as_C_string());
+    Exceptions::fthrow(
+      THREAD_AND_LOCATION,
+      vmSymbols::java_lang_UnsupportedClassVersionError(),
+      "Unsupported major.minor version for dump time %u.%u",
+      major_version,
+      minor_version);
+  }
+
   // Check version numbers - we check this even with verifier off
   if (!is_supported_version(major_version, minor_version)) {
     if (name == NULL) {
@@ -3912,6 +3936,18 @@
       if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
       tty->print_cr("]");
     }
+#if INCLUDE_CDS
+    if (DumpLoadedClassList != NULL && cfs->source() != NULL && classlist_file->is_open()) {
+      // Only dump the classes that can be stored into CDS archive
+      if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
+        if (name != NULL) {
+          ResourceMark rm(THREAD);
+          classlist_file->print_cr("%s", name->as_C_string());
+          classlist_file->flush();
+        }
+      }
+    }
+#endif
 
     u2 super_class_index = cfs->get_u2_fast();
     instanceKlassHandle super_klass = parse_super_class(super_class_index,
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Wed Jul 05 19:59:00 2017 +0200
@@ -26,8 +26,13 @@
 #include "classfile/classFileParser.hpp"
 #include "classfile/classFileStream.hpp"
 #include "classfile/classLoader.hpp"
+#include "classfile/classLoaderExt.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/javaClasses.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#endif
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -35,6 +40,7 @@
 #include "interpreter/bytecodeStream.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/filemap.hpp"
 #include "memory/generation.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.inline.hpp"
@@ -114,8 +120,12 @@
 ClassPathEntry* ClassLoader::_first_entry = NULL;
 ClassPathEntry* ClassLoader::_last_entry  = NULL;
+int             ClassLoader::_num_entries = 0;
 PackageHashtable* ClassLoader::_package_hash_table = NULL;
+#if INCLUDE_CDS
+SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
+#endif
 // helper routines
 bool string_starts_with(const char* str, const char* str_to_find) {
   size_t str_len = strlen(str);
@@ -194,6 +204,14 @@
   // check if file exists
   struct stat st;
   if (os::stat(path, &st) == 0) {
+#if INCLUDE_CDS
+    if (DumpSharedSpaces) {
+      // We have already checked in ClassLoader::check_shared_classpath() that the directory is empty, so
+      // we should never find a file underneath it -- unless the user has added a new file while we are running
+      // the dump, in which case let's quit!
+      ShouldNotReachHere();
+    }
+#endif
    // found file, open it
    int file_handle = os::open(path, 0, 0);
    if (file_handle != -1) {
@@ -228,13 +246,13 @@
   FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
 }
 
-ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
-  // enable call to C land
+u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
+  // enable call to C land
   JavaThread* thread = JavaThread::current();
   ThreadToNativeFromVM ttn(thread);
   // check whether zip archive contains name
-  jint filesize, name_len;
-  jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len);
+  jint name_len;
+  jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len);
   if (entry == NULL) return NULL;
   u1* buffer;
   char name_buf[128];
@@ -245,19 +263,33 @@
     filename = NEW_RESOURCE_ARRAY(char, name_len + 1);
   }
 
-  // file found, get pointer to class in mmaped jar file.
+  // file found, get pointer to the entry in mmapped jar file.
   if (ReadMappedEntry == NULL ||
       !(*ReadMappedEntry)(_zip, entry, &buffer, filename)) {
-    // mmaped access not available, perhaps due to compression,
+    // mmapped access not available, perhaps due to compression,
     // read contents into resource array
-    buffer = NEW_RESOURCE_ARRAY(u1, filesize);
+    int size = (*filesize) + ((nul_terminate) ? 1 : 0);
+    buffer = NEW_RESOURCE_ARRAY(u1, size);
     if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
   }
+
+  // return result
+  if (nul_terminate) {
+    buffer[*filesize] = 0;
+  }
+  return buffer;
+}
+
+ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
+  jint filesize;
+  u1* buffer = open_entry(name, &filesize, false, CHECK_NULL);
+  if (buffer == NULL) {
+    return NULL;
+  }
   if (UsePerfData) {
     ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize);
   }
-  // return result
-  return new ClassFileStream(buffer, filesize, _zip_name);    // Resource allocated
+  return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
 }
 
 // invoke function for each entry in the zip file
@@ -272,12 +304,13 @@
   }
 }
 
-LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
+LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
   _path = os::strdup_check_oom(path);
   _st = *st;
   _meta_index = NULL;
   _resolved_entry = NULL;
   _has_error = false;
+  _throw_exception = throw_exception;
 }
 
 LazyClassPathEntry::~LazyClassPathEntry() {
@@ -293,7 +326,11 @@
     return (ClassPathEntry*) _resolved_entry;
   }
   ClassPathEntry* new_entry = NULL;
-  new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
+  new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, _throw_exception, CHECK_NULL);
+  if (!_throw_exception && new_entry == NULL) {
+    assert(!HAS_PENDING_EXCEPTION, "must be");
+    return NULL;
+  }
   {
     ThreadCritical tc;
     if (_resolved_entry == NULL) {
@@ -327,6 +364,23 @@
   return true;
 }
 
+u1* LazyClassPathEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
+  if (_has_error) {
+    return NULL;
+  }
+  ClassPathEntry* cpe = resolve_entry(THREAD);
+  if (cpe == NULL) {
+    _has_error = true;
+    return NULL;
+  } else if (cpe->is_jar_file()) {
+    return ((ClassPathZipEntry*)cpe)->open_entry(name, filesize, nul_terminate, THREAD);
+  } else {
+    ShouldNotReachHere();
+    *filesize = 0;
+    return NULL;
+  }
+}
+
 static void print_meta_index(LazyClassPathEntry* entry,
                              GrowableArray<char*>& meta_packages) {
   tty->print("[Meta index for %s=", entry->name());
@@ -337,15 +391,62 @@
   tty->print_cr("]");
 }
 
+#if INCLUDE_CDS
+void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
+  assert(DumpSharedSpaces, "only called at dump time");
+  tty->print_cr("Hint: enable -XX:+TraceClassPaths to diagnose the failure");
+  vm_exit_during_initialization(error, message);
+}
+#endif
 
-void ClassLoader::setup_meta_index() {
+void ClassLoader::trace_class_path(const char* msg, const char* name) {
+  if (!TraceClassPaths) {
+    return;
+  }
+
+  if (msg) {
+    tty->print("%s", msg);
+  }
+  if (name) {
+    if (strlen(name) < 256) {
+      tty->print("%s", name);
+    } else {
+      // For very long paths, we need to print each character separately,
+      // as print_cr() has a length limit
+      while (name[0] != '\0') {
+        tty->print("%c", name[0]);
+        name++;
+      }
+    }
+  }
+  if (msg && msg[0] == '[') {
+    tty->print_cr("]");
+  } else {
+    tty->cr();
+  }
+}
+
+void ClassLoader::setup_bootstrap_meta_index() {
   // Set up meta index which allows us to open boot jars lazily if
   // class data sharing is enabled
+  const char* meta_index_path = Arguments::get_meta_index_path();
+  const char* meta_index_dir  = Arguments::get_meta_index_dir();
+  setup_meta_index(meta_index_path, meta_index_dir, 0);
+}
+
+void ClassLoader::setup_meta_index(const char* meta_index_path, const char* meta_index_dir, int start_index) {
   const char* known_version = "% VERSION 2";
-  char* meta_index_path = Arguments::get_meta_index_path();
-  char* meta_index_dir = Arguments::get_meta_index_dir();
   FILE* file = fopen(meta_index_path, "r");
   int line_no = 0;
+#if INCLUDE_CDS
+  if (DumpSharedSpaces) {
+    if (file != NULL) {
+      _shared_paths_misc_info->add_required_file(meta_index_path);
+    } else {
+      _shared_paths_misc_info->add_nonexist_path(meta_index_path);
+    }
+  }
+#endif
   if (file != NULL) {
     ResourceMark rm;
     LazyClassPathEntry* cur_entry = NULL;
@@ -380,7 +481,7 @@
         // Hand off current packages to current lazy entry (if any)
         if ((cur_entry != NULL) &&
             (boot_class_path_packages.length() > 0)) {
-          if (TraceClassLoading && Verbose) {
+          if ((TraceClassLoading || TraceClassPaths) && Verbose) {
            print_meta_index(cur_entry, boot_class_path_packages);
          }
          MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@@ -391,8 +492,10 @@
          boot_class_path_packages.clear();

          // Find lazy entry corresponding to this jar file
-         for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next()) {
-           if (entry->is_lazy() &&
+         int count = 0;
+         for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next(), count++) {
+           if (count >= start_index &&
+               entry->is_lazy() &&
               string_starts_with(entry->name(), meta_index_dir) &&
               string_ends_with(entry->name(), &package_name[2])) {
             cur_entry = (LazyClassPathEntry*) entry;
@@ -429,7 +532,7 @@
     // Hand off current packages to current lazy entry (if any)
     if ((cur_entry != NULL) &&
         (boot_class_path_packages.length() > 0)) {
-      if (TraceClassLoading && Verbose) {
+      if ((TraceClassLoading || TraceClassPaths) && Verbose) {
        print_meta_index(cur_entry, boot_class_path_packages);
      }
      MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@@ -440,37 +543,88 @@
   }
 }
 
+#if INCLUDE_CDS
+void ClassLoader::check_shared_classpath(const char *path) {
+  if (strcmp(path, "") == 0) {
+    exit_with_path_failure("Cannot have empty path in archived classpaths", NULL);
+  }
+
+  struct stat st;
+  if (os::stat(path, &st) == 0) {
+    if ((st.st_mode & S_IFREG) != S_IFREG) { // is directory
+      if (!os::dir_is_empty(path)) {
+        tty->print_cr("Error: non-empty directory '%s'", path);
+        exit_with_path_failure("CDS allows only empty directories in archived classpaths", NULL);
+      }
+    }
+  }
+}
+#endif
+
 void ClassLoader::setup_bootstrap_search_path() {
   assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
   char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
-  if (TraceClassLoading && Verbose) {
-    tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
+  if (!PrintSharedArchiveAndExit) {
+    trace_class_path("[Bootstrap loader class path=", sys_class_path);
+  }
+#if INCLUDE_CDS
+  if (DumpSharedSpaces) {
+    _shared_paths_misc_info->add_boot_classpath(Arguments::get_sysclasspath());
   }
+#endif
+  setup_search_path(sys_class_path);
+  os::free(sys_class_path);
+}
 
-  int len = (int)strlen(sys_class_path);
+#if INCLUDE_CDS
+int ClassLoader::get_shared_paths_misc_info_size() {
+  return _shared_paths_misc_info->get_used_bytes();
+}
+
+void* ClassLoader::get_shared_paths_misc_info() {
+  return _shared_paths_misc_info->buffer();
+}
+
+bool ClassLoader::check_shared_paths_misc_info(void *buf, int size) {
+  SharedPathsMiscInfo* checker = SharedClassUtil::allocate_shared_paths_misc_info((char*)buf, size);
+  bool result = checker->check();
+  delete checker;
+  return result;
+}
+#endif
+
+void ClassLoader::setup_search_path(char *class_path) {
+  int offset = 0;
+  int len = (int)strlen(class_path);
   int end = 0;
 
   // Iterate over class path entries
   for (int start = 0; start < len; start = end) {
-    while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) {
+    while (class_path[end] && class_path[end] != os::path_separator()[0]) {
       end++;
     }
-    char* path = NEW_C_HEAP_ARRAY(char, end-start+1, mtClass);
-    strncpy(path, &sys_class_path[start], end-start);
-    path[end-start] = '\0';
+    EXCEPTION_MARK;
+    ResourceMark rm(THREAD);
+    char* path = NEW_RESOURCE_ARRAY(char, end - start + 1);
+    strncpy(path, &class_path[start], end - start);
+    path[end - start] = '\0';
     update_class_path_entry_list(path, false);
-    FREE_C_HEAP_ARRAY(char, path, mtClass);
-    while (sys_class_path[end] == os::path_separator()[0]) {
+#if INCLUDE_CDS
+    if (DumpSharedSpaces) {
+      check_shared_classpath(path);
+    }
+#endif
+    while (class_path[end] == os::path_separator()[0]) {
      end++;
    }
  }
-  os::free(sys_class_path);
 }
 
-ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
+ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st,
+                                                     bool lazy, bool throw_exception, TRAPS) {
   JavaThread* thread = JavaThread::current();
   if (lazy) {
-    return new LazyClassPathEntry(path, st);
+    return new LazyClassPathEntry(path, st, throw_exception);
   }
   ClassPathEntry* new_entry = NULL;
   if ((st->st_mode & S_IFREG) == S_IFREG) {
@@ -479,7 +633,11 @@
     char canonical_path[JVM_MAXPATHLEN];
     if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
       // This matches the classic VM
-      THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
+      if (throw_exception) {
+        THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
+      } else {
+        return NULL;
+      }
     }
     char* error_msg = NULL;
     jzfile* zip;
@@ -491,7 +649,7 @@
     }
     if (zip != NULL && error_msg == NULL) {
       new_entry = new ClassPathZipEntry(zip, path);
-      if (TraceClassLoading) {
+      if (TraceClassLoading || TraceClassPaths) {
        tty->print_cr("[Opened %s]", path);
      }
    } else {
@@ -505,12 +663,16 @@
        msg = NEW_RESOURCE_ARRAY(char, len);
        jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
      }
-      THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
+      if (throw_exception) {
+        THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
+      } else {
+        return NULL;
+      }
    }
  } else {
    // Directory
    new_entry = new ClassPathDirEntry(path);
-    if (TraceClassLoading) {
+    if (TraceClassLoading || TraceClassPaths) {
      tty->print_cr("[Path %s]", path);
    }
  }
@@ -571,23 +733,37 @@
       _last_entry = new_entry;
     }
   }
+  _num_entries++;
 }
 
-void ClassLoader::update_class_path_entry_list(char *path,
-                                               bool check_for_duplicates) {
+// Returns true IFF the file/dir exists and the entry was successfully created.
+bool ClassLoader::update_class_path_entry_list(char *path,
+                                               bool check_for_duplicates,
+                                               bool throw_exception) {
   struct stat st;
   if (os::stat(path, &st) == 0) {
     // File or directory found
     ClassPathEntry* new_entry = NULL;
     Thread* THREAD = Thread::current();
-    new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
+    new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, throw_exception, CHECK_(false));
+    if (new_entry == NULL) {
+      return false;
+    }
     // The kernel VM adds dynamically to the end of the classloader path and
     // doesn't reorder the bootclasspath which would break java.lang.Package
     // (see PackageInfo).
     // Add new entry to linked list
     if (!check_for_duplicates || !contains_entry(new_entry)) {
-      add_to_list(new_entry);
+      ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
    }
+    return true;
+  } else {
+#if INCLUDE_CDS
+    if (DumpSharedSpaces) {
+      _shared_paths_misc_info->add_nonexist_path(path);
+    }
+    return false;
+#endif
  }
 }
 
@@ -739,10 +915,10 @@
     assert(n == number_of_entries(), "just checking");
   }
 
-  void copy_table(char** top, char* end, PackageHashtable* table);
+  CDS_ONLY(void copy_table(char** top, char* end, PackageHashtable* table);)
 };
 
-
+#if INCLUDE_CDS
 void PackageHashtable::copy_table(char** top, char* end,
                                   PackageHashtable* table) {
   // Copy (relocate) the table to the shared space.
@@ -750,33 +926,30 @@
 
   // Calculate the space needed for the package name strings.
   int i;
-  int n = 0;
-  for (i = 0; i < table_size(); ++i) {
-    for (PackageInfo* pp = table->bucket(i);
-                      pp != NULL;
-                      pp = pp->next()) {
-      n += (int)(strlen(pp->pkgname()) + 1);
-    }
-  }
-  if (*top + n + sizeof(intptr_t) >= end) {
-    report_out_of_shared_space(SharedMiscData);
-  }
-
-  // Copy the table data (the strings) to the shared space.
-  n = align_size_up(n, sizeof(HeapWord));
-  *(intptr_t*)(*top) = n;
-  *top += sizeof(intptr_t);
+  intptr_t* tableSize = (intptr_t*)(*top);
+  *top += sizeof(intptr_t);  // For table size
+  char* tableStart = *top;
 
   for (i = 0; i < table_size(); ++i) {
     for (PackageInfo* pp = table->bucket(i);
                       pp != NULL;
                       pp = pp->next()) {
       int n1 = (int)(strlen(pp->pkgname()) + 1);
+      if (*top + n1 >= end) {
+        report_out_of_shared_space(SharedMiscData);
+      }
       pp->set_pkgname((char*)memcpy(*top, pp->pkgname(), n1));
       *top += n1;
     }
   }
   *top = (char*)align_size_up((intptr_t)*top, sizeof(HeapWord));
+  if (*top >= end) {
+    report_out_of_shared_space(SharedMiscData);
+  }
+
+  // Write table size
+  intptr_t len = *top - (char*)tableStart;
+  *tableSize = len;
 }
 
 
@@ -787,7 +960,7 @@
 void ClassLoader::copy_package_info_table(char** top, char* end) {
   _package_hash_table->copy_table(top, end, _package_hash_table);
 }
-
+#endif
 
 PackageInfo* ClassLoader::lookup_package(const char *pkgname) {
   const char *cp = strrchr(pkgname, '/');
@@ -880,7 +1053,8 @@
 
 instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
   ResourceMark rm(THREAD);
-  EventMark m("loading class %s", h_name->as_C_string());
+  const char* class_name = h_name->as_C_string();
+  EventMark m("loading class %s", class_name);
   ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
 
   stringStream st;
@@ -888,18 +1062,24 @@
   // st.print("%s.class", h_name->as_utf8());
   st.print_raw(h_name->as_utf8());
   st.print_raw(".class");
-  char* name = st.as_string();
+  const char* file_name = st.as_string();
+  ClassLoaderExt::Context context(class_name, file_name, THREAD);
 
   // Lookup stream for parsing .class file
   ClassFileStream* stream = NULL;
   int classpath_index = 0;
+  ClassPathEntry* e = NULL;
+  instanceKlassHandle h;
   {
     PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
                                ((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
                                PerfClassTraceTime::CLASS_LOAD);
-    ClassPathEntry* e = _first_entry;
+    e = _first_entry;
     while (e != NULL) {
-      stream = e->open_stream(name, CHECK_NULL);
+      stream = e->open_stream(file_name, CHECK_NULL);
+      if (!context.check(stream, classpath_index)) {
+        return h; // NULL
+      }
       if (stream != NULL) {
         break;
       }
@@ -908,9 +1088,7 @@
     }
   }
 
-  instanceKlassHandle h;
   if (stream != NULL) {
-    // class file found, parse it
     ClassFileParser parser(stream);
     ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
@@ -920,12 +1098,19 @@
                                                       loader_data,
                                                       protection_domain,
                                                       parsed_name,
-                                                      false,
-                                                      CHECK_(h));
-
-    // add to package table
-    if (add_package(name, classpath_index, THREAD)) {
-      h = result;
+                                                      context.should_verify(classpath_index),
+                                                      THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      ResourceMark rm;
+      if (DumpSharedSpaces) {
+        tty->print_cr("Preload Error: Failed to load %s", class_name);
+      }
+      return h;
+    }
+    h = context.record_result(classpath_index, e, result, THREAD);
+  } else {
+    if (DumpSharedSpaces) {
+      tty->print_cr("Preload Error: Cannot find %s", class_name);
     }
   }
 
@@ -1020,14 +1205,27 @@
   // lookup zip library entry points
   load_zip_library();
+#if INCLUDE_CDS
   // initialize search path
+  if (DumpSharedSpaces) {
+    _shared_paths_misc_info = SharedClassUtil::allocate_shared_paths_misc_info();
+  }
+#endif
   setup_bootstrap_search_path();
   if (LazyBootClassLoader) {
     // set up meta index which makes boot classpath initialization lazier
-    setup_meta_index();
+    setup_bootstrap_meta_index();
  }
 }
 
+#if INCLUDE_CDS
+void ClassLoader::initialize_shared_path() {
+  if (DumpSharedSpaces) {
+    ClassLoaderExt::setup_search_paths();
+    _shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check()
+  }
+}
+#endif
 
 jlong ClassLoader::classloader_time_ms() {
   return UsePerfData ?
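One small detail worth calling out from the trace_class_path() addition above: when the path is 256 characters or longer, it is printed one character at a time because the formatted print path has a length-limited internal buffer. A stand-alone model of that workaround (O_BUFLEN below is an assumed stand-in for the real limit; the snippet is illustrative, not VM code):

    #include <cstdio>
    #include <cstring>

    const size_t O_BUFLEN = 256;   // assumed formatted-print buffer limit

    void print_path(const char* name) {
      if (std::strlen(name) < O_BUFLEN) {
        std::printf("%s", name);    // short path fits in one formatted call
      } else {
        for (; *name != '\0'; name++) {
          std::printf("%c", *name); // char-at-a-time avoids the buffer limit
        }
      }
    }

    int main() { print_path("/a/very/long/bootclasspath"); }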
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -107,6 +107,7 @@
   const char* name()  { return _zip_name; }
   ClassPathZipEntry(jzfile* zip, const char* zip_name);
   ~ClassPathZipEntry();
+  u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
   ClassFileStream* open_stream(const char* name, TRAPS);
   void contents_do(void f(const char* name, void* context), void* context);
   // Debugging
@@ -122,13 +123,15 @@
   struct stat _st;
   MetaIndex* _meta_index;
   bool _has_error;
+  bool _throw_exception;
   volatile ClassPathEntry* _resolved_entry;
   ClassPathEntry* resolve_entry(TRAPS);
  public:
   bool is_jar_file();
   const char* name()  { return _path; }
-  LazyClassPathEntry(char* path, const struct stat* st);
+  LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception);
   virtual ~LazyClassPathEntry();
+  u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
 
   ClassFileStream* open_stream(const char* name, TRAPS);
   void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
@@ -140,6 +143,7 @@
 
 class PackageHashtable;
 class PackageInfo;
+class SharedPathsMiscInfo;
 template <MEMFLAGS F> class HashtableBucket;
 
 class ClassLoader: AllStatic {
@@ -147,7 +151,7 @@
   enum SomeConstants {
     package_hash_table_size = 31  // Number of buckets
   };
- private:
+ protected:
   friend class LazyClassPathEntry;
 
   // Performance counters
@@ -189,10 +193,15 @@
   static ClassPathEntry* _first_entry;
   // Last entry in linked list of ClassPathEntry instances
   static ClassPathEntry* _last_entry;
+  static int _num_entries;
+
   // Hash table used to keep track of loaded packages
   static PackageHashtable* _package_hash_table;
   static const char* _shared_archive;
 
+  // Info used by CDS
+  CDS_ONLY(static SharedPathsMiscInfo* _shared_paths_misc_info;)
+
   // Hash function
   static unsigned int hash(const char *s, int n);
   // Returns the package file name corresponding to the specified package
@@ -203,19 +212,23 @@
   static bool add_package(const char *pkgname, int classpath_index, TRAPS);
 
   // Initialization
-  static void setup_meta_index();
+  static void setup_bootstrap_meta_index();
+  static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
+                               int start_index);
   static void setup_bootstrap_search_path();
+  static void setup_search_path(char *class_path);
+
   static void load_zip_library();
   static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
-                                                 bool lazy, TRAPS);
+                                                 bool lazy, bool throw_exception, TRAPS);
 
   // Canonicalizes path names, so strcmp will work properly. This is mainly
   // to avoid confusing the zip library
   static bool get_canonical_path(char* orig, char* out, int len);
  public:
-  // Used by the kernel jvm.
-  static void update_class_path_entry_list(char *path,
-                                           bool check_for_duplicates);
+  static bool update_class_path_entry_list(char *path,
+                                           bool check_for_duplicates,
+                                           bool throw_exception=true);
   static void print_bootclasspath();
 
   // Timing
@@ -298,6 +311,7 @@
 
   // Initialization
   static void initialize();
+  CDS_ONLY(static void initialize_shared_path();)
   static void create_package_info_table();
   static void create_package_info_table(HashtableBucket<mtClass> *t, int length,
                                         int number_of_entries);
@@ -312,10 +326,21 @@
     return e;
   }
 
+#if INCLUDE_CDS
   // Sharing dump and restore
   static void copy_package_info_buckets(char** top, char* end);
   static void copy_package_info_table(char** top, char* end);
 
+  static void  check_shared_classpath(const char *path);
+  static void  finalize_shared_paths_misc_info();
+  static int   get_shared_paths_misc_info_size();
+  static void* get_shared_paths_misc_info();
+  static bool  check_shared_paths_misc_info(void* info, int size);
+  static void  exit_with_path_failure(const char* error, const char* message);
+#endif
+
+  static void  trace_class_path(const char* msg, const char* name = NULL);
+
   // VM monitoring and management support
   static jlong classloader_time_ms();
   static jlong class_method_total_size();
@@ -339,7 +364,7 @@
 
   // Force compilation of all methods in all classes in bootstrap class path (stress test)
 #ifndef PRODUCT
- private:
+ protected:
   static int _compile_the_world_class_counter;
   static int _compile_the_world_method_counter;
  public:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classLoaderExt.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
+#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
+
+#include "classfile/classLoader.hpp"
+
+class ClassLoaderExt: public ClassLoader { // AllStatic
+public:
+
+  class Context {
+    const char* _file_name;
+  public:
+    Context(const char* class_name, const char* file_name, TRAPS) {
+      _file_name = file_name;
+    }
+
+    bool check(ClassFileStream* stream, const int classpath_index) {
+      return true;
+    }
+
+    bool should_verify(int classpath_index) {
+      return false;
+    }
+
+    instanceKlassHandle record_result(const int classpath_index,
+                                      ClassPathEntry* e, instanceKlassHandle result, TRAPS) {
+      if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
+        if (DumpSharedSpaces) {
+          result->set_shared_classpath_index(classpath_index);
+        }
+        return result;
+      } else {
+        return instanceKlassHandle(); // NULL
+      }
+    }
+  };
+
+
+  static void add_class_path_entry(char* path, bool check_for_duplicates,
+                                   ClassPathEntry* new_entry) {
+    ClassLoader::add_to_list(new_entry);
+  }
+  static void setup_search_paths() {}
+};
+
+#endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
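ClassLoaderExt is deliberately a set of no-op hooks: check() never vetoes a class, should_verify() always says no, and record_result() just does the default bookkeeping. load_classfile() in classLoader.cpp (above) calls these hooks unconditionally, so a build that substitutes a richer classLoaderExt.hpp can change loading policy without touching the core loader. A self-contained sketch of that hook pattern, with hypothetical names (not the VM's types):

    // DefaultHooks mirrors the open-source no-op behavior; an alternative
    // build could supply a different Hooks type to veto or verify classes.
    struct DefaultHooks {
      static bool check(int /*classpath_index*/)         { return true;  } // never vetoes
      static bool should_verify(int /*classpath_index*/) { return false; }
    };

    template <typename Hooks = DefaultHooks>
    bool load_with_hooks(int classpath_index) {
      if (!Hooks::check(classpath_index)) return false;   // veto point
      // ... proceed with parsing; verification driven by Hooks::should_verify()
      return true;
    }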
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Wed Jul 05 19:59:00 2017 +0200
@@ -130,15 +130,13 @@
 }
 
 
-bool Dictionary::do_unloading() {
+void Dictionary::do_unloading() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  bool class_was_unloaded = false;
-  int index = 0; // Defined here for portability! Do not move
 
   // Remove unloadable entries and classes from system dictionary
   // The placeholder array has been handled in always_strong_oops_do.
   DictionaryEntry* probe = NULL;
-  for (index = 0; index < table_size(); index++) {
+  for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
       probe = *p;
       Klass* e = probe->klass();
@@ -158,16 +156,8 @@
       // Do we need to delete this system dictionary entry?
       if (loader_data->is_unloading()) {
         // If the loader is not live this entry should always be
-        // removed (will never be looked up again). Note that this is
-        // not the same as unloading the referred class.
-        if (k_def_class_loader_data == loader_data) {
-          // This is the defining entry, so the referred class is about
-          // to be unloaded.
-          class_was_unloaded = true;
-        }
-        // Also remove this system dictionary entry.
+        // removed (will never be looked up again).
         purge_entry = true;
-
       } else {
         // The loader in this entry is alive. If the klass is dead,
         // (determined by checking the defining class loader)
@@ -196,7 +186,6 @@
       p = probe->next_addr();
     }
   }
-  return class_was_unloaded;
 }
 
 void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
@@ -220,6 +209,29 @@
   _pd_cache_table->roots_oops_do(strong, weak);
 }
 
+void Dictionary::remove_classes_in_error_state() {
+  assert(DumpSharedSpaces, "supported only when dumping");
+  DictionaryEntry* probe = NULL;
+  for (int index = 0; index < table_size(); index++) {
+    for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
+      probe = *p;
+      InstanceKlass* ik = InstanceKlass::cast(probe->klass());
+      if (ik->is_in_error_state()) { // purge this entry
+        *p = probe->next();
+        if (probe == _current_class_entry) {
+          _current_class_entry = NULL;
+        }
+        free_entry(probe);
+        ResourceMark rm;
+        tty->print_cr("Removed error class: %s", ik->external_name());
+        continue;
+      }
+
+      p = probe->next_addr();
+    }
+  }
+}
+
 void Dictionary::always_strong_oops_do(OopClosure* blk) {
   // Follow all system classes and temporary placeholders in dictionary; only
   // protection domain oops contain references into the heap. In a first
@@ -693,16 +705,17 @@
 
 // ----------------------------------------------------------------------------
-#ifndef PRODUCT
 
-void Dictionary::print() {
+void Dictionary::print(bool details) {
   ResourceMark rm;
   HandleMark   hm;
 
-  tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
-                 table_size(), number_of_entries());
-  tty->print_cr("^ indicates that initiating loader is different from "
-                "defining loader");
+  if (details) {
+    tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
+                   table_size(), number_of_entries());
+    tty->print_cr("^ indicates that initiating loader is different from "
+                  "defining loader");
+  }
 
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry* probe = bucket(index);
@@ -713,21 +726,28 @@
       ClassLoaderData* loader_data = probe->loader_data();
       bool is_defining_class =
          (loader_data == InstanceKlass::cast(e)->class_loader_data());
-      tty->print("%s%s", is_defining_class ? " " : "^",
+      tty->print("%s%s", ((!details) || is_defining_class) ? " " : "^",
                    e->external_name());
 
+      if (details) {
         tty->print(", loader ");
-      loader_data->print_value();
+        if (loader_data != NULL) {
+          loader_data->print_value();
+        } else {
+          tty->print("NULL");
+        }
+      }
       tty->cr();
     }
   }
-  tty->cr();
-  _pd_cache_table->print();
+
+  if (details) {
+    tty->cr();
+    _pd_cache_table->print();
+  }
   tty->cr();
 }
 
-#endif
 
 void Dictionary::verify() {
   guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
--- a/hotspot/src/share/vm/classfile/dictionary.hpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -100,6 +100,7 @@
 
   void methods_do(void f(Method*));
 
   void unlink(BoolObjectClosure* is_alive);
+  void remove_classes_in_error_state();
 
   // Classes loaded by the bootstrap loader are always strongly reachable.
   // If we're not doing class unloading, all classes are strongly reachable.
@@ -108,9 +109,8 @@
     return (loader_data->is_the_null_class_loader_data() || !ClassUnloading);
   }
 
-  // Unload (that is, break root links to) all unmarked classes and
-  // loaders.  Returns "true" iff something was unloaded.
-  bool do_unloading();
+  // Unload (that is, break root links to) all unmarked classes and loaders.
+  void do_unloading();
 
   // Protection domains
   Klass* find(int index, unsigned int hash, Symbol* name,
@@ -127,9 +127,7 @@
 
   ProtectionDomainCacheEntry* cache_get(oop protection_domain);
 
-#ifndef PRODUCT
-  void print();
-#endif
+  void print(bool details = true);
   void verify();
 };
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/sharedClassUtil.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
+#define SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
+
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "memory/filemap.hpp"
+
+class SharedClassUtil : AllStatic {
+public:
+
+  static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
+    return new SharedPathsMiscInfo();
+  }
+
+  static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
+    return new SharedPathsMiscInfo(buf, size);
+  }
+
+  static FileMapInfo::FileMapHeader* allocate_file_map_header() {
+    return new FileMapInfo::FileMapHeader();
+  }
+
+  static size_t file_map_header_size() {
+    return sizeof(FileMapInfo::FileMapHeader);
+  }
+
+  static size_t shared_class_path_entry_size() {
+    return sizeof(SharedClassPathEntry);
+  }
+
+  static void update_shared_classpath(ClassPathEntry *cpe,
+                                      SharedClassPathEntry* ent,
+                                      time_t timestamp,
+                                      long filesize, TRAPS) {
+    ent->_timestamp = timestamp;
+    ent->_filesize  = filesize;
+  }
+
+  static void initialize(TRAPS) {}
+
+  inline static bool is_shared_boot_class(Klass* klass) {
+    return (klass->_shared_class_path_index >= 0);
+  }
+};
+
+#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp	Wed Jul 05 19:59:00 2017 +0200
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "runtime/arguments.hpp"
+
+void SharedPathsMiscInfo::add_path(const char* path, int type) {
+  if (TraceClassPaths) {
+    tty->print("[type=%s] ", type_name(type));
+    trace_class_path("[Add misc shared path ", path);
+  }
+  write(path, strlen(path) + 1);
+  write_jint(jint(type));
+}
+
+void SharedPathsMiscInfo::ensure_size(size_t needed_bytes) {
+  assert(_allocated, "cannot modify buffer during validation.");
+  int used = get_used_bytes();
+  int target = used + int(needed_bytes);
+  if (target > _buf_size) {
+    _buf_size = _buf_size * 2 + (int)needed_bytes;
+    _buf_start = REALLOC_C_HEAP_ARRAY(char, _buf_start, _buf_size, mtClass);
+    _cur_ptr = _buf_start + used;
+    _end_ptr = _buf_start + _buf_size;
+  }
+}
+
+void SharedPathsMiscInfo::write(const void* ptr, size_t size) {
+  ensure_size(size);
+  memcpy(_cur_ptr, ptr, size);
+  _cur_ptr += size;
+}
+
+bool SharedPathsMiscInfo::read(void* ptr, size_t size) {
+  if (_cur_ptr + size <= _end_ptr) {
+    memcpy(ptr, _cur_ptr, size);
+    _cur_ptr += size;
+    return true;
+  }
+  return false;
+}
+
+bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
+  ClassLoader::trace_class_path(msg, name);
+  MetaspaceShared::set_archive_loading_failed();
+  return false;
+}
+
+bool SharedPathsMiscInfo::check() {
+  // The whole buffer must be 0 terminated so that we can use strlen and strcmp
+  // without fear.
+  _end_ptr -= sizeof(jint);
+  if (_cur_ptr >= _end_ptr) {
+    return fail("Truncated archive file header");
+  }
+  if (*_end_ptr != 0) {
+    return fail("Corrupted archive file header");
+  }
+
+  while (_cur_ptr < _end_ptr) {
+    jint type;
+    const char* path = _cur_ptr;
+    _cur_ptr += strlen(path) + 1;
+    if (!read_jint(&type)) {
+      return fail("Corrupted archive file header");
+    }
+    if (TraceClassPaths) {
+      tty->print("[type=%s ", type_name(type));
+      print_path(tty, type, path);
+      tty->print_cr("]");
+    }
+    if (!check(type, path)) {
+      if (!PrintSharedArchiveAndExit) {
+        return false;
+      }
+    } else {
+      trace_class_path("[ok");
+    }
+  }
+
+  return true;
+}
+
+bool SharedPathsMiscInfo::check(jint type, const char* path) {
+  switch (type) {
+  case BOOT:
+    if (strcmp(path, Arguments::get_sysclasspath()) != 0) {
+      return fail("[BOOT classpath mismatch, actual: -Dsun.boot.class.path=", Arguments::get_sysclasspath());
+    }
+    break;
+  case NON_EXIST: // fall-through
+  case REQUIRED:
+    {
+      struct stat st;
+      if (os::stat(path, &st) != 0) {
+        // The file does not actually exist
+        if (type == REQUIRED) {
+          // but we require it to exist -> fail
+          return fail("Required file doesn't exist");
+        }
+      } else {
+        // The file actually exists
+        if (type == NON_EXIST) {
+          // But we want it to not exist -> fail
+          return fail("File must not exist");
+        }
+        time_t timestamp;
+        long   filesize;
+
+        if (!read_time(&timestamp) || !read_long(&filesize)) {
+          return fail("Corrupted archive file header");
+        }
+        if (timestamp != st.st_mtime) {
+          return fail("Timestamp mismatch");
+        }
+        if (filesize != st.st_size) {
+          return fail("File size mismatch");
+        }
+      }
+    }
+    break;
+
+  default:
+    return fail("Corrupted archive file header");
+  }
+
+  return true;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
+#define SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
+
+#include "runtime/os.hpp"
+
+// During dumping time, when processing class paths, we build up the dump-time
+// classpath. The JAR files that exist are stored in the list ClassLoader::_first_entry.
+// However, we need to store other "misc" information for run-time checking, such as
+//
+// + The values of Arguments::get_sysclasspath() used during dumping.
+//
+// + The meta-index file(s) used during dumping (incl. modification time and size)
+//
+// + The class path elements specified during dumping that did not exist --
+//   these elements must also be specified at run time, and they also must not
+//   exist at run time.
+//
+// These misc items are stored in a linear buffer in SharedPathsMiscInfo.
+// The storage format is stream oriented to minimize its size.
+//
+// When writing the information to the archive file, SharedPathsMiscInfo is stored in
+// the archive file header. At run-time, this information is used only during initialization
+// (accessed using read() instead of mmap()), and is deallocated afterwards to save space.
+//
+// The SharedPathsMiscInfo class is used for both creating the information (during
+// dumping time) and validation (at run time). Different constructors are used in the
+// two situations. See below.
+
+class SharedPathsMiscInfo : public CHeapObj<mtClass> {
+protected:
+  char* _buf_start;
+  char* _cur_ptr;
+  char* _end_ptr;
+  int   _buf_size;
+  bool  _allocated;   // was _buf_start allocated by me?
+  void ensure_size(size_t needed_bytes);
+  void add_path(const char* path, int type);
+
+  void write(const void* ptr, size_t size);
+  bool read(void* ptr, size_t size);
+
+  static void trace_class_path(const char* msg, const char* name = NULL) {
+    ClassLoader::trace_class_path(msg, name);
+  }
+protected:
+  static bool fail(const char* msg, const char* name = NULL);
+  virtual bool check(jint type, const char* path);
+
+public:
+  enum {
+    INITIAL_BUF_SIZE = 128
+  };
+  // This constructor is used when creating the misc information (during dump)
+  SharedPathsMiscInfo() {
+    _buf_size = INITIAL_BUF_SIZE;
+    _cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
+    _allocated = true;
+  }
+  // This constructor is used when validating the misc info (during run time)
+  SharedPathsMiscInfo(char *buff, int size) {
+    _cur_ptr = _buf_start = buff;
+    _end_ptr = _buf_start + size;
+    _buf_size = size;
+    _allocated = false;
+  }
+  ~SharedPathsMiscInfo() {
+    if (_allocated) {
+      FREE_C_HEAP_ARRAY(char, _buf_start, mtClass);
+    }
+  }
+  int get_used_bytes() {
+    return _cur_ptr - _buf_start;
+  }
+  void* buffer() {
+    return _buf_start;
+  }
+
+  // writing --
+
+  // The path must not exist at run-time
+  void add_nonexist_path(const char* path) {
+    add_path(path, NON_EXIST);
+  }
+
+  // The path must exist and have required size and modification time
+  void add_required_file(const char* path) {
+    add_path(path, REQUIRED);
+
+    struct stat st;
+    if (os::stat(path, &st) != 0) {
+      assert(0, "sanity");
+      ClassLoader::exit_with_path_failure("failed to os::stat(%s)", path); // should not happen
+    }
+    write_time(st.st_mtime);
+    write_long(st.st_size);
+  }
+
+  // The path must exist, and must contain exactly <num_entries> files/dirs
+  void add_boot_classpath(const char* path) {
+    add_path(path, BOOT);
+  }
+  int write_jint(jint num) {
+    write(&num, sizeof(num));
+    return 0;
+  }
+  void write_time(time_t t) {
+    write(&t, sizeof(t));
+  }
+  void write_long(long l) {
+    write(&l, sizeof(l));
+  }
+
+  bool dump_to_file(int fd) {
+    int n = get_used_bytes();
+    return (os::write(fd, _buf_start, n) == (size_t)n);
+  }
+
+  // reading --
+
+  enum {
+    BOOT      = 1,
+    NON_EXIST = 2,
+    REQUIRED  = 3
+  };
+
+  virtual const char* type_name(int type) {
+    switch (type) {
+    case BOOT:      return "BOOT";
+    case NON_EXIST: return "NON_EXIST";
+    case REQUIRED:  return "REQUIRED";
+    default:        ShouldNotReachHere(); return "?";
+    }
+  }
+
+  virtual void print_path(outputStream* out, int type, const char* path) {
+    switch (type) {
+    case BOOT:
+      out->print("Expecting -Dsun.boot.class.path=%s", path);
+      break;
+    case NON_EXIST:
+      out->print("Expecting that %s does not exist", path);
+      break;
+    case REQUIRED:
+      out->print("Expecting that file %s must exist and is not altered", path);
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+
+  bool check();
+  bool read_jint(jint *ptr) {
+    return read(ptr, sizeof(jint));
+  }
+  bool read_long(long *ptr) {
+    return read(ptr, sizeof(long));
+  }
+  bool read_time(time_t *ptr) {
+    return read(ptr, sizeof(time_t));
+  }
+};
+
+#endif // SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
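To make the stream format described in the header comment concrete: each record is a NUL-terminated path followed by a jint type tag, REQUIRED records additionally carry a timestamp and file size, and initialize_shared_path() (in the classLoader.cpp diff above) terminates the whole stream with a jint 0. A stand-alone model of an encoder for that layout, with simplified types for illustration (this is a sketch of the format, not the VM code):

    #include <cstdint>
    #include <cstring>
    #include <ctime>
    #include <cstdio>
    #include <vector>

    enum PathType : int32_t { BOOT = 1, NON_EXIST = 2, REQUIRED = 3 };

    static void put(std::vector<char>& buf, const void* p, size_t n) {
      const char* c = static_cast<const char*>(p);
      buf.insert(buf.end(), c, c + n);          // append raw bytes, stream style
    }

    int main() {
      std::vector<char> buf;
      const char* path = "/opt/jdk/lib/meta-index";  // hypothetical example path
      int32_t type  = REQUIRED;
      time_t  mtime = 1409333870;                    // example modification time
      long    size  = 2048;                          // example file size

      put(buf, path, std::strlen(path) + 1);  // "path\0"
      put(buf, &type, sizeof(type));          // jint type tag
      put(buf, &mtime, sizeof(mtime));        // REQUIRED extras: mtime...
      put(buf, &size, sizeof(size));          // ...and file size
      int32_t end = 0;
      put(buf, &end, sizeof(end));            // trailing jint 0 terminator

      std::printf("encoded %zu bytes\n", buf.size());
    }

check() walks the same layout in reverse: it strips the trailing jint, verifies the buffer is NUL-terminated, then reads path/type pairs until the end pointer is reached.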
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -31,10 +31,15 @@ #include "classfile/resolutionErrors.hpp" #include "classfile/stringTable.hpp" #include "classfile/systemDictionary.hpp" +#if INCLUDE_CDS +#include "classfile/sharedClassUtil.hpp" +#include "classfile/systemDictionaryShared.hpp" +#endif #include "classfile/vmSymbols.hpp" #include "compiler/compileBroker.hpp" #include "interpreter/bytecodeStream.hpp" #include "interpreter/interpreter.hpp" +#include "memory/filemap.hpp" #include "memory/gcLocker.hpp" #include "memory/oopFactory.hpp" #include "oops/instanceKlass.hpp" @@ -110,6 +115,8 @@ CHECK); _java_system_loader = (oop)result.get_jobject(); + + CDS_ONLY(SystemDictionaryShared::initialize(CHECK);) } @@ -974,6 +981,7 @@ // Create a new CLD for anonymous class, that uses the same class loader // as the host_klass guarantee(host_klass->class_loader() == class_loader(), "should be the same"); + guarantee(!DumpSharedSpaces, "must not create anonymous classes when dumping"); loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL); loader_data->record_dependency(host_klass(), CHECK_NULL); } else { @@ -1134,7 +1142,7 @@ return k(); } - +#if INCLUDE_CDS void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length, int number_of_entries) { assert(length == _nof_buckets * sizeof(HashtableBucket<mtClass>), @@ -1167,15 +1175,21 @@ instanceKlassHandle SystemDictionary::load_shared_class( Symbol* class_name, Handle class_loader, TRAPS) { instanceKlassHandle ik (THREAD, find_shared_class(class_name)); - return load_shared_class(ik, class_loader, THREAD); + // Make sure we only return the boot class for the NULL classloader. + if (ik.not_null() && + SharedClassUtil::is_shared_boot_class(ik()) && class_loader.is_null()) { + Handle protection_domain; + return load_shared_class(ik, class_loader, protection_domain, THREAD); + } + return instanceKlassHandle(); } -instanceKlassHandle SystemDictionary::load_shared_class( - instanceKlassHandle ik, Handle class_loader, TRAPS) { - assert(class_loader.is_null(), "non-null classloader for shared class?"); +instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik, + Handle class_loader, + Handle protection_domain, TRAPS) { if (ik.not_null()) { instanceKlassHandle nh = instanceKlassHandle(); // null Handle - Symbol* class_name = ik->name(); + Symbol* class_name = ik->name(); // Found the class, now load the superclass and interfaces. If they // are shared, add them to the main system dictionary and reset @@ -1184,7 +1198,7 @@ if (ik->super() != NULL) { Symbol* cn = ik->super()->name(); resolve_super_or_fail(class_name, cn, - class_loader, Handle(), true, CHECK_(nh)); + class_loader, protection_domain, true, CHECK_(nh)); } Array<Klass*>* interfaces = ik->local_interfaces(); @@ -1197,7 +1211,7 @@ // reinitialized yet (they will be once the interface classes // are loaded) Symbol* name = k->name(); - resolve_super_or_fail(class_name, name, class_loader, Handle(), false, CHECK_(nh)); + resolve_super_or_fail(class_name, name, class_loader, protection_domain, false, CHECK_(nh)); } // Adjust methods to recover missing data. 
They need addresses for @@ -1206,30 +1220,47 @@ // Updating methods must be done under a lock so multiple // threads don't update these in parallel - // Shared classes are all currently loaded by the bootstrap - // classloader, so this will never cause a deadlock on - // a custom class loader lock. + // + // Shared classes are all currently loaded by either the bootstrap or + // internal parallel class loaders, so this will never cause a deadlock + // on a custom class loader lock. + ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader()); { Handle lockObject = compute_loader_lock_object(class_loader, THREAD); check_loader_lock_contention(lockObject, THREAD); ObjectLocker ol(lockObject, THREAD, true); - ik->restore_unshareable_info(CHECK_(nh)); + ik->restore_unshareable_info(loader_data, protection_domain, CHECK_(nh)); } if (TraceClassLoading) { ResourceMark rm; tty->print("[Loaded %s", ik->external_name()); tty->print(" from shared objects file"); + if (class_loader.not_null()) { + tty->print(" by %s", loader_data->loader_name()); + } tty->print_cr("]"); } + +#if INCLUDE_CDS + if (DumpLoadedClassList != NULL && classlist_file->is_open()) { + // Only dump the classes that can be stored into CDS archive + if (SystemDictionaryShared::is_sharing_possible(loader_data)) { + ResourceMark rm(THREAD); + classlist_file->print_cr("%s", ik->name()->as_C_string()); + classlist_file->flush(); + } + } +#endif + // notify a class loaded from shared object ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()), true /* shared class */); } return ik; } - +#endif instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) { instanceKlassHandle nh = instanceKlassHandle(); // null Handle @@ -1239,8 +1270,10 @@ // shared spaces. instanceKlassHandle k; { +#if INCLUDE_CDS PerfTraceTime vmtimer(ClassLoader::perf_shared_classload_time()); k = load_shared_class(class_name, class_loader, THREAD); +#endif } if (k.is_null()) { @@ -1599,7 +1632,6 @@ Universe::flush_dependents_on(k); } - // ---------------------------------------------------------------------------- // GC support @@ -1661,10 +1693,9 @@ // Note: anonymous classes are not in the SD. bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) { // First, mark for unload all ClassLoaderData referencing a dead class loader. 
- bool has_dead_loaders = ClassLoaderDataGraph::do_unloading(is_alive); - bool unloading_occurred = false; - if (has_dead_loaders) { - unloading_occurred = dictionary()->do_unloading(); + bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive); + if (unloading_occurred) { + dictionary()->do_unloading(); constraints()->purge_loader_constraints(); resolution_errors()->purge_resolution_errors(); } @@ -1682,6 +1713,7 @@ void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) { strong->do_oop(&_java_system_loader); strong->do_oop(&_system_loader_lock_obj); + CDS_ONLY(SystemDictionaryShared::roots_oops_do(strong);) // Adjust dictionary dictionary()->roots_oops_do(strong, weak); @@ -1693,6 +1725,7 @@ void SystemDictionary::oops_do(OopClosure* f) { f->do_oop(&_java_system_loader); f->do_oop(&_system_loader_lock_obj); + CDS_ONLY(SystemDictionaryShared::oops_do(f);) // Adjust dictionary dictionary()->oops_do(f); @@ -1754,6 +1787,10 @@ invoke_method_table()->methods_do(f); } +void SystemDictionary::remove_classes_in_error_state() { + dictionary()->remove_classes_in_error_state(); +} + // ---------------------------------------------------------------------------- // Lazily load klasses @@ -2563,10 +2600,12 @@ // ---------------------------------------------------------------------------- -#ifndef PRODUCT +void SystemDictionary::print_shared(bool details) { + shared_dictionary()->print(details); +} -void SystemDictionary::print() { - dictionary()->print(); +void SystemDictionary::print(bool details) { + dictionary()->print(details); // Placeholders GCMutexLocker mu(SystemDictionary_lock); @@ -2576,7 +2615,6 @@ constraints()->print(); } -#endif void SystemDictionary::verify() { guarantee(dictionary() != NULL, "Verify of system dictionary failed");
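The net effect of the systemDictionary.cpp changes above is that an archived class is only handed out when it was archived as a boot class and the requesting loader is the boot (NULL) loader; everything else falls back to the regular loading path. A minimal standalone sketch of that gate, using hypothetical stand-in types (Loader, SharedClass) rather than the real HotSpot handles:

#include <stdio.h>

struct Loader { const char* name; };          // a null pointer models the boot loader

struct SharedClass {
  const char* name;
  bool        is_shared_boot_class;           // set when archived from the boot class path
};

// Mirrors the gate in SystemDictionary::load_shared_class(Symbol*, Handle, TRAPS):
// return the archived class only for a shared boot class requested by the
// NULL loader; otherwise return null so the caller loads the class normally.
const SharedClass* load_shared_class(const SharedClass* ik, const Loader* loader) {
  if (ik != NULL && ik->is_shared_boot_class && loader == NULL) {
    return ik;                                // safe to reuse the archived class
  }
  return NULL;                                // fall through to normal loading
}

int main() {
  SharedClass string_klass = { "java/lang/String", true };
  Loader app_loader = { "app" };
  printf("boot lookup: %s\n", load_shared_class(&string_klass, NULL) ? "hit" : "miss");
  printf("app  lookup: %s\n", load_shared_class(&string_klass, &app_loader) ? "hit" : "miss");
  return 0;
}
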
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -111,6 +111,7 @@ do_klass(SecurityManager_klass, java_lang_SecurityManager, Pre ) \ do_klass(ProtectionDomain_klass, java_security_ProtectionDomain, Pre ) \ do_klass(AccessControlContext_klass, java_security_AccessControlContext, Pre ) \ + do_klass(SecureClassLoader_klass, java_security_SecureClassLoader, Pre ) \ do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre ) \ do_klass(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre ) \ do_klass(LinkageError_klass, java_lang_LinkageError, Pre ) \ @@ -166,6 +167,15 @@ do_klass(StringBuilder_klass, java_lang_StringBuilder, Pre ) \ do_klass(misc_Unsafe_klass, sun_misc_Unsafe, Pre ) \ \ + /* support for CDS */ \ + do_klass(ByteArrayInputStream_klass, java_io_ByteArrayInputStream, Pre ) \ + do_klass(File_klass, java_io_File, Pre ) \ + do_klass(URLClassLoader_klass, java_net_URLClassLoader, Pre ) \ + do_klass(URL_klass, java_net_URL, Pre ) \ + do_klass(Jar_Manifest_klass, java_util_jar_Manifest, Pre ) \ + do_klass(sun_misc_Launcher_klass, sun_misc_Launcher, Pre ) \ + do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \ + \ /* It's NULL in non-1.4 JDKs. */ \ do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \ /* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \ @@ -221,7 +231,7 @@ static Klass* resolve_or_fail(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS); // Convenient call for null loader and protection domain. static Klass* resolve_or_fail(Symbol* class_name, bool throw_error, TRAPS); -private: +protected: // handle error translation for resolve_or_null results static Klass* handle_resolution_exception(Symbol* class_name, bool throw_error, KlassHandle klass_h, TRAPS); @@ -326,6 +336,9 @@ // loaders. Returns "true" iff something was unloaded. static bool do_unloading(BoolObjectClosure* is_alive); + // Used by DumpSharedSpaces only to remove classes that failed verification + static void remove_classes_in_error_state(); + static int calculate_systemdictionary_size(int loadedclasses); // Applies "f->do_oop" to all root oops in the system dictionary. 
@@ -335,7 +348,7 @@ // System loader lock static oop system_loader_lock() { return _system_loader_lock_obj; } -private: +protected: // Extended Redefine classes support (tbi) static void preloaded_classes_do(KlassClosure* f); static void lazily_loaded_classes_do(KlassClosure* f); @@ -348,7 +361,8 @@ static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length, int number_of_entries); // Printing - static void print() PRODUCT_RETURN; + static void print(bool details = true); + static void print_shared(bool details = true); static void print_class_statistics() PRODUCT_RETURN; static void print_method_statistics() PRODUCT_RETURN; @@ -424,7 +438,7 @@ static void load_abstract_ownable_synchronizer_klass(TRAPS); -private: +protected: // Tells whether ClassLoader.loadClassInternal is present static bool has_loadClassInternal() { return _has_loadClassInternal; } @@ -452,7 +466,7 @@ // Register a new class loader static ClassLoaderData* register_loader(Handle class_loader, TRAPS); -private: +protected: // Mirrors for primitive classes (created eagerly) static oop check_mirror(oop m) { assert(m != NULL, "mirror not initialized"); @@ -523,7 +537,7 @@ static Symbol* find_resolution_error(constantPoolHandle pool, int which, Symbol** message); - private: + protected: enum Constants { _loader_constraint_size = 107, // number of entries in constraint table @@ -574,7 +588,7 @@ friend class CounterDecay; static Klass* try_get_next_class(); -private: +protected: static void validate_protection_domain(instanceKlassHandle klass, Handle class_loader, Handle protection_domain, TRAPS); @@ -601,10 +615,10 @@ static instanceKlassHandle find_or_define_instance_class(Symbol* class_name, Handle class_loader, instanceKlassHandle k, TRAPS); - static instanceKlassHandle load_shared_class(Symbol* class_name, - Handle class_loader, TRAPS); static instanceKlassHandle load_shared_class(instanceKlassHandle ik, - Handle class_loader, TRAPS); + Handle class_loader, + Handle protection_domain, + TRAPS); static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS); static Handle compute_loader_lock_object(Handle class_loader, TRAPS); static void check_loader_lock_contention(Handle loader_lock, TRAPS); @@ -612,9 +626,12 @@ static bool is_parallelDefine(Handle class_loader); public: + static instanceKlassHandle load_shared_class(Symbol* class_name, + Handle class_loader, + TRAPS); static bool is_ext_class_loader(Handle class_loader); -private: +protected: static Klass* find_shared_class(Symbol* class_name); // Setup link to hierarchy
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+
+#include "classfile/systemDictionary.hpp"
+
+class SystemDictionaryShared: public SystemDictionary {
+public:
+  static void initialize(TRAPS) {}
+  static instanceKlassHandle find_or_load_shared_class(Symbol* class_name,
+                                                       Handle class_loader,
+                                                       TRAPS) {
+    return instanceKlassHandle();
+  }
+  static void roots_oops_do(OopClosure* blk) {}
+  static void oops_do(OopClosure* f) {}
+  static bool is_sharing_possible(ClassLoaderData* loader_data) {
+    oop class_loader = loader_data->class_loader();
+    return (class_loader == NULL);
+  }
+};
+
+#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
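The new header is deliberately a stub: every method is a no-op, so call sites can invoke SystemDictionaryShared unconditionally while a build that really extends CDS drops in a richer version of the class with the same interface, and the stub's is_sharing_possible() only admits the NULL (boot) class loader. A condensed sketch of that convention, re-creating simplified stand-ins for the INCLUDE_CDS/CDS_ONLY macros purely for illustration:

#include <stdio.h>

#define INCLUDE_CDS 1
#if INCLUDE_CDS
#define CDS_ONLY(code) code
#else
#define CDS_ONLY(code)
#endif

// No-op defaults: callers compile the same either way, and CDS_ONLY() makes
// the calls vanish entirely when the feature is configured out.
class SystemDictionaryShared {
 public:
  static void initialize() { printf("shared dictionary initialized\n"); }
  static bool is_sharing_possible(const void* class_loader) {
    return class_loader == NULL;              // stub: only the boot loader can share
  }
};

int main() {
  CDS_ONLY(SystemDictionaryShared::initialize();)   // compiles away if INCLUDE_CDS is 0
  int dummy_loader = 0;
  printf("boot loader shares: %d\n", SystemDictionaryShared::is_sharing_possible(NULL));
  printf("app  loader shares: %d\n", SystemDictionaryShared::is_sharing_possible(&dummy_loader));
  return 0;
}
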
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -91,11 +91,17 @@ template(java_lang_CharSequence, "java/lang/CharSequence") \ template(java_lang_SecurityManager, "java/lang/SecurityManager") \ template(java_security_AccessControlContext, "java/security/AccessControlContext") \ + template(java_security_CodeSource, "java/security/CodeSource") \ template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \ + template(java_security_SecureClassLoader, "java/security/SecureClassLoader") \ + template(java_net_URLClassLoader, "java/net/URLClassLoader") \ + template(java_net_URL, "java/net/URL") \ + template(java_util_jar_Manifest, "java/util/jar/Manifest") \ template(impliesCreateAccessControlContext_name, "impliesCreateAccessControlContext") \ template(java_io_OutputStream, "java/io/OutputStream") \ template(java_io_Reader, "java/io/Reader") \ template(java_io_BufferedReader, "java/io/BufferedReader") \ + template(java_io_File, "java/io/File") \ template(java_io_FileInputStream, "java/io/FileInputStream") \ template(java_io_ByteArrayInputStream, "java/io/ByteArrayInputStream") \ template(java_io_Serializable, "java/io/Serializable") \ @@ -106,6 +112,7 @@ template(java_util_Hashtable, "java/util/Hashtable") \ template(java_lang_Compiler, "java/lang/Compiler") \ template(sun_misc_Signal, "sun/misc/Signal") \ + template(sun_misc_Launcher, "sun/misc/Launcher") \ template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \ template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \ template(sun_misc_PostVMInitHook, "sun/misc/PostVMInitHook") \ @@ -396,6 +403,14 @@ template(signers_name, "signers_name") \ template(loader_data_name, "loader_data") \ template(dependencies_name, "dependencies") \ + template(input_stream_void_signature, "(Ljava/io/InputStream;)V") \ + template(getFileURL_name, "getFileURL") \ + template(getFileURL_signature, "(Ljava/io/File;)Ljava/net/URL;") \ + template(definePackageInternal_name, "definePackageInternal") \ + template(definePackageInternal_signature, "(Ljava/lang/String;Ljava/util/jar/Manifest;Ljava/net/URL;)V") \ + template(getProtectionDomain_name, "getProtectionDomain") \ + template(getProtectionDomain_signature, "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \ + template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \ \ /* non-intrinsic name/signature pairs: */ \ template(register_method_name, "register") \
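The *_signature entries added above are ordinary JVM method descriptors; for example "(Ljava/io/File;)Ljava/net/URL;" describes a method taking a java.io.File and returning a java.net.URL. A small illustrative parser (not HotSpot code, and assuming well-formed descriptors) that counts the parameters in such a descriptor:

#include <stdio.h>
#include <string.h>

// Counts the parameters in a JVM method descriptor like "(...)R".
int param_count(const char* desc) {
  int n = 0;
  const char* p = desc + 1;                   // skip '('
  while (*p != ')') {
    while (*p == '[') p++;                    // array dimensions
    if (*p == 'L') {
      p = strchr(p, ';') + 1;                 // reference type: scan past ';'
    } else {
      p++;                                    // primitive: single letter
    }
    n++;
  }
  return n;
}

int main() {
  // definePackageInternal takes (String, Manifest, URL) and returns void:
  printf("%d\n", param_count("(Ljava/lang/String;Ljava/util/jar/Manifest;Ljava/net/URL;)V")); // 3
  // url_code_signer_array_void_signature takes (URL, CodeSigner[]):
  printf("%d\n", param_count("(Ljava/net/URL;[Ljava/security/CodeSigner;)V"));                // 2
  return 0;
}
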
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -2734,10 +2734,12 @@ } } -void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) { - assert(fl->count() == 0, "Precondition."); - assert(word_sz < CompactibleFreeListSpace::IndexSetSize, - "Precondition"); +// Used by par_get_chunk_of_blocks() for the chunks from the +// indexed_free_lists. Looks for a chunk with size that is a multiple +// of "word_sz" and if found, splits it into "word_sz" chunks and add +// to the free list "fl". "n" is the maximum number of chunks to +// be added to "fl". +bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) { // We'll try all multiples of word_sz in the indexed set, starting with // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples, @@ -2818,11 +2820,15 @@ Mutex::_no_safepoint_check_flag); ssize_t births = _indexedFreeList[word_sz].split_births() + num; _indexedFreeList[word_sz].set_split_births(births); - return; + return true; } } + return found; } - // Otherwise, we'll split a block from the dictionary. +} + +FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) { + FreeChunk* fc = NULL; FreeChunk* rem_fc = NULL; size_t rem; @@ -2833,16 +2839,12 @@ fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()), FreeBlockDictionary<FreeChunk>::atLeast); if (fc != NULL) { - _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk - dictionary()->dict_census_update(fc->size(), - true /*split*/, - false /*birth*/); break; } else { n--; } } - if (fc == NULL) return; + if (fc == NULL) return NULL; // Otherwise, split up that block. assert((ssize_t)n >= 1, "Control point invariant"); assert(fc->is_free(), "Error: should be a free block"); @@ -2864,10 +2866,14 @@ // dictionary and return, leaving "fl" empty. if (n == 0) { returnChunkToDictionary(fc); - assert(fl->count() == 0, "We never allocated any blocks"); - return; + return NULL; } + _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk + dictionary()->dict_census_update(fc->size(), + true /*split*/, + false /*birth*/); + // First return the remainder, if any. // Note that we hold the lock until we decide if we're going to give // back the remainder to the dictionary, since a concurrent allocation @@ -2900,7 +2906,24 @@ _indexedFreeList[rem].return_chunk_at_head(rem_fc); smallSplitBirth(rem); } - assert((ssize_t)n > 0 && fc != NULL, "Consistency"); + assert(n * word_sz == fc->size(), + err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by " + SIZE_FORMAT " sized chunks of size " SIZE_FORMAT, + fc->size(), n, word_sz)); + return fc; +} + +void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) { + + FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks); + + if (fc == NULL) { + return; + } + + size_t n = fc->size() / word_sz; + + assert((ssize_t)n > 0, "Consistency"); // Now do the splitting up. 
 // Must do this in reverse order, so that anybody attempting to
 // access the main chunk sees it as a single free block until we
@@ -2948,6 +2971,20 @@
   assert(fl->tail()->next() == NULL, "List invariant.");
 }
 
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
+  assert(fl->count() == 0, "Precondition.");
+  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
+         "Precondition");
+
+  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
+    // Got it
+    return;
+  }
+
+  // Otherwise, we'll split a block from the dictionary.
+  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
+}
+
 // Set up the space's par_seq_tasks structure for work claiming
 // for parallel rescan. See CMSParRemarkTask where this is currently used.
 // XXX Need to suitably abstract and generalize this and the next
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -172,6 +172,20 @@
   // list of size "word_sz", and must now be decremented.
   void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
 
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // indexed_free_lists.
+  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
+  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
+  // evenly splittable into "n" "word_sz" chunks. Returns that
+  // evenly splittable chunk. May split a larger chunk to get the
+  // evenly splittable chunk.
+  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
+
+  // Used by par_get_chunk_of_blocks() for the chunks from the
+  // dictionary.
+  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
+
   // Allocation helper functions
   // Allocate using a strategy that takes from the indexed free lists
   // first. This allocation strategy assumes a companion sweeping
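Taken together, these two hunks split the old monolithic par_get_chunk_of_blocks() into two phases: first try the indexed free lists for a chunk that is an exact multiple of word_sz, then fall back to carving a large dictionary chunk. A toy model of that two-phase strategy, with plain STL containers standing in for AdaptiveFreeList and the free block dictionary, and with the census and remainder bookkeeping of the real code omitted:

#include <stdio.h>
#include <map>
#include <vector>

typedef std::vector<size_t> FreeList;         // chunk sizes held by one free list

// Phase 1 (mirrors par_get_chunk_of_blocks_IFL): look in the indexed lists
// for a chunk whose size is a multiple of word_sz, split it into word_sz
// pieces, and report success.
bool get_blocks_ifl(std::map<size_t, FreeList>& indexed, size_t word_sz,
                    size_t n, FreeList& fl) {
  for (size_t k = 1; k <= n; k++) {
    std::map<size_t, FreeList>::iterator it = indexed.find(k * word_sz);
    if (it != indexed.end() && !it->second.empty()) {
      it->second.pop_back();                  // take one chunk of size k*word_sz
      for (size_t i = 0; i < k; i++) fl.push_back(word_sz);
      return true;
    }
  }
  return false;
}

// Phase 2 (mirrors par_get_chunk_of_blocks_dictionary): carve n word_sz-sized
// blocks out of one large chunk and give the remainder back.
void get_blocks_dictionary(FreeList& dictionary, size_t word_sz, size_t n,
                           FreeList& fl) {
  if (dictionary.empty() || dictionary.back() < n * word_sz) return;
  size_t chunk = dictionary.back();
  dictionary.pop_back();
  size_t rem = chunk - n * word_sz;
  if (rem > 0) dictionary.push_back(rem);     // return the remainder
  for (size_t i = 0; i < n; i++) fl.push_back(word_sz);
}

int main() {
  std::map<size_t, FreeList> indexed;
  indexed[16].push_back(16);                  // one 16-word chunk available
  FreeList dictionary(1, 1024);               // one big 1024-word chunk
  FreeList fl;
  if (!get_blocks_ifl(indexed, 8, 4, fl)) {   // finds the 16 = 2*8 chunk first
    get_blocks_dictionary(dictionary, 8, 4, fl);
  }
  printf("got %u blocks of 8 words\n", (unsigned) fl.size());
  return 0;
}
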
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Jul 05 19:59:00 2017 +0200
@@ -81,8 +81,8 @@
   }
 }
 
-void ConcurrentG1Refine::init() {
-  _hot_card_cache.initialize();
+void ConcurrentG1Refine::init(G1RegionToSpaceMapper* card_counts_storage) {
+  _hot_card_cache.initialize(card_counts_storage);
 }
 
 void ConcurrentG1Refine::stop() {
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -34,6 +34,7 @@
 class ConcurrentG1RefineThread;
 class G1CollectedHeap;
 class G1HotCardCache;
+class G1RegionToSpaceMapper;
 class G1RemSet;
 class DirtyCardQueue;
 
@@ -74,7 +75,7 @@
   ConcurrentG1Refine(G1CollectedHeap* g1h, CardTableEntryClosure* refine_closure);
   ~ConcurrentG1Refine();
 
-  void init(); // Accomplish some initialization that has to wait.
+  void init(G1RegionToSpaceMapper* card_counts_storage);
   void stop();
 
   void reinitialize_threads();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -36,6 +36,7 @@ #include "gc_implementation/g1/heapRegion.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionSet.inline.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" #include "gc_implementation/shared/gcTimer.hpp" #include "gc_implementation/shared/gcTrace.hpp" @@ -99,12 +100,12 @@ } #ifndef PRODUCT -bool CMBitMapRO::covers(ReservedSpace heap_rs) const { +bool CMBitMapRO::covers(MemRegion heap_rs) const { // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize, "size inconsistency"); - return _bmStartWord == (HeapWord*)(heap_rs.base()) && - _bmWordSize == heap_rs.size()>>LogHeapWordSize; + return _bmStartWord == (HeapWord*)(heap_rs.start()) && + _bmWordSize == heap_rs.word_size(); } #endif @@ -112,33 +113,73 @@ _bm.print_on_error(st, prefix); } -bool CMBitMap::allocate(ReservedSpace heap_rs) { - _bmStartWord = (HeapWord*)(heap_rs.base()); - _bmWordSize = heap_rs.size()/HeapWordSize; // heap_rs.size() is in bytes - ReservedSpace brs(ReservedSpace::allocation_align_size_up( - (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); - if (!brs.is_reserved()) { - warning("ConcurrentMark marking bit map allocation failure"); +size_t CMBitMap::compute_size(size_t heap_size) { + return heap_size / mark_distance(); +} + +size_t CMBitMap::mark_distance() { + return MinObjAlignmentInBytes * BitsPerByte; +} + +void CMBitMap::initialize(MemRegion heap, G1RegionToSpaceMapper* storage) { + _bmStartWord = heap.start(); + _bmWordSize = heap.word_size(); + + _bm.set_map((BitMap::bm_word_t*) storage->reserved().start()); + _bm.set_size(_bmWordSize >> _shifter); + + storage->set_mapping_changed_listener(&_listener); +} + +void CMBitMapMappingChangedListener::on_commit(uint start_region, size_t num_regions) { + // We need to clear the bitmap on commit, removing any existing information. + MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_region), num_regions * HeapRegion::GrainWords); + _bm->clearRange(mr); +} + +// Closure used for clearing the given mark bitmap. +class ClearBitmapHRClosure : public HeapRegionClosure { + private: + ConcurrentMark* _cm; + CMBitMap* _bitmap; + bool _may_yield; // The closure may yield during iteration. If yielded, abort the iteration. + public: + ClearBitmapHRClosure(ConcurrentMark* cm, CMBitMap* bitmap, bool may_yield) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap), _may_yield(may_yield) { + assert(!may_yield || cm != NULL, "CM must be non-NULL if this closure is expected to yield."); + } + + virtual bool doHeapRegion(HeapRegion* r) { + size_t const chunk_size_in_words = M / HeapWordSize; + + HeapWord* cur = r->bottom(); + HeapWord* const end = r->end(); + + while (cur < end) { + MemRegion mr(cur, MIN2(cur + chunk_size_in_words, end)); + _bitmap->clearRange(mr); + + cur += chunk_size_in_words; + + // Abort iteration if after yielding the marking has been aborted. + if (_may_yield && _cm->do_yield_check() && _cm->has_aborted()) { + return true; + } + // Repeat the asserts from before the start of the closure. We will do them + // as asserts here to minimize their overhead on the product. 
However, we + // will have them as guarantees at the beginning / end of the bitmap + // clearing to get some checking in the product. + assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant"); + assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant"); + } + return false; } - MemTracker::record_virtual_memory_type((address)brs.base(), mtGC); - // For now we'll just commit all of the bit map up front. - // Later on we'll try to be more parsimonious with swap. - if (!_virtual_space.initialize(brs, brs.size())) { - warning("ConcurrentMark marking bit map backing store failure"); - return false; - } - assert(_virtual_space.committed_size() == brs.size(), - "didn't reserve backing store for all of concurrent marking bit map?"); - _bm.set_map((BitMap::bm_word_t*)_virtual_space.low()); - assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= - _bmWordSize, "inconsistency in bit map sizing"); - _bm.set_size(_bmWordSize >> _shifter); - return true; -} +}; void CMBitMap::clearAll() { - _bm.clear(); + ClearBitmapHRClosure cl(NULL, this, false /* may_yield */); + G1CollectedHeap::heap()->heap_region_iterate(&cl); + guarantee(cl.complete(), "Must have completed iteration."); return; } @@ -483,10 +524,10 @@ return MAX2((n_par_threads + 2) / 4, 1U); } -ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) : +ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage) : _g1h(g1h), - _markBitMap1(log2_intptr(MinObjAlignment)), - _markBitMap2(log2_intptr(MinObjAlignment)), + _markBitMap1(), + _markBitMap2(), _parallel_marking_threads(0), _max_parallel_marking_threads(0), _sleep_factor(0.0), @@ -495,7 +536,7 @@ _cleanup_task_overhead(1.0), _cleanup_list("Cleanup List"), _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/), - _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >> + _card_bm((g1h->reserved_region().byte_size() + CardTableModRefBS::card_size - 1) >> CardTableModRefBS::card_shift, false /* in_resource_area*/), @@ -545,14 +586,8 @@ "heap end = " PTR_FORMAT, p2i(_heap_start), p2i(_heap_end)); } - if (!_markBitMap1.allocate(heap_rs)) { - warning("Failed to allocate first CM bit map"); - return; - } - if (!_markBitMap2.allocate(heap_rs)) { - warning("Failed to allocate second CM bit map"); - return; - } + _markBitMap1.initialize(g1h->reserved_region(), prev_bitmap_storage); + _markBitMap2.initialize(g1h->reserved_region(), next_bitmap_storage); // Create & start a ConcurrentMark thread. _cmThread = new ConcurrentMarkThread(this); @@ -563,8 +598,8 @@ } assert(CGC_lock != NULL, "Where's the CGC_lock?"); - assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency"); - assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency"); + assert(_markBitMap1.covers(g1h->reserved_region()), "_markBitMap1 inconsistency"); + assert(_markBitMap2.covers(g1h->reserved_region()), "_markBitMap2 inconsistency"); SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set(); satb_qs.set_buffer_size(G1SATBBufferSize); @@ -724,38 +759,17 @@ clear_all_count_data(); // so that the call below can read a sensible value - _heap_start = (HeapWord*) heap_rs.base(); + _heap_start = g1h->reserved_region().start(); set_non_marking_state(); _completed_initialization = true; } -void ConcurrentMark::update_g1_committed(bool force) { - // If concurrent marking is not in progress, then we do not need to - // update _heap_end. 
- if (!concurrent_marking_in_progress() && !force) return; - - MemRegion committed = _g1h->g1_committed(); - assert(committed.start() == _heap_start, "start shouldn't change"); - HeapWord* new_end = committed.end(); - if (new_end > _heap_end) { - // The heap has been expanded. - - _heap_end = new_end; - } - // Notice that the heap can also shrink. However, this only happens - // during a Full GC (at least currently) and the entire marking - // phase will bail out and the task will not be restarted. So, let's - // do nothing. -} - void ConcurrentMark::reset() { // Starting values for these two. This should be called in a STW - // phase. CM will be notified of any future g1_committed expansions - // will be at the end of evacuation pauses, when tasks are - // inactive. - MemRegion committed = _g1h->g1_committed(); - _heap_start = committed.start(); - _heap_end = committed.end(); + // phase. + MemRegion reserved = _g1h->g1_reserved(); + _heap_start = reserved.start(); + _heap_end = reserved.end(); // Separated the asserts so that we know which one fires. assert(_heap_start != NULL, "heap bounds should look ok"); @@ -827,7 +841,6 @@ assert(out_of_regions(), err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT, p2i(_finger), p2i(_heap_end))); - update_g1_committed(true); } } @@ -846,7 +859,6 @@ void ConcurrentMark::clearNextBitmap() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - G1CollectorPolicy* g1p = g1h->g1_policy(); // Make sure that the concurrent mark thread looks to still be in // the current cycle. @@ -858,41 +870,36 @@ // is the case. guarantee(!g1h->mark_in_progress(), "invariant"); - // clear the mark bitmap (no grey objects to start with). - // We need to do this in chunks and offer to yield in between - // each chunk. - HeapWord* start = _nextMarkBitMap->startWord(); - HeapWord* end = _nextMarkBitMap->endWord(); - HeapWord* cur = start; - size_t chunkSize = M; - while (cur < end) { - HeapWord* next = cur + chunkSize; - if (next > end) { - next = end; - } - MemRegion mr(cur,next); - _nextMarkBitMap->clearRange(mr); - cur = next; - do_yield_check(); - - // Repeat the asserts from above. We'll do them as asserts here to - // minimize their overhead on the product. However, we'll have - // them as guarantees at the beginning / end of the bitmap - // clearing to get some checking in the product. - assert(cmThread()->during_cycle(), "invariant"); - assert(!g1h->mark_in_progress(), "invariant"); + ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */); + g1h->heap_region_iterate(&cl); + + // Clear the liveness counting data. If the marking has been aborted, the abort() + // call already did that. + if (cl.complete()) { + clear_all_count_data(); } - // Clear the liveness counting data - clear_all_count_data(); - // Repeat the asserts from above. 
guarantee(cmThread()->during_cycle(), "invariant"); guarantee(!g1h->mark_in_progress(), "invariant"); } +class CheckBitmapClearHRClosure : public HeapRegionClosure { + CMBitMap* _bitmap; + bool _error; + public: + CheckBitmapClearHRClosure(CMBitMap* bitmap) : _bitmap(bitmap) { + } + + virtual bool doHeapRegion(HeapRegion* r) { + return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end(); + } +}; + bool ConcurrentMark::nextMarkBitmapIsClear() { - return _nextMarkBitMap->getNextMarkedWordAddress(_heap_start, _heap_end) == _heap_end; + CheckBitmapClearHRClosure cl(_nextMarkBitMap); + _g1h->heap_region_iterate(&cl); + return cl.complete(); } class NoteStartOfMarkHRClosure: public HeapRegionClosure { @@ -2193,10 +2200,10 @@ _cleanup_list.length()); } - // Noone else should be accessing the _cleanup_list at this point, - // so it's not necessary to take any locks + // No one else should be accessing the _cleanup_list at this point, + // so it is not necessary to take any locks while (!_cleanup_list.is_empty()) { - HeapRegion* hr = _cleanup_list.remove_head(); + HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */); assert(hr != NULL, "Got NULL from a non-empty list"); hr->par_clear(); tmp_free_list.add_ordered(hr); @@ -2980,22 +2987,25 @@ // claim_region() and a humongous object allocation might force us // to do a bit of unnecessary work (due to some unnecessary bitmap // iterations) but it should not introduce and correctness issues. - HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); - HeapWord* bottom = curr_region->bottom(); - HeapWord* end = curr_region->end(); - HeapWord* limit = curr_region->next_top_at_mark_start(); - - if (verbose_low()) { - gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " - "["PTR_FORMAT", "PTR_FORMAT"), " - "limit = "PTR_FORMAT, - worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit)); - } + HeapRegion* curr_region = _g1h->heap_region_containing_raw(finger); + + // Above heap_region_containing_raw may return NULL as we always scan claim + // until the end of the heap. In this case, just jump to the next region. + HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords; // Is the gap between reading the finger and doing the CAS too long? 
HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger); - if (res == finger) { + if (res == finger && curr_region != NULL) { // we succeeded + HeapWord* bottom = curr_region->bottom(); + HeapWord* limit = curr_region->next_top_at_mark_start(); + + if (verbose_low()) { + gclog_or_tty->print_cr("[%u] curr_region = "PTR_FORMAT" " + "["PTR_FORMAT", "PTR_FORMAT"), " + "limit = "PTR_FORMAT, + worker_id, p2i(curr_region), p2i(bottom), p2i(end), p2i(limit)); + } // notice that _finger == end cannot be guaranteed here since, // someone else might have moved the finger even further @@ -3026,10 +3036,17 @@ } else { assert(_finger > finger, "the finger should have moved forward"); if (verbose_low()) { - gclog_or_tty->print_cr("[%u] somebody else moved the finger, " - "global finger = "PTR_FORMAT", " - "our finger = "PTR_FORMAT, - worker_id, p2i(_finger), p2i(finger)); + if (curr_region == NULL) { + gclog_or_tty->print_cr("[%u] found uncommitted region, moving finger, " + "global finger = "PTR_FORMAT", " + "our finger = "PTR_FORMAT, + worker_id, p2i(_finger), p2i(finger)); + } else { + gclog_or_tty->print_cr("[%u] somebody else moved the finger, " + "global finger = "PTR_FORMAT", " + "our finger = "PTR_FORMAT, + worker_id, p2i(_finger), p2i(finger)); + } } // read it again @@ -3144,8 +3161,10 @@ // happens, heap_region_containing() will return the bottom of the // corresponding starts humongous region and the check below will // not hold any more. + // Since we always iterate over all regions, we might get a NULL HeapRegion + // here. HeapRegion* global_hr = _g1h->heap_region_containing_raw(global_finger); - guarantee(global_finger == global_hr->bottom(), + guarantee(global_hr == NULL || global_finger == global_hr->bottom(), err_msg("global finger: "PTR_FORMAT" region: "HR_FORMAT, p2i(global_finger), HR_FORMAT_PARAMS(global_hr))); } @@ -3158,7 +3177,7 @@ if (task_finger != NULL && task_finger < _heap_end) { // See above note on the global finger verification. HeapRegion* task_hr = _g1h->heap_region_containing_raw(task_finger); - guarantee(task_finger == task_hr->bottom() || + guarantee(task_hr == NULL || task_finger == task_hr->bottom() || !task_hr->in_collection_set(), err_msg("task finger: "PTR_FORMAT" region: "HR_FORMAT, p2i(task_finger), HR_FORMAT_PARAMS(task_hr))); @@ -4674,7 +4693,6 @@ _hum_prev_live_bytes(0), _hum_next_live_bytes(0), _total_remset_bytes(0), _total_strong_code_roots_bytes(0) { G1CollectedHeap* g1h = G1CollectedHeap::heap(); - MemRegion g1_committed = g1h->g1_committed(); MemRegion g1_reserved = g1h->g1_reserved(); double now = os::elapsedTime(); @@ -4682,10 +4700,8 @@ _out->cr(); _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" - G1PPRL_SUM_ADDR_FORMAT("committed") G1PPRL_SUM_ADDR_FORMAT("reserved") G1PPRL_SUM_BYTE_FORMAT("region-size"), - p2i(g1_committed.start()), p2i(g1_committed.end()), p2i(g1_reserved.start()), p2i(g1_reserved.end()), HeapRegion::GrainBytes); _out->print_cr(G1PPRL_LINE_PREFIX);
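The claim_region() change above is the subtle one: because marking now always scans up to the end of the reserved (not just committed) heap, heap_region_containing_raw() can return NULL, and a worker that hits such a hole still CASes the finger forward by one region. A reduced single-threaded illustration of that protocol, with std::atomic standing in for Atomic::cmpxchg_ptr and region indices standing in for heap addresses:

#include <stdio.h>
#include <atomic>

static const unsigned kNumRegions = 8;
// Holes at indices 2 and 5 model uncommitted regions in the reserved space.
static bool committed[kNumRegions] = { true, true, false, true, true, false, true, true };
static std::atomic<unsigned> finger(0);

// Returns the claimed region index, or kNumRegions when this attempt claimed
// nothing: either the slot was uncommitted (the curr_region == NULL case,
// which still advances the finger) or another worker won the CAS.
unsigned claim_region() {
  unsigned cur = finger.load();
  if (cur >= kNumRegions) return kNumRegions;   // out of regions
  unsigned expected = cur;
  if (finger.compare_exchange_strong(expected, cur + 1) && committed[cur]) {
    return cur;                                 // we own this region
  }
  return kNumRegions;                           // lost the race or hit a hole
}

int main() {
  while (finger.load() < kNumRegions) {
    unsigned r = claim_region();
    if (r < kNumRegions) printf("claimed region %u\n", r);
  }
  return 0;
}
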
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -27,10 +27,12 @@ #include "classfile/javaClasses.hpp" #include "gc_implementation/g1/heapRegionSet.hpp" +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" #include "gc_implementation/shared/gcId.hpp" #include "utilities/taskqueue.hpp" class G1CollectedHeap; +class CMBitMap; class CMTask; typedef GenericTaskQueue<oop, mtGC> CMTaskQueue; typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet; @@ -57,7 +59,6 @@ HeapWord* _bmStartWord; // base address of range covered by map size_t _bmWordSize; // map size (in #HeapWords covered) const int _shifter; // map to char or bit - VirtualSpace _virtual_space; // underlying the bit map BitMap _bm; // the bit map itself public: @@ -115,42 +116,41 @@ void print_on_error(outputStream* st, const char* prefix) const; // debugging - NOT_PRODUCT(bool covers(ReservedSpace rs) const;) + NOT_PRODUCT(bool covers(MemRegion rs) const;) +}; + +class CMBitMapMappingChangedListener : public G1MappingChangedListener { + private: + CMBitMap* _bm; + public: + CMBitMapMappingChangedListener() : _bm(NULL) {} + + void set_bitmap(CMBitMap* bm) { _bm = bm; } + + virtual void on_commit(uint start_idx, size_t num_regions); }; class CMBitMap : public CMBitMapRO { + private: + CMBitMapMappingChangedListener _listener; public: - // constructor - CMBitMap(int shifter) : - CMBitMapRO(shifter) {} + static size_t compute_size(size_t heap_size); + // Returns the amount of bytes on the heap between two marks in the bitmap. + static size_t mark_distance(); - // Allocates the back store for the marking bitmap - bool allocate(ReservedSpace heap_rs); + CMBitMap() : CMBitMapRO(LogMinObjAlignment), _listener() { _listener.set_bitmap(this); } - // write marks - void mark(HeapWord* addr) { - assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), - "outside underlying space?"); - _bm.set_bit(heapWordToOffset(addr)); - } - void clear(HeapWord* addr) { - assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), - "outside underlying space?"); - _bm.clear_bit(heapWordToOffset(addr)); - } - bool parMark(HeapWord* addr) { - assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), - "outside underlying space?"); - return _bm.par_set_bit(heapWordToOffset(addr)); - } - bool parClear(HeapWord* addr) { - assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize), - "outside underlying space?"); - return _bm.par_clear_bit(heapWordToOffset(addr)); - } + // Initializes the underlying BitMap to cover the given area. + void initialize(MemRegion heap, G1RegionToSpaceMapper* storage); + + // Write marks. + inline void mark(HeapWord* addr); + inline void clear(HeapWord* addr); + inline bool parMark(HeapWord* addr); + inline bool parClear(HeapWord* addr); + void markRange(MemRegion mr); - void clearAll(); void clearRange(MemRegion mr); // Starting at the bit corresponding to "addr" (inclusive), find the next @@ -161,6 +161,9 @@ // the run. If there is no "1" bit at or after "addr", return an empty // MemRegion. MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr); + + // Clear the whole mark bitmap. + void clearAll(); }; // Represents a marking stack used by ConcurrentMarking in the G1 collector. 
@@ -680,7 +683,7 @@ return _task_queues->steal(worker_id, hash_seed, obj); } - ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs); + ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage); ~ConcurrentMark(); ConcurrentMarkThread* cmThread() { return _cmThread; } @@ -736,7 +739,8 @@ // Clear the next marking bitmap (will be called concurrently). void clearNextBitmap(); - // Return whether the next mark bitmap has no marks set. + // Return whether the next mark bitmap has no marks set. To be used for assertions + // only. Will not yield to pause requests. bool nextMarkBitmapIsClear(); // These two do the work that needs to be done before and after the @@ -794,12 +798,6 @@ bool verify_thread_buffers, bool verify_fingers) PRODUCT_RETURN; - // It is called at the end of an evacuation pause during marking so - // that CM is notified of where the new end of the heap is. It - // doesn't do anything if concurrent_marking_in_progress() is false, - // unless the force parameter is true. - void update_g1_committed(bool force = false); - bool isMarked(oop p) const { assert(p != NULL && p->is_oop(), "expected an oop"); HeapWord* addr = (HeapWord*)p;
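The common thread in these ConcurrentMark changes is the listener hook: an auxiliary structure registers a callback with its G1RegionToSpaceMapper and is told exactly which regions were committed, so it can (re)initialize just that slice of its backing storage. A self-contained sketch of the pattern, with simplified stand-ins for G1MappingChangedListener and the mapper:

#include <stdio.h>
#include <vector>

class MappingChangedListener {
 public:
  virtual void on_commit(unsigned start_region, size_t num_regions) = 0;
  virtual ~MappingChangedListener() {}
};

class RegionToSpaceMapper {
  MappingChangedListener* _listener;
 public:
  RegionToSpaceMapper() : _listener(NULL) {}
  void set_mapping_changed_listener(MappingChangedListener* l) { _listener = l; }
  void commit_regions(unsigned start, size_t n) {
    // ... commit the backing memory for regions [start, start+n) here ...
    if (_listener != NULL) _listener->on_commit(start, n);
  }
};

// Like CMBitMapMappingChangedListener: clear exactly the slice of the bitmap
// whose backing storage was just committed.
class BitmapListener : public MappingChangedListener {
  std::vector<bool>* _bits;
  size_t _bits_per_region;
 public:
  BitmapListener(std::vector<bool>* bits, size_t per_region)
      : _bits(bits), _bits_per_region(per_region) {}
  virtual void on_commit(unsigned start, size_t n) {
    for (size_t i = start * _bits_per_region; i < (start + n) * _bits_per_region; i++) {
      (*_bits)[i] = false;
    }
    printf("cleared bitmap slice for regions [%u, %u)\n", start, (unsigned)(start + n));
  }
};

int main() {
  std::vector<bool> bits(8 * 4, true);          // 8 regions, 4 bits each
  BitmapListener listener(&bits, 4);
  RegionToSpaceMapper mapper;
  mapper.set_mapping_changed_listener(&listener);
  mapper.commit_regions(2, 3);                  // resets the bits for regions 2..4
  return 0;
}
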
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -268,6 +268,36 @@
   return iterate(cl, mr);
 }
 
+#define check_mark(addr)                                                       \
+  assert(_bmStartWord <= (addr) && (addr) < (_bmStartWord + _bmWordSize),      \
+         "outside underlying space?");                                         \
+  assert(G1CollectedHeap::heap()->is_in_exact(addr),                           \
+         err_msg("Trying to access not available bitmap "PTR_FORMAT            \
+                 " corresponding to "PTR_FORMAT" (%u)",                        \
+                 p2i(this), p2i(addr), G1CollectedHeap::heap()->addr_to_region(addr)));
+
+inline void CMBitMap::mark(HeapWord* addr) {
+  check_mark(addr);
+  _bm.set_bit(heapWordToOffset(addr));
+}
+
+inline void CMBitMap::clear(HeapWord* addr) {
+  check_mark(addr);
+  _bm.clear_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parMark(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_set_bit(heapWordToOffset(addr));
+}
+
+inline bool CMBitMap::parClear(HeapWord* addr) {
+  check_mark(addr);
+  return _bm.par_clear_bit(heapWordToOffset(addr));
+}
+
+#undef check_mark
+
 inline void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
   assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
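Note that check_mark is #undef'd immediately after its last use, keeping the macro strictly file-local. The same idiom in miniature (hypothetical names, just to show the bracketing):

#include <assert.h>
#include <stddef.h>

#define check_index(i, n) assert((i) < (n) && "index out of range")

inline int get_at(const int* a, size_t i, size_t n) {
  check_index(i, n);
  return a[i];
}

#undef check_index   // nothing below this point can expand the macro

int main() {
  int a[3] = { 1, 2, 3 };
  return get_at(a, 2, 3) == 3 ? 0 : 1;
}
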
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Fri Aug 29 11:57:50 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Jul 05 19:59:00 2017 +0200
@@ -173,7 +173,7 @@
 
   // Should be called when we want to release the active region which
   // is returned after it's been retired.
-  HeapRegion* release();
+  virtual HeapRegion* release();
 
 #if G1_ALLOC_REGION_TRACING
   void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -32,64 +32,37 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC +void G1BlockOffsetSharedArrayMappingChangedListener::on_commit(uint start_idx, size_t num_regions) { + // Nothing to do. The BOT is hard-wired to be part of the HeapRegion, and we cannot + // retrieve it here since this would cause firing of several asserts. The code + // executed after commit of a region already needs to do some re-initialization of + // the HeapRegion, so we combine that. +} + ////////////////////////////////////////////////////////////////////// // G1BlockOffsetSharedArray ////////////////////////////////////////////////////////////////////// -G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion reserved, - size_t init_word_size) : - _reserved(reserved), _end(NULL) -{ - size_t size = compute_size(reserved.word_size()); - ReservedSpace rs(ReservedSpace::allocation_align_size_up(size)); - if (!rs.is_reserved()) { - vm_exit_during_initialization("Could not reserve enough space for heap offset array"); - } - if (!_vs.initialize(rs, 0)) { - vm_exit_during_initialization("Could not reserve enough space for heap offset array"); - } +G1BlockOffsetSharedArray::G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage) : + _reserved(), _end(NULL), _listener(), _offset_array(NULL) { + + _reserved = heap; + _end = NULL; - MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); + MemRegion bot_reserved = storage->reserved(); - _offset_array = (u_char*)_vs.low_boundary(); - resize(init_word_size); + _offset_array = (u_char*)bot_reserved.start(); + _end = _reserved.end(); + + storage->set_mapping_changed_listener(&_listener); + if (TraceBlockOffsetTable) { gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: "); gclog_or_tty->print_cr(" " " rs.base(): " INTPTR_FORMAT " rs.size(): " INTPTR_FORMAT " rs end(): " INTPTR_FORMAT, - rs.base(), rs.size(), rs.base() + rs.size()); - gclog_or_tty->print_cr(" " - " _vs.low_boundary(): " INTPTR_FORMAT - " _vs.high_boundary(): " INTPTR_FORMAT, - _vs.low_boundary(), - _vs.high_boundary()); - } -} - -void G1BlockOffsetSharedArray::resize(size_t new_word_size) { - assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved"); - size_t new_size = compute_size(new_word_size); - size_t old_size = _vs.committed_size(); - size_t delta; - char* high = _vs.high(); - _end = _reserved.start() + new_word_size; - if (new_size > old_size) { - delta = ReservedSpace::page_align_size_up(new_size - old_size); - assert(delta > 0, "just checking"); - if (!_vs.expand_by(delta)) { - // Do better than this for Merlin - vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion"); - } - assert(_vs.high() == high + delta, "invalid expansion"); - // Initialization of the contents is left to the - // G1BlockOffsetArray that uses it. 
- } else { - delta = ReservedSpace::page_align_size_down(old_size - new_size); - if (delta == 0) return; - _vs.shrink_by(delta); - assert(_vs.high() == high - delta, "invalid expansion"); + bot_reserved.start(), bot_reserved.byte_size(), bot_reserved.end()); } } @@ -100,18 +73,7 @@ } void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) { - check_index(index_for(right - 1), "right address out of range"); - assert(left < right, "Heap addresses out of order"); - size_t num_cards = pointer_delta(right, left) >> LogN_words; - if (UseMemSetInBOT) { - memset(&_offset_array[index_for(left)], offset, num_cards); - } else { - size_t i = index_for(left); - const size_t end = i + num_cards; - for (; i < end; i++) { - _offset_array[i] = offset; - } - } + set_offset_array(index_for(left), index_for(right -1), offset); } ////////////////////////////////////////////////////////////////////// @@ -650,6 +612,25 @@ _next_offset_index = 0; } +HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold_raw() { + assert(!Universe::heap()->is_in_reserved(_array->_offset_array), + "just checking"); + _next_offset_index = _array->index_for_raw(_bottom); + _next_offset_index++; + _next_offset_threshold = + _array->address_for_index_raw(_next_offset_index); + return _next_offset_threshold; +} + +void G1BlockOffsetArrayContigSpace::zero_bottom_entry_raw() { + assert(!Universe::heap()->is_in_reserved(_array->_offset_array), + "just checking"); + size_t bottom_index = _array->index_for_raw(_bottom); + assert(_array->address_for_index_raw(bottom_index) == _bottom, + "Precondition of call"); + _array->set_offset_array_raw(bottom_index, 0); +} + HeapWord* G1BlockOffsetArrayContigSpace::initialize_threshold() { assert(!Universe::heap()->is_in_reserved(_array->_offset_array), "just checking"); @@ -674,8 +655,7 @@ assert(new_top <= _end, "_end should have already been updated"); // The first BOT entry should have offset 0. - zero_bottom_entry(); - initialize_threshold(); + reset_bot(); alloc_block(_bottom, new_top); }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_HPP +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" #include "memory/memRegion.hpp" #include "runtime/virtualspace.hpp" #include "utilities/globalDefinitions.hpp" @@ -106,6 +107,11 @@ inline HeapWord* block_start_const(const void* addr) const; }; +class G1BlockOffsetSharedArrayMappingChangedListener : public G1MappingChangedListener { + public: + virtual void on_commit(uint start_idx, size_t num_regions); +}; + // This implementation of "G1BlockOffsetTable" divides the covered region // into "N"-word subregions (where "N" = 2^"LogN". An array with an entry // for each such subregion indicates how far back one must go to find the @@ -125,6 +131,7 @@ friend class VMStructs; private: + G1BlockOffsetSharedArrayMappingChangedListener _listener; // The reserved region covered by the shared array. MemRegion _reserved; @@ -133,16 +140,8 @@ // Array for keeping offsets for retrieving object start fast given an // address. - VirtualSpace _vs; u_char* _offset_array; // byte array keeping backwards offsets - void check_index(size_t index, const char* msg) const { - assert(index < _vs.committed_size(), - err_msg("%s - " - "index: " SIZE_FORMAT ", _vs.committed_size: " SIZE_FORMAT, - msg, index, _vs.committed_size())); - } - void check_offset(size_t offset, const char* msg) const { assert(offset <= N_words, err_msg("%s - " @@ -152,63 +151,33 @@ // Bounds checking accessors: // For performance these have to devolve to array accesses in product builds. 
- u_char offset_array(size_t index) const { - check_index(index, "index out of range"); - return _offset_array[index]; - } + inline u_char offset_array(size_t index) const; void set_offset_array(HeapWord* left, HeapWord* right, u_char offset); - void set_offset_array(size_t index, u_char offset) { - check_index(index, "index out of range"); - check_offset(offset, "offset too large"); + void set_offset_array_raw(size_t index, u_char offset) { _offset_array[index] = offset; } - void set_offset_array(size_t index, HeapWord* high, HeapWord* low) { - check_index(index, "index out of range"); - assert(high >= low, "addresses out of order"); - check_offset(pointer_delta(high, low), "offset too large"); - _offset_array[index] = (u_char) pointer_delta(high, low); - } + inline void set_offset_array(size_t index, u_char offset); + + inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low); - void set_offset_array(size_t left, size_t right, u_char offset) { - check_index(right, "right index out of range"); - assert(left <= right, "indexes out of order"); - size_t num_cards = right - left + 1; - if (UseMemSetInBOT) { - memset(&_offset_array[left], offset, num_cards); - } else { - size_t i = left; - const size_t end = i + num_cards; - for (; i < end; i++) { - _offset_array[i] = offset; - } - } - } + inline void set_offset_array(size_t left, size_t right, u_char offset); - void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const { - check_index(index, "index out of range"); - assert(high >= low, "addresses out of order"); - check_offset(pointer_delta(high, low), "offset too large"); - assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset"); - } + inline void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const; bool is_card_boundary(HeapWord* p) const; +public: + // Return the number of slots needed for an offset array // that covers mem_region_words words. - // We always add an extra slot because if an object - // ends on a card boundary we put a 0 in the next - // offset array slot, so we want that slot always - // to be reserved. - - size_t compute_size(size_t mem_region_words) { - size_t number_of_slots = (mem_region_words / N_words) + 1; - return ReservedSpace::page_align_size_up(number_of_slots); + static size_t compute_size(size_t mem_region_words) { + size_t number_of_slots = (mem_region_words / N_words); + return ReservedSpace::allocation_align_size_up(number_of_slots); } -public: enum SomePublicConstants { LogN = 9, LogN_words = LogN - LogHeapWordSize, @@ -222,25 +191,21 @@ // least "init_word_size".) The contents of the initial table are // undefined; it is the responsibility of the constituent // G1BlockOffsetTable(s) to initialize cards. - G1BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size); - - // Notes a change in the committed size of the region covered by the - // table. The "new_word_size" may not be larger than the size of the - // reserved region this table covers. - void resize(size_t new_word_size); + G1BlockOffsetSharedArray(MemRegion heap, G1RegionToSpaceMapper* storage); void set_bottom(HeapWord* new_bottom); - // Updates all the BlockOffsetArray's sharing this shared array to - // reflect the current "top"'s of their spaces. - void update_offset_arrays(); - // Return the appropriate index into "_offset_array" for "p". 
inline size_t index_for(const void* p) const; + inline size_t index_for_raw(const void* p) const; // Return the address indicating the start of the region corresponding to // "index" in "_offset_array". inline HeapWord* address_for_index(size_t index) const; + // Variant of address_for_index that does not check the index for validity. + inline HeapWord* address_for_index_raw(size_t index) const { + return _reserved.start() + (index << LogN_words); + } }; // And here is the G1BlockOffsetTable subtype that uses the array. @@ -480,6 +445,14 @@ blk_start, blk_end); } + // Variant of zero_bottom_entry that does not check for availability of the + // memory first. + void zero_bottom_entry_raw(); + // Variant of initialize_threshold that does not check for availability of the + // memory first. + HeapWord* initialize_threshold_raw(); + // Zero out the entry for _bottom (offset will be zero). + void zero_bottom_entry(); public: G1BlockOffsetArrayContigSpace(G1BlockOffsetSharedArray* array, MemRegion mr); @@ -487,8 +460,10 @@ // bottom of the covered region. HeapWord* initialize_threshold(); - // Zero out the entry for _bottom (offset will be zero). - void zero_bottom_entry(); + void reset_bot() { + zero_bottom_entry_raw(); + initialize_threshold_raw(); + } // Return the next threshold, the point at which the table should be // updated.
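A recurring pattern in this file is the split of each accessor into a checked variant for steady-state use and a _raw variant that skips validity checks, which is needed while a freshly committed region is still being re-initialized. A compact stand-alone version of that split, with plain assert in place of HotSpot's err_msg machinery and simplified types:

#include <assert.h>
#include <stddef.h>

class OffsetArray {
  unsigned char* _offset_array;
  size_t _num_slots;
 public:
  OffsetArray(unsigned char* storage, size_t slots)
      : _offset_array(storage), _num_slots(slots) {}

  // Raw variant: pure array store, no validity check; safe to call while a
  // region's bookkeeping is not yet consistent after commit.
  void set_offset_array_raw(size_t index, unsigned char offset) {
    _offset_array[index] = offset;
  }

  // Checked variant for normal use: bounds-check, then delegate.
  void set_offset_array(size_t index, unsigned char offset) {
    assert(index < _num_slots && "index out of range");
    set_offset_array_raw(index, offset);
  }
};

int main() {
  unsigned char storage[64] = { 0 };
  OffsetArray bot(storage, 64);
  bot.set_offset_array(3, 1);        // checked path
  bot.set_offset_array_raw(3, 0);    // raw path, e.g. from zero_bottom_entry_raw()
  return 0;
}
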
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -47,14 +47,69 @@ } } +#define check_index(index, msg) \ + assert((index) < (_reserved.word_size() >> LogN_words), \ + err_msg("%s - index: "SIZE_FORMAT", _vs.committed_size: "SIZE_FORMAT, \ + msg, (index), (_reserved.word_size() >> LogN_words))); \ + assert(G1CollectedHeap::heap()->is_in_exact(address_for_index_raw(index)), \ + err_msg("Index "SIZE_FORMAT" corresponding to "PTR_FORMAT \ + " (%u) is not in committed area.", \ + (index), \ + p2i(address_for_index_raw(index)), \ + G1CollectedHeap::heap()->addr_to_region(address_for_index_raw(index)))); + +u_char G1BlockOffsetSharedArray::offset_array(size_t index) const { + check_index(index, "index out of range"); + return _offset_array[index]; +} + +void G1BlockOffsetSharedArray::set_offset_array(size_t index, u_char offset) { + check_index(index, "index out of range"); + set_offset_array_raw(index, offset); +} + +void G1BlockOffsetSharedArray::set_offset_array(size_t index, HeapWord* high, HeapWord* low) { + check_index(index, "index out of range"); + assert(high >= low, "addresses out of order"); + size_t offset = pointer_delta(high, low); + check_offset(offset, "offset too large"); + set_offset_array(index, (u_char)offset); +} + +void G1BlockOffsetSharedArray::set_offset_array(size_t left, size_t right, u_char offset) { + check_index(right, "right index out of range"); + assert(left <= right, "indexes out of order"); + size_t num_cards = right - left + 1; + if (UseMemSetInBOT) { + memset(&_offset_array[left], offset, num_cards); + } else { + size_t i = left; + const size_t end = i + num_cards; + for (; i < end; i++) { + _offset_array[i] = offset; + } + } +} + +void G1BlockOffsetSharedArray::check_offset_array(size_t index, HeapWord* high, HeapWord* low) const { + check_index(index, "index out of range"); + assert(high >= low, "addresses out of order"); + check_offset(pointer_delta(high, low), "offset too large"); + assert(_offset_array[index] == pointer_delta(high, low), "Wrong offset"); +} + +// Variant of index_for that does not check the index for validity. +inline size_t G1BlockOffsetSharedArray::index_for_raw(const void* p) const { + return pointer_delta((char*)p, _reserved.start(), sizeof(char)) >> LogN; +} + inline size_t G1BlockOffsetSharedArray::index_for(const void* p) const { char* pc = (char*)p; assert(pc >= (char*)_reserved.start() && pc < (char*)_reserved.end(), err_msg("p (" PTR_FORMAT ") not in reserved [" PTR_FORMAT ", " PTR_FORMAT ")", p2i(p), p2i(_reserved.start()), p2i(_reserved.end()))); - size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char)); - size_t result = delta >> LogN; + size_t result = index_for_raw(p); check_index(result, "bad index from address"); return result; } @@ -62,7 +117,7 @@ inline HeapWord* G1BlockOffsetSharedArray::address_for_index(size_t index) const { check_index(index, "index out of range"); - HeapWord* result = _reserved.start() + (index << LogN_words); + HeapWord* result = address_for_index_raw(index); assert(result >= _reserved.start() && result < _reserved.end(), err_msg("bad address from index result " PTR_FORMAT " _reserved.start() " PTR_FORMAT " _reserved.end() " @@ -71,6 +126,8 @@ return result; } +#undef check_index + inline size_t G1BlockOffsetArray::block_size(const HeapWord* p) const { return gsp()->block_size(p);
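For concreteness: with LogN = 9, each BOT slot covers a 512-byte card, and index_for_raw() is just a pointer delta shifted right by LogN. A worked byte-based example (the real code does the same arithmetic at HeapWord granularity via LogN_words):

#include <stdio.h>
#include <stddef.h>

static const unsigned LogN = 9;               // 2^9 = 512-byte BOT cards

size_t index_for_raw(const char* p, const char* reserved_start) {
  return (size_t)(p - reserved_start) >> LogN; // pure arithmetic, no checks
}

const char* address_for_index_raw(const char* reserved_start, size_t index) {
  return reserved_start + (index << LogN);
}

int main() {
  char heap[4096];
  // An address 1300 bytes into the reserved space lands in card 1300 >> 9 = 2,
  const char* p = heap + 1300;
  size_t idx = index_for_raw(p, heap);
  printf("index = %u\n", (unsigned) idx);                                            // 2
  // and that card starts 2 << 9 = 1024 bytes into the space.
  printf("card start = %u\n", (unsigned)(address_for_index_raw(heap, idx) - heap));  // 1024
  return 0;
}
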
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -33,31 +33,26 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC +void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions) { + MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords); + _counts->clear_range(mr); +} + void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) { if (has_count_table()) { - assert(from_card_num >= 0 && from_card_num < _committed_max_card_num, - err_msg("from card num out of range: "SIZE_FORMAT, from_card_num)); assert(from_card_num < to_card_num, err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT, from_card_num, to_card_num)); - assert(to_card_num <= _committed_max_card_num, - err_msg("to card num out of range: " - "to: "SIZE_FORMAT ", " - "max: "SIZE_FORMAT, - to_card_num, _committed_max_card_num)); - - to_card_num = MIN2(_committed_max_card_num, to_card_num); - Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num)); } } G1CardCounts::G1CardCounts(G1CollectedHeap *g1h): - _g1h(g1h), _card_counts(NULL), - _reserved_max_card_num(0), _committed_max_card_num(0), - _committed_size(0) {} + _listener(), _g1h(g1h), _card_counts(NULL), _reserved_max_card_num(0) { + _listener.set_cardcounts(this); +} -void G1CardCounts::initialize() { +void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) { assert(_g1h->max_capacity() > 0, "initialization order"); assert(_g1h->capacity() == 0, "initialization order"); @@ -70,70 +65,9 @@ _ct_bs = _g1h->g1_barrier_set(); _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start()); - // Allocate/Reserve the counts table - size_t reserved_bytes = _g1h->max_capacity(); - _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift; - - size_t reserved_size = _reserved_max_card_num * sizeof(jbyte); - ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size)); - if (!rs.is_reserved()) { - warning("Could not reserve enough space for the card counts table"); - guarantee(!has_reserved_count_table(), "should be NULL"); - return; - } - - MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); - - _card_counts_storage.initialize(rs, 0); - _card_counts = (jubyte*) _card_counts_storage.low(); - } -} - -void G1CardCounts::resize(size_t heap_capacity) { - // Expand the card counts table to handle a heap with the given capacity. - - if (!has_reserved_count_table()) { - // Don't expand if we failed to reserve the card counts table. - return; - } - - assert(_committed_size == - ReservedSpace::allocation_align_size_up(_committed_size), - err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size)); - - // Verify that the committed space for the card counts matches our - // committed max card num. Note for some allocation alignments, the - // amount of space actually committed for the counts table will be able - // to span more cards than the number spanned by the maximum heap. 
- size_t prev_committed_size = _committed_size; - size_t prev_committed_card_num = committed_to_card_num(prev_committed_size); - - assert(prev_committed_card_num == _committed_max_card_num, - err_msg("Card mismatch: " - "prev: " SIZE_FORMAT ", " - "committed: "SIZE_FORMAT", " - "reserved: "SIZE_FORMAT, - prev_committed_card_num, _committed_max_card_num, _reserved_max_card_num)); - - size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte); - size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size); - size_t new_committed_card_num = committed_to_card_num(new_committed_size); - - if (_committed_max_card_num < new_committed_card_num) { - // we need to expand the backing store for the card counts - size_t expand_size = new_committed_size - prev_committed_size; - - if (!_card_counts_storage.expand_by(expand_size)) { - warning("Card counts table backing store commit failure"); - return; - } - assert(_card_counts_storage.committed_size() == new_committed_size, - "expansion commit failure"); - - _committed_size = new_committed_size; - _committed_max_card_num = new_committed_card_num; - - clear_range(prev_committed_card_num, _committed_max_card_num); + _card_counts = (jubyte*) mapper->reserved().start(); + _reserved_max_card_num = mapper->reserved().byte_size(); + mapper->set_mapping_changed_listener(&_listener); } } @@ -149,12 +83,13 @@ uint count = 0; if (has_count_table()) { size_t card_num = ptr_2_card_num(card_ptr); - if (card_num < _committed_max_card_num) { - count = (uint) _card_counts[card_num]; - if (count < G1ConcRSHotCardLimit) { - _card_counts[card_num] = - (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit)); - } + assert(card_num < _reserved_max_card_num, + err_msg("Card "SIZE_FORMAT" outside of card counts table (max size "SIZE_FORMAT")", + card_num, _reserved_max_card_num)); + count = (uint) _card_counts[card_num]; + if (count < G1ConcRSHotCardLimit) { + _card_counts[card_num] = + (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit)); } } return count; @@ -165,31 +100,23 @@ } void G1CardCounts::clear_region(HeapRegion* hr) { - assert(!hr->isHumongous(), "Should have been cleared"); - if (has_count_table()) { - HeapWord* bottom = hr->bottom(); + MemRegion mr(hr->bottom(), hr->end()); + clear_range(mr); +} - // We use the last address in hr as hr could be the - // last region in the heap. In which case trying to find - // the card for hr->end() will be an OOB access to the - // card table. - HeapWord* last = hr->end() - 1; - assert(_g1h->g1_committed().contains(last), - err_msg("last not in committed: " - "last: " PTR_FORMAT ", " - "committed: [" PTR_FORMAT ", " PTR_FORMAT ")", - last, - _g1h->g1_committed().start(), - _g1h->g1_committed().end())); - - const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom); - const jbyte* last_card_ptr = _ct_bs->byte_for_const(last); +void G1CardCounts::clear_range(MemRegion mr) { + if (has_count_table()) { + const jbyte* from_card_ptr = _ct_bs->byte_for_const(mr.start()); + // We use the last address in the range as the range could represent the + // last region in the heap. In which case trying to find the card will be an + // OOB access to the card table. 
+ const jbyte* last_card_ptr = _ct_bs->byte_for_const(mr.last()); #ifdef ASSERT HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr); - assert(start_addr == hr->bottom(), "alignment"); + assert(start_addr == mr.start(), "MemRegion start must be aligned to a card."); HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr); - assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment"); + assert((last_addr + CardTableModRefBS::card_size_in_words) == mr.end(), "MemRegion end must be aligned to a card."); #endif // ASSERT // Clear the counts for the (exclusive) card range. @@ -199,14 +126,22 @@ } } +class G1CardCountsClearClosure : public HeapRegionClosure { + private: + G1CardCounts* _card_counts; + public: + G1CardCountsClearClosure(G1CardCounts* card_counts) : + HeapRegionClosure(), _card_counts(card_counts) { } + + + virtual bool doHeapRegion(HeapRegion* r) { + _card_counts->clear_region(r); + return false; + } +}; + void G1CardCounts::clear_all() { assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise"); - clear_range((size_t)0, _committed_max_card_num); + G1CardCountsClearClosure cl(this); + _g1h->heap_region_iterate(&cl); } - -G1CardCounts::~G1CardCounts() { - if (has_reserved_count_table()) { - _card_counts_storage.release(); - } -} -
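With the committed-size tracking removed from g1CardCounts.cpp, add_card_count() now asserts that the card number lies inside the reserved table and performs a saturating increment capped at G1ConcRSHotCardLimit, returning the pre-increment value. A rough standalone model of that counter follows; HOT_CARD_LIMIT and CardCounts are illustrative stand-ins, not HotSpot's names.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

static const unsigned HOT_CARD_LIMIT = 4;  // plays the role of G1ConcRSHotCardLimit

struct CardCounts {
  std::vector<uint8_t> counts;   // one byte-sized counter per card

  explicit CardCounts(size_t num_cards) : counts(num_cards, 0) {}

  // Returns the pre-increment count, clamping the stored value so the
  // counter never advances past the hot limit (mirrors add_card_count).
  unsigned add_card_count(size_t card_num) {
    assert(card_num < counts.size() && "card outside of card counts table");
    unsigned count = counts[card_num];
    if (count < HOT_CARD_LIMIT) {
      counts[card_num] = (uint8_t)std::min<unsigned>(counts[card_num] + 1,
                                                     HOT_CARD_LIMIT);
    }
    return count;
  }

  // A card is treated as hot once it has already been seen limit times.
  bool is_hot(unsigned pre_increment_count) const {
    return pre_increment_count >= HOT_CARD_LIMIT;
  }
};

Returning the pre-increment value lets the caller refine the first HOT_CARD_LIMIT occurrences of a card normally and divert only later ones to the hot card cache.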
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -25,14 +25,26 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" #include "memory/allocation.hpp" #include "runtime/virtualspace.hpp" #include "utilities/globalDefinitions.hpp" class CardTableModRefBS; +class G1CardCounts; class G1CollectedHeap; +class G1RegionToSpaceMapper; class HeapRegion; +class G1CardCountsMappingChangedListener : public G1MappingChangedListener { + private: + G1CardCounts* _counts; + public: + void set_cardcounts(G1CardCounts* counts) { _counts = counts; } + + virtual void on_commit(uint start_idx, size_t num_regions); +}; + // Table to track the number of times a card has been refined. Once // a card has been refined a certain number of times, it is // considered 'hot' and its refinement is delayed by inserting the @@ -41,6 +53,8 @@ // is 'drained' during the next evacuation pause. class G1CardCounts: public CHeapObj<mtGC> { + G1CardCountsMappingChangedListener _listener; + G1CollectedHeap* _g1h; // The table of counts @@ -49,27 +63,18 @@ // Max capacity of the reserved space for the counts table size_t _reserved_max_card_num; - // Max capacity of the committed space for the counts table - size_t _committed_max_card_num; - - // Size of committed space for the counts table - size_t _committed_size; - // CardTable bottom. const jbyte* _ct_bot; // Barrier set CardTableModRefBS* _ct_bs; - // The virtual memory backing the counts table - VirtualSpace _card_counts_storage; - // Returns true if the card counts table has been reserved. bool has_reserved_count_table() { return _card_counts != NULL; } // Returns true if the card counts table has been reserved and committed. bool has_count_table() { - return has_reserved_count_table() && _committed_max_card_num > 0; + return has_reserved_count_table(); } size_t ptr_2_card_num(const jbyte* card_ptr) { @@ -79,37 +84,24 @@ "_ct_bot: " PTR_FORMAT, p2i(card_ptr), p2i(_ct_bot))); size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte)); - assert(card_num >= 0 && card_num < _committed_max_card_num, + assert(card_num >= 0 && card_num < _reserved_max_card_num, err_msg("card pointer out of range: " PTR_FORMAT, p2i(card_ptr))); return card_num; } jbyte* card_num_2_ptr(size_t card_num) { - assert(card_num >= 0 && card_num < _committed_max_card_num, + assert(card_num >= 0 && card_num < _reserved_max_card_num, err_msg("card num out of range: "SIZE_FORMAT, card_num)); return (jbyte*) (_ct_bot + card_num); } - // Helper routine. - // Returns the number of cards that can be counted by the given committed - // table size, with a maximum of the number of cards spanned by the max - // capacity of the heap. - size_t committed_to_card_num(size_t committed_size) { - return MIN2(_reserved_max_card_num, committed_size / sizeof(jbyte)); - } - // Clear the counts table for the given (exclusive) index range. void clear_range(size_t from_card_num, size_t to_card_num); public: G1CardCounts(G1CollectedHeap* g1h); - ~G1CardCounts(); - void initialize(); - - // Resize the committed space for the card counts table in - // response to a resize of the committed space for the heap. - void resize(size_t heap_capacity); + void initialize(G1RegionToSpaceMapper* mapper); // Increments the refinement count for the given card. 
// Returns the pre-increment count value. @@ -122,8 +114,10 @@ // Clears the card counts for the cards spanned by the region void clear_region(HeapRegion* hr); + // Clears the card counts for the cards spanned by the MemRegion + void clear_range(MemRegion mr); + // Clear the entire card counts table during GC. - // Updates the policy stats with the duration. void clear_all(); };
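The header above wires the counts table to its backing storage through G1CardCountsMappingChangedListener: the table registers the listener on itself in its constructor, and a commit of new regions triggers a clear of the corresponding counts, replacing the old explicit resize() path. A compilable sketch of that observer hookup, using simplified stand-in names rather than HotSpot's classes:

#include <cstddef>

struct CardCountsTable;

// Observer interface: the storage mapper calls on_commit() whenever a
// range of heap regions is (re)committed.
struct MappingChangedListener {
  virtual ~MappingChangedListener() {}
  virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
};

// Concrete listener owned by the counts table; it only forwards the event.
struct CardCountsListener : public MappingChangedListener {
  CardCountsTable* _counts;
  CardCountsListener() : _counts(NULL) {}
  void set_counts(CardCountsTable* counts) { _counts = counts; }
  virtual void on_commit(unsigned start_idx, size_t num_regions);
};

struct CardCountsTable {
  CardCountsListener _listener;
  CardCountsTable() { _listener.set_counts(this); }  // same wiring as the ctor above
  void clear_region_range(unsigned start_idx, size_t num_regions) {
    // Zero the count bytes covering regions [start_idx, start_idx + num_regions).
    (void)start_idx; (void)num_regions;
  }
};

void CardCountsListener::on_commit(unsigned start_idx, size_t num_regions) {
  _counts->clear_region_range(start_idx, num_regions);
}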
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -45,12 +45,13 @@ #include "gc_implementation/g1/g1MarkSweep.hpp" #include "gc_implementation/g1/g1OopClosures.inline.hpp" #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp" +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" #include "gc_implementation/g1/g1RemSet.inline.hpp" #include "gc_implementation/g1/g1StringDedup.hpp" #include "gc_implementation/g1/g1YCTypes.hpp" #include "gc_implementation/g1/heapRegion.inline.hpp" #include "gc_implementation/g1/heapRegionRemSet.hpp" -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" +#include "gc_implementation/g1/heapRegionSet.inline.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" #include "gc_implementation/shared/gcHeapSummary.hpp" #include "gc_implementation/shared/gcTimer.hpp" @@ -381,6 +382,14 @@ gclog_or_tty->cr(); } +void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) { + OtherRegionsTable::invalidate(start_idx, num_regions); +} + +void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) { + reset_from_card_cache(start_idx, num_regions); +} + void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr) { // Claim the right to put the region on the dirty cards region list @@ -523,9 +532,9 @@ // again to allocate from it. append_secondary_free_list(); - assert(!_free_list.is_empty(), "if the secondary_free_list was not " + assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not " "empty we should have moved at least one entry to the free_list"); - HeapRegion* res = _free_list.remove_region(is_old); + HeapRegion* res = _hrs.allocate_free_region(is_old); if (G1ConcRegionFreeingVerbose) { gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " "allocated "HR_FORMAT" from secondary_free_list", @@ -566,7 +575,7 @@ } } - res = _free_list.remove_region(is_old); + res = _hrs.allocate_free_region(is_old); if (res == NULL) { if (G1ConcRegionFreeingVerbose) { @@ -591,8 +600,8 @@ // Given that expand() succeeded in expanding the heap, and we // always expand the heap by an amount aligned to the heap // region size, the free list should in theory not be empty. - // In either case remove_region() will check for NULL. - res = _free_list.remove_region(is_old); + // In either case allocate_free_region() will check for NULL. + res = _hrs.allocate_free_region(is_old); } else { _expand_heap_after_alloc_failure = false; } @@ -600,55 +609,11 @@ return res; } -uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions, - size_t word_size) { - assert(isHumongous(word_size), "word_size should be humongous"); - assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); - - uint first = G1_NULL_HRS_INDEX; - if (num_regions == 1) { - // Only one region to allocate, no need to go through the slower - // path. The caller will attempt the expansion if this fails, so - // let's not try to expand here too. 
- HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */); - if (hr != NULL) { - first = hr->hrs_index(); - } else { - first = G1_NULL_HRS_INDEX; - } - } else { - // We can't allocate humongous regions while cleanupComplete() is - // running, since some of the regions we find to be empty might not - // yet be added to the free list and it is not straightforward to - // know which list they are on so that we can remove them. Note - // that we only need to do this if we need to allocate more than - // one region to satisfy the current humongous allocation - // request. If we are only allocating one region we use the common - // region allocation code (see above). - wait_while_free_regions_coming(); - append_secondary_free_list_if_not_empty_with_lock(); - - if (free_regions() >= num_regions) { - first = _hrs.find_contiguous(num_regions); - if (first != G1_NULL_HRS_INDEX) { - for (uint i = first; i < first + num_regions; ++i) { - HeapRegion* hr = region_at(i); - assert(hr->is_empty(), "sanity"); - assert(is_on_master_free_list(hr), "sanity"); - hr->set_pending_removal(true); - } - _free_list.remove_all_pending(num_regions); - } - } - } - return first; -} - HeapWord* G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first, uint num_regions, size_t word_size) { - assert(first != G1_NULL_HRS_INDEX, "pre-condition"); + assert(first != G1_NO_HRS_INDEX, "pre-condition"); assert(isHumongous(word_size), "word_size should be humongous"); assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); @@ -786,42 +751,70 @@ verify_region_sets_optional(); - size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords); - uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords); - uint x_num = expansion_regions(); - uint fs = _hrs.free_suffix(); - uint first = humongous_obj_allocate_find_first(num_regions, word_size); - if (first == G1_NULL_HRS_INDEX) { - // The only thing we can do now is attempt expansion. - if (fs + x_num >= num_regions) { - // If the number of regions we're trying to allocate for this - // object is at most the number of regions in the free suffix, - // then the call to humongous_obj_allocate_find_first() above - // should have succeeded and we wouldn't be here. - // - // We should only be trying to expand when the free suffix is - // not sufficient for the object _and_ we have some expansion - // room available. - assert(num_regions > fs, "earlier allocation should have succeeded"); - + uint first = G1_NO_HRS_INDEX; + uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords); + + if (obj_regions == 1) { + // Only one region to allocate, try to use a fast path by directly allocating + // from the free lists. Do not try to expand here, we will potentially do that + // later. + HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */); + if (hr != NULL) { + first = hr->hrs_index(); + } + } else { + // We can't allocate humongous regions spanning more than one region while + // cleanupComplete() is running, since some of the regions we find to be + // empty might not yet be added to the free list. It is not straightforward + // to know in which list they are on so that we can remove them. We only + // need to do this if we need to allocate more than one region to satisfy the + // current humongous allocation request. 
If we are only allocating one region + // we use the one-region region allocation code (see above), that already + // potentially waits for regions from the secondary free list. + wait_while_free_regions_coming(); + append_secondary_free_list_if_not_empty_with_lock(); + + // Policy: Try only empty regions (i.e. already committed first). Maybe we + // are lucky enough to find some. + first = _hrs.find_contiguous_only_empty(obj_regions); + if (first != G1_NO_HRS_INDEX) { + _hrs.allocate_free_regions_starting_at(first, obj_regions); + } + } + + if (first == G1_NO_HRS_INDEX) { + // Policy: We could not find enough regions for the humongous object in the + // free list. Look through the heap to find a mix of free and uncommitted regions. + // If so, try expansion. + first = _hrs.find_contiguous_empty_or_unavailable(obj_regions); + if (first != G1_NO_HRS_INDEX) { + // We found something. Make sure these regions are committed, i.e. expand + // the heap. Alternatively we could do a defragmentation GC. ergo_verbose1(ErgoHeapSizing, "attempt heap expansion", ergo_format_reason("humongous allocation request failed") ergo_format_byte("allocation request"), word_size * HeapWordSize); - if (expand((num_regions - fs) * HeapRegion::GrainBytes)) { - // Even though the heap was expanded, it might not have - // reached the desired size. So, we cannot assume that the - // allocation will succeed. - first = humongous_obj_allocate_find_first(num_regions, word_size); + + _hrs.expand_at(first, obj_regions); + g1_policy()->record_new_heap_size(num_regions()); + +#ifdef ASSERT + for (uint i = first; i < first + obj_regions; ++i) { + HeapRegion* hr = region_at(i); + assert(hr->is_empty(), "sanity"); + assert(is_on_master_free_list(hr), "sanity"); } +#endif + _hrs.allocate_free_regions_starting_at(first, obj_regions); + } else { + // Policy: Potentially trigger a defragmentation GC. } } HeapWord* result = NULL; - if (first != G1_NULL_HRS_INDEX) { - result = - humongous_obj_allocate_initialize_regions(first, num_regions, word_size); + if (first != G1_NO_HRS_INDEX) { + result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size); assert(result != NULL, "it should always return a valid result"); // A successful humongous object allocation changes the used space @@ -1384,7 +1377,7 @@ G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs); } - assert(free_regions() == 0, "we should not have added any free regions"); + assert(num_free_regions() == 0, "we should not have added any free regions"); rebuild_region_sets(false /* free_list_only */); // Enqueue any discovered reference objects that have @@ -1749,21 +1742,6 @@ return NULL; } -void G1CollectedHeap::update_committed_space(HeapWord* old_end, - HeapWord* new_end) { - assert(old_end != new_end, "don't call this otherwise"); - assert((HeapWord*) _g1_storage.high() == new_end, "invariant"); - - // Update the committed mem region. - _g1_committed.set_end(new_end); - // Tell the card table about the update. - Universe::heap()->barrier_set()->resize_covered_region(_g1_committed); - // Tell the BOT about the update. 
- _bot_shared->resize(_g1_committed.word_size()); - // Tell the hot card cache about the update - _cg1r->hot_card_cache()->resize_card_counts(capacity()); -} - bool G1CollectedHeap::expand(size_t expand_bytes) { size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); aligned_expand_bytes = align_size_up(aligned_expand_bytes, @@ -1774,55 +1752,22 @@ ergo_format_byte("attempted expansion amount"), expand_bytes, aligned_expand_bytes); - if (_g1_storage.uncommitted_size() == 0) { + if (is_maximal_no_gc()) { ergo_verbose0(ErgoHeapSizing, "did not expand the heap", ergo_format_reason("heap already fully expanded")); return false; } - // First commit the memory. - HeapWord* old_end = (HeapWord*) _g1_storage.high(); - bool successful = _g1_storage.expand_by(aligned_expand_bytes); - if (successful) { - // Then propagate this update to the necessary data structures. - HeapWord* new_end = (HeapWord*) _g1_storage.high(); - update_committed_space(old_end, new_end); - - FreeRegionList expansion_list("Local Expansion List"); - MemRegion mr = _hrs.expand_by(old_end, new_end, &expansion_list); - assert(mr.start() == old_end, "post-condition"); - // mr might be a smaller region than what was requested if - // expand_by() was unable to allocate the HeapRegion instances - assert(mr.end() <= new_end, "post-condition"); - - size_t actual_expand_bytes = mr.byte_size(); + uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes); + assert(regions_to_expand > 0, "Must expand by at least one region"); + + uint expanded_by = _hrs.expand_by(regions_to_expand); + + if (expanded_by > 0) { + size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes; assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition"); - assert(actual_expand_bytes == expansion_list.total_capacity_bytes(), - "post-condition"); - if (actual_expand_bytes < aligned_expand_bytes) { - // We could not expand _hrs to the desired size. In this case we - // need to shrink the committed space accordingly. - assert(mr.end() < new_end, "invariant"); - - size_t diff_bytes = aligned_expand_bytes - actual_expand_bytes; - // First uncommit the memory. - _g1_storage.shrink_by(diff_bytes); - // Then propagate this update to the necessary data structures. - update_committed_space(new_end, mr.end()); - } - _free_list.add_as_tail(&expansion_list); - - if (_hr_printer.is_active()) { - HeapWord* curr = mr.start(); - while (curr < mr.end()) { - HeapWord* curr_end = curr + HeapRegion::GrainWords; - _hr_printer.commit(curr, curr_end); - curr = curr_end; - } - assert(curr == mr.end(), "post-condition"); - } - g1_policy()->record_new_heap_size(n_regions()); + g1_policy()->record_new_heap_size(num_regions()); } else { ergo_verbose0(ErgoHeapSizing, "did not expand the heap", @@ -1830,12 +1775,12 @@ // The expansion of the virtual storage space was unsuccessful. // Let's see if it was because we ran out of swap. if (G1ExitOnExpansionFailure && - _g1_storage.uncommitted_size() >= aligned_expand_bytes) { + _hrs.available() >= regions_to_expand) { // We had head room... 
vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion"); } } - return successful; + return regions_to_expand > 0; } void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { @@ -1846,7 +1791,6 @@ uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove); - HeapWord* old_end = (HeapWord*) _g1_storage.high(); size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes; ergo_verbose3(ErgoHeapSizing, @@ -1856,22 +1800,7 @@ ergo_format_byte("attempted shrinking amount"), shrink_bytes, aligned_shrink_bytes, shrunk_bytes); if (num_regions_removed > 0) { - _g1_storage.shrink_by(shrunk_bytes); - HeapWord* new_end = (HeapWord*) _g1_storage.high(); - - if (_hr_printer.is_active()) { - HeapWord* curr = old_end; - while (curr > new_end) { - HeapWord* curr_end = curr; - curr -= HeapRegion::GrainWords; - _hr_printer.uncommit(curr, curr_end); - } - } - - _expansion_regions += num_regions_removed; - update_committed_space(old_end, new_end); - HeapRegionRemSet::shrink_heap(n_regions()); - g1_policy()->record_new_heap_size(n_regions()); + g1_policy()->record_new_heap_size(num_regions()); } else { ergo_verbose0(ErgoHeapSizing, "did not shrink the heap", @@ -1922,7 +1851,6 @@ _g1mm(NULL), _refine_cte_cl(NULL), _full_collection(false), - _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()), _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()), _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()), _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()), @@ -2036,8 +1964,6 @@ _reserved.set_start((HeapWord*)heap_rs.base()); _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size())); - _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes); - // Create the gen rem set (and barrier set) for the entire reserved region. _rem_set = collector_policy()->create_rem_set(_reserved, 2); set_barrier_set(rem_set()->bs()); @@ -2051,20 +1977,65 @@ // Carve out the G1 part of the heap. - ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); - _g1_reserved = MemRegion((HeapWord*)g1_rs.base(), - g1_rs.size()/HeapWordSize); - - _g1_storage.initialize(g1_rs, 0); - _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0); - _hrs.initialize((HeapWord*) _g1_reserved.start(), - (HeapWord*) _g1_reserved.end()); - assert(_hrs.max_length() == _expansion_regions, - err_msg("max length: %u expansion regions: %u", - _hrs.max_length(), _expansion_regions)); - - // Do later initialization work for concurrent refinement. - _cg1r->init(); + ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); + G1RegionToSpaceMapper* heap_storage = + G1RegionToSpaceMapper::create_mapper(g1_rs, + UseLargePages ? os::large_page_size() : os::vm_page_size(), + HeapRegion::GrainBytes, + 1, + mtJavaHeap); + heap_storage->set_mapping_changed_listener(&_listener); + + // Reserve space for the block offset table. We do not support automatic uncommit + // for the card table at this time. BOT only. 
+ ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize)); + G1RegionToSpaceMapper* bot_storage = + G1RegionToSpaceMapper::create_mapper(bot_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + G1BlockOffsetSharedArray::N_bytes, + mtGC); + + ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize)); + G1RegionToSpaceMapper* cardtable_storage = + G1RegionToSpaceMapper::create_mapper(cardtable_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + G1BlockOffsetSharedArray::N_bytes, + mtGC); + + // Reserve space for the card counts table. + ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize)); + G1RegionToSpaceMapper* card_counts_storage = + G1RegionToSpaceMapper::create_mapper(card_counts_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + G1BlockOffsetSharedArray::N_bytes, + mtGC); + + // Reserve space for prev and next bitmap. + size_t bitmap_size = CMBitMap::compute_size(g1_rs.size()); + + ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size)); + G1RegionToSpaceMapper* prev_bitmap_storage = + G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + CMBitMap::mark_distance(), + mtGC); + + ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size)); + G1RegionToSpaceMapper* next_bitmap_storage = + G1RegionToSpaceMapper::create_mapper(next_bitmap_rs, + os::vm_page_size(), + HeapRegion::GrainBytes, + CMBitMap::mark_distance(), + mtGC); + + _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage); + g1_barrier_set()->initialize(cardtable_storage); + // Do later initialization work for concurrent refinement. + _cg1r->init(card_counts_storage); // 6843694 - ensure that the maximum region index can fit // in the remembered set structures. @@ -2078,17 +2049,16 @@ FreeRegionList::set_unrealistically_long_length(max_regions() + 1); - _bot_shared = new G1BlockOffsetSharedArray(_reserved, - heap_word_size(init_byte_size)); + _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage); _g1h = this; - _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes); - _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes); + _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes); + _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes); // Create the ConcurrentMark data structure and thread. // (Must do this late, so that "max_regions" is defined.) - _cm = new ConcurrentMark(this, heap_rs); + _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage); if (_cm == NULL || !_cm->completed_initialization()) { vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark"); return JNI_ENOMEM; @@ -2143,12 +2113,10 @@ // counts and that mechanism. SpecializationStats::clear(); - // Here we allocate the dummy full region that is required by the - // G1AllocRegion class. If we don't pass an address in the reserved - // space here, lots of asserts fire. - - HeapRegion* dummy_region = new_heap_region(0 /* index of bottom region */, - _g1_reserved.start()); + // Here we allocate the dummy HeapRegion that is required by the + // G1AllocRegion class. 
+ HeapRegion* dummy_region = _hrs.get_dummy_region(); + // We'll re-use the same region whether the alloc region will // require BOT updates or not and, if it doesn't, then a non-young // region will complain that it cannot support allocations without @@ -2264,7 +2232,7 @@ } size_t G1CollectedHeap::capacity() const { - return _g1_committed.byte_size(); + return _hrs.length() * HeapRegion::GrainBytes; } void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) { @@ -2548,7 +2516,7 @@ } } } else { - if (cause == GCCause::_gc_locker + if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) { // Schedule a standard evacuation pause. We're setting word_size @@ -2569,8 +2537,8 @@ } bool G1CollectedHeap::is_in(const void* p) const { - if (_g1_committed.contains(p)) { - // Given that we know that p is in the committed space, + if (_hrs.reserved().contains(p)) { + // Given that we know that p is in the reserved space, // heap_region_containing_raw() should successfully // return the containing region. HeapRegion* hr = heap_region_containing_raw(p); @@ -2580,6 +2548,18 @@ } } +#ifdef ASSERT +bool G1CollectedHeap::is_in_exact(const void* p) const { + bool contains = reserved_region().contains(p); + bool available = _hrs.is_available(addr_to_region((HeapWord*)p)); + if (contains && available) { + return true; + } else { + return false; + } +} +#endif + // Iteration functions. // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion. @@ -2644,83 +2624,9 @@ void G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl, uint worker_id, - uint no_of_par_workers, - jint claim_value) { - const uint regions = n_regions(); - const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ? - no_of_par_workers : - 1); - assert(UseDynamicNumberOfGCThreads || - no_of_par_workers == workers()->total_workers(), - "Non dynamic should use fixed number of workers"); - // try to spread out the starting points of the workers - const HeapRegion* start_hr = - start_region_for_worker(worker_id, no_of_par_workers); - const uint start_index = start_hr->hrs_index(); - - // each worker will actually look at all regions - for (uint count = 0; count < regions; ++count) { - const uint index = (start_index + count) % regions; - assert(0 <= index && index < regions, "sanity"); - HeapRegion* r = region_at(index); - // we'll ignore "continues humongous" regions (we'll process them - // when we come across their corresponding "start humongous" - // region) and regions already claimed - if (r->claim_value() == claim_value || r->continuesHumongous()) { - continue; - } - // OK, try to claim it - if (r->claimHeapRegion(claim_value)) { - // success! - assert(!r->continuesHumongous(), "sanity"); - if (r->startsHumongous()) { - // If the region is "starts humongous" we'll iterate over its - // "continues humongous" first; in fact we'll do them - // first. The order is important. In on case, calling the - // closure on the "starts humongous" region might de-allocate - // and clear all its "continues humongous" regions and, as a - // result, we might end up processing them twice. So, we'll do - // them first (notice: most closures will ignore them anyway) and - // then we'll do the "starts humongous" region. 
- for (uint ch_index = index + 1; ch_index < regions; ++ch_index) { - HeapRegion* chr = region_at(ch_index); - - // if the region has already been claimed or it's not - // "continues humongous" we're done - if (chr->claim_value() == claim_value || - !chr->continuesHumongous()) { - break; - } - - // No one should have claimed it directly. We can given - // that we claimed its "starts humongous" region. - assert(chr->claim_value() != claim_value, "sanity"); - assert(chr->humongous_start_region() == r, "sanity"); - - if (chr->claimHeapRegion(claim_value)) { - // we should always be able to claim it; no one else should - // be trying to claim this region - - bool res2 = cl->doHeapRegion(chr); - assert(!res2, "Should not abort"); - - // Right now, this holds (i.e., no closure that actually - // does something with "continues humongous" regions - // clears them). We might have to weaken it in the future, - // but let's leave these two asserts here for extra safety. - assert(chr->continuesHumongous(), "should still be the case"); - assert(chr->humongous_start_region() == r, "sanity"); - } else { - guarantee(false, "we should not reach here"); - } - } - } - - assert(!r->continuesHumongous(), "sanity"); - bool res = cl->doHeapRegion(r); - assert(!res, "Should not abort"); - } - } + uint num_workers, + jint claim_value) const { + _hrs.par_iterate(cl, worker_id, num_workers, claim_value); } class ResetClaimValuesClosure: public HeapRegionClosure { @@ -2898,17 +2804,6 @@ return result; } -HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i, - uint no_of_par_workers) { - uint worker_num = - G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U; - assert(UseDynamicNumberOfGCThreads || - no_of_par_workers == workers()->total_workers(), - "Non dynamic should use fixed number of workers"); - const uint start_index = n_regions() * worker_i / worker_num; - return region_at(start_index); -} - void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) { HeapRegion* r = g1_policy()->collection_set(); while (r != NULL) { @@ -2951,15 +2846,11 @@ } HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const { - // We're not using an iterator given that it will wrap around when - // it reaches the last region and this is not what we want here. 
- for (uint index = from->hrs_index() + 1; index < n_regions(); index++) { - HeapRegion* hr = region_at(index); - if (!hr->isHumongous()) { - return hr; - } - } - return NULL; + HeapRegion* result = _hrs.next_region_in_heap(from); + while (result != NULL && result->isHumongous()) { + result = _hrs.next_region_in_heap(result); + } + return result; } Space* G1CollectedHeap::space_containing(const void* addr) const { @@ -3017,7 +2908,7 @@ } size_t G1CollectedHeap::max_capacity() const { - return _g1_reserved.byte_size(); + return _hrs.reserved().byte_size(); } jlong G1CollectedHeap::millis_since_last_gc() { @@ -3546,9 +3437,9 @@ st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", capacity()/K, used_unlocked()/K); st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", - _g1_storage.low_boundary(), - _g1_storage.high(), - _g1_storage.high_boundary()); + _hrs.reserved().start(), + _hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords, + _hrs.reserved().end()); st->cr(); st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); uint young_regions = _young_list->length(); @@ -4239,10 +4130,7 @@ // No need for an ergo verbose message here, // expansion_amount() does this when it returns a value > 0. if (!expand(expand_bytes)) { - // We failed to expand the heap so let's verify that - // committed/uncommitted amount match the backing store - assert(capacity() == _g1_storage.committed_size(), "committed size mismatch"); - assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch"); + // We failed to expand the heap. Cannot do anything about it. } } } @@ -4302,10 +4190,6 @@ // RETIRE events are generated before the end GC event. _hr_printer.end_gc(false /* full */, (size_t) total_collections()); - if (mark_in_progress()) { - concurrent_mark()->update_g1_committed(); - } - #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif @@ -6140,6 +6024,7 @@ bool locked) { assert(!hr->isHumongous(), "this is only for non-humongous regions"); assert(!hr->is_empty(), "the region should not be empty"); + assert(_hrs.is_available(hr->hrs_index()), "region should be committed"); assert(free_list != NULL, "pre-condition"); if (G1VerifyBitmaps) { @@ -6194,7 +6079,7 @@ assert(list != NULL, "list can't be null"); if (!list->is_empty()) { MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); - _free_list.add_ordered(list); + _hrs.insert_list_into_free_list(list); } } @@ -6802,22 +6687,22 @@ // this is that during a full GC string deduplication needs to know if // a collected region was young or old when the full GC was initiated. 
} - _free_list.remove_all(); + _hrs.remove_all_free_regions(); } class RebuildRegionSetsClosure : public HeapRegionClosure { private: bool _free_list_only; HeapRegionSet* _old_set; - FreeRegionList* _free_list; + HeapRegionSeq* _hrs; size_t _total_used; public: RebuildRegionSetsClosure(bool free_list_only, - HeapRegionSet* old_set, FreeRegionList* free_list) : + HeapRegionSet* old_set, HeapRegionSeq* hrs) : _free_list_only(free_list_only), - _old_set(old_set), _free_list(free_list), _total_used(0) { - assert(_free_list->is_empty(), "pre-condition"); + _old_set(old_set), _hrs(hrs), _total_used(0) { + assert(_hrs->num_free_regions() == 0, "pre-condition"); if (!free_list_only) { assert(_old_set->is_empty(), "pre-condition"); } @@ -6830,7 +6715,7 @@ if (r->is_empty()) { // Add free regions to the free list - _free_list->add_as_tail(r); + _hrs->insert_into_free_list(r); } else if (!_free_list_only) { assert(!r->is_young(), "we should not come across young regions"); @@ -6858,7 +6743,7 @@ _young_list->empty_list(); } - RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_free_list); + RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs); heap_region_iterate(&cl); if (!free_list_only) { @@ -7013,13 +6898,42 @@ _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, GCAllocForTenured); } + +HeapRegion* OldGCAllocRegion::release() { + HeapRegion* cur = get(); + if (cur != NULL) { + // Determine how far we are from the next card boundary. If it is smaller than + // the minimum object size we can allocate into, expand into the next card. + HeapWord* top = cur->top(); + HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes); + + size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize); + + if (to_allocate_words != 0) { + // We are not at a card boundary. Fill up, possibly into the next, taking the + // end of the region and the minimum object size into account. + to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize), + MAX2(to_allocate_words, G1CollectedHeap::min_fill_size())); + + // Skip allocation if there is not enough space to allocate even the smallest + // possible object. In this case this region will not be retained, so the + // original problem cannot occur. 
+ if (to_allocate_words >= G1CollectedHeap::min_fill_size()) { + HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */); + CollectedHeap::fill_with_object(dummy, to_allocate_words); + } + } + } + return G1AllocRegion::release(); +} + // Heap region set verification class VerifyRegionListsClosure : public HeapRegionClosure { private: HeapRegionSet* _old_set; HeapRegionSet* _humongous_set; - FreeRegionList* _free_list; + HeapRegionSeq* _hrs; public: HeapRegionSetCount _old_count; @@ -7028,8 +6942,8 @@ VerifyRegionListsClosure(HeapRegionSet* old_set, HeapRegionSet* humongous_set, - FreeRegionList* free_list) : - _old_set(old_set), _humongous_set(humongous_set), _free_list(free_list), + HeapRegionSeq* hrs) : + _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs), _old_count(), _humongous_count(), _free_count(){ } bool doHeapRegion(HeapRegion* hr) { @@ -7043,7 +6957,7 @@ assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index())); _humongous_count.increment(1u, hr->capacity()); } else if (hr->is_empty()) { - assert(hr->containing_set() == _free_list, err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index())); + assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index())); _free_count.increment(1u, hr->capacity()); } else { assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index())); @@ -7052,7 +6966,7 @@ return false; } - void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, FreeRegionList* free_list) { + void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionSeq* free_list) { guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length())); guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, old_set->total_capacity_bytes(), _old_count.capacity())); @@ -7061,26 +6975,17 @@ guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, humongous_set->total_capacity_bytes(), _humongous_count.capacity())); - guarantee(free_list->length() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->length(), _free_count.length())); + guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length())); guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, free_list->total_capacity_bytes(), _free_count.capacity())); } }; -HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index, - HeapWord* bottom) { - HeapWord* end = bottom + HeapRegion::GrainWords; - MemRegion mr(bottom, end); - assert(_g1_reserved.contains(mr), "invariant"); - // This might return NULL if the allocation fails - return new HeapRegion(hrs_index, _bot_shared, mr); -} - void G1CollectedHeap::verify_region_sets() { assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); // First, check the explicit lists. 
- _free_list.verify_list(); + _hrs.verify(); { // Given that a concurrent operation might be adding regions to // the secondary free list we have to take the lock before @@ -7111,9 +7016,9 @@ // Finally, make sure that the region accounting in the lists is // consistent with what we see in the heap. - VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_free_list); + VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs); heap_region_iterate(&cl); - cl.verify_counts(&_old_set, &_humongous_set, &_free_list); + cl.verify_counts(&_old_set, &_humongous_set, &_hrs); } // Optimized nmethod scanning
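The rewritten humongous path in g1CollectedHeap.cpp first converts the request into a region count (word_size aligned up to HeapRegion::GrainWords, divided by GrainWords) and then searches in two phases: a run of already-committed empty regions, and only if that fails a run that may also contain uncommitted regions, which is then expanded in place via expand_at(). A standalone sketch of the two-phase search; the RegionState enum and helper names are invented for illustration, the real state lives in HeapRegionSeq.

#include <cstddef>
#include <vector>

enum RegionState { FREE, UNCOMMITTED, USED };

static const size_t NOT_FOUND = (size_t)-1;

// Find num contiguous regions that are all FREE, optionally also
// accepting UNCOMMITTED regions. Returns the index of the first region.
static size_t find_contiguous(const std::vector<RegionState>& regions,
                              size_t num, bool allow_uncommitted) {
  size_t run = 0;
  for (size_t i = 0; i < regions.size(); i++) {
    bool ok = regions[i] == FREE ||
              (allow_uncommitted && regions[i] == UNCOMMITTED);
    run = ok ? run + 1 : 0;
    if (run == num) return i + 1 - num;
  }
  return NOT_FOUND;
}

static size_t humongous_find_first(std::vector<RegionState>& regions,
                                   size_t obj_regions) {
  // Phase 1: prefer runs that are already committed and empty
  // (find_contiguous_only_empty in the hunk above).
  size_t first = find_contiguous(regions, obj_regions, false);
  if (first == NOT_FOUND) {
    // Phase 2: accept uncommitted regions, then commit them in place
    // (find_contiguous_empty_or_unavailable followed by expand_at).
    first = find_contiguous(regions, obj_regions, true);
    if (first != NOT_FOUND) {
      for (size_t i = first; i < first + obj_regions; i++)
        regions[i] = FREE;   // models expand_at(first, obj_regions)
    }
  }
  return first;   // caller then allocates the run as one humongous object
}

Expanding exactly at the found run replaces the old scheme of expanding at the end of the committed space and retrying the search, which, as the deleted comment notes, could still fail even after a successful expansion.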
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -183,6 +183,13 @@ public: OldGCAllocRegion() : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { } + + // This specialization of release() makes sure that the last card that has been + // allocated into has been completely filled by a dummy object. + // This avoids races when remembered set scanning wants to update the BOT of the + // last card in the retained old gc alloc region, and allocation threads + // allocating into that card at the same time. + virtual HeapRegion* release(); }; // The G1 STW is alive closure. @@ -199,6 +206,13 @@ class RefineCardTableEntryClosure; +class G1RegionMappingChangedListener : public G1MappingChangedListener { + private: + void reset_from_card_cache(uint start_idx, size_t num_regions); + public: + virtual void on_commit(uint start_idx, size_t num_regions); +}; + class G1CollectedHeap : public SharedHeap { friend class VM_CollectForMetadataAllocation; friend class VM_G1CollectForAllocation; @@ -237,19 +251,9 @@ static size_t _humongous_object_threshold_in_words; - // Storage for the G1 heap. - VirtualSpace _g1_storage; - MemRegion _g1_reserved; - - // The part of _g1_storage that is currently committed. - MemRegion _g1_committed; - - // The master free list. It will satisfy all new region allocations. - FreeRegionList _free_list; - // The secondary free list which contains regions that have been - // freed up during the cleanup process. This will be appended to the - // master free list when appropriate. + // freed up during the cleanup process. This will be appended to + // the master free list when appropriate. FreeRegionList _secondary_free_list; // It keeps track of the old regions. @@ -283,6 +287,9 @@ // after heap shrinking (free_list_only == true). void rebuild_region_sets(bool free_list_only); + // Callback for region mapping changed events. + G1RegionMappingChangedListener _listener; + // The sequence of all heap regions in the heap. HeapRegionSeq _hrs; @@ -513,14 +520,6 @@ // humongous object, set is_old to true. If not, to false. HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand); - // Attempt to satisfy a humongous allocation request of the given - // size by finding a contiguous set of free regions of num_regions - // length and remove them from the master free list. Return the - // index of the first region or G1_NULL_HRS_INDEX if the search - // was unsuccessful. - uint humongous_obj_allocate_find_first(uint num_regions, - size_t word_size); - // Initialize a contiguous set of free regions of length num_regions // and starting at index first so that they appear as a single // humongous region. @@ -862,11 +861,6 @@ CodeBlobClosure* scan_strong_code, uint worker_i); - // Notifies all the necessary spaces that the committed space has - // been updated (either expanded or shrunk). It should be called - // after _g1_storage is updated. - void update_committed_space(HeapWord* old_end, HeapWord* new_end); - // The concurrent marker (and the thread it runs in.) ConcurrentMark* _cm; ConcurrentMarkThread* _cmThread; @@ -1177,27 +1171,20 @@ // But G1CollectedHeap doesn't yet support this. virtual bool is_maximal_no_gc() const { - return _g1_storage.uncommitted_size() == 0; + return _hrs.available() == 0; } - // The total number of regions in the heap. 
- uint n_regions() const { return _hrs.length(); } + // The current number of regions in the heap. + uint num_regions() const { return _hrs.length(); } // The max number of regions in the heap. uint max_regions() const { return _hrs.max_length(); } // The number of regions that are completely free. - uint free_regions() const { return _free_list.length(); } + uint num_free_regions() const { return _hrs.num_free_regions(); } // The number of regions that are not completely free. - uint used_regions() const { return n_regions() - free_regions(); } - - // The number of regions available for "regular" expansion. - uint expansion_regions() const { return _expansion_regions; } - - // Factory method for HeapRegion instances. It will return NULL if - // the allocation fails. - HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom); + uint num_used_regions() const { return num_regions() - num_free_regions(); } void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN; void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN; @@ -1246,7 +1233,7 @@ #ifdef ASSERT bool is_on_master_free_list(HeapRegion* hr) { - return hr->containing_set() == &_free_list; + return _hrs.is_free(hr); } #endif // ASSERT @@ -1258,7 +1245,7 @@ } void append_secondary_free_list() { - _free_list.add_ordered(&_secondary_free_list); + _hrs.insert_list_into_free_list(&_secondary_free_list); } void append_secondary_free_list_if_not_empty_with_lock() { @@ -1304,6 +1291,11 @@ // Returns "TRUE" iff "p" points into the committed areas of the heap. virtual bool is_in(const void* p) const; +#ifdef ASSERT + // Returns whether p is in one of the available areas of the heap. Slow but + // extensive version. + bool is_in_exact(const void* p) const; +#endif // Return "TRUE" iff the given object address is within the collection // set. Slow implementation. @@ -1364,25 +1356,19 @@ // Return "TRUE" iff the given object address is in the reserved // region of g1. bool is_in_g1_reserved(const void* p) const { - return _g1_reserved.contains(p); + return _hrs.reserved().contains(p); } // Returns a MemRegion that corresponds to the space that has been // reserved for the heap - MemRegion g1_reserved() { - return _g1_reserved; - } - - // Returns a MemRegion that corresponds to the space that has been - // committed in the heap - MemRegion g1_committed() { - return _g1_committed; + MemRegion g1_reserved() const { + return _hrs.reserved(); } virtual bool is_in_closed_subset(const void* p) const; - G1SATBCardTableModRefBS* g1_barrier_set() { - return (G1SATBCardTableModRefBS*) barrier_set(); + G1SATBCardTableLoggingModRefBS* g1_barrier_set() { + return (G1SATBCardTableLoggingModRefBS*) barrier_set(); } // This resets the card table to all zeros. It is used after @@ -1416,6 +1402,8 @@ // within the heap. inline uint addr_to_region(HeapWord* addr) const; + inline HeapWord* bottom_addr_for_region(uint index) const; + // Divide the heap region sequence into "chunks" of some size (the number // of regions divided by the number of parallel threads times some // overpartition factor, currently 4). Assumes that this will be called @@ -1429,10 +1417,10 @@ // setting the claim value of the second and subsequent regions of the // chunk.) For now requires that "doHeapRegion" always returns "false", // i.e., that a closure never attempt to abort a traversal. 
- void heap_region_par_iterate_chunked(HeapRegionClosure* blk, - uint worker, - uint no_of_par_workers, - jint claim_value); + void heap_region_par_iterate_chunked(HeapRegionClosure* cl, + uint worker_id, + uint num_workers, + jint claim_value) const; // It resets all the region claim values to the default. void reset_heap_region_claim_values(); @@ -1457,11 +1445,6 @@ // starting region for iterating over the current collection set. HeapRegion* start_cset_region_for_worker(uint worker_i); - // This is a convenience method that is used by the - // HeapRegionIterator classes to calculate the starting region for - // each worker so that they do not all start from the same region. - HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers); - // Iterate over the regions (if any) in the current collection set. void collection_set_iterate(HeapRegionClosure* blk);
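The OldGCAllocRegion::release() specialization declared above (and defined in the g1CollectedHeap.cpp hunk) pads the tail of the last card allocated into with a dummy object, so that remembered-set scanning updating the BOT of a retained region cannot race with mutator threads still allocating in that card. The padding arithmetic, as a standalone sketch in word units; CARD_WORDS and MIN_FILL_WORDS are illustrative constants where HotSpot uses G1BlockOffsetSharedArray::N_bytes and min_fill_size().

#include <cstddef>

static const size_t CARD_WORDS = 64;      // 512-byte card / 8-byte words
static const size_t MIN_FILL_WORDS = 2;   // smallest fillable dummy object

// Given the current allocation top and the region end (as word offsets),
// return how many words of dummy object to allocate so the region is
// retired at a card boundary, or 0 to skip the fill entirely.
size_t words_to_fill(size_t top, size_t end) {
  size_t aligned_top = (top + CARD_WORDS - 1) / CARD_WORDS * CARD_WORDS;
  size_t to_allocate = aligned_top - top;
  if (to_allocate == 0) return 0;          // already at a card boundary
  // Respect the minimum object size, but never run past the region end
  // (the MIN2/MAX2 clamp in the release() body above).
  size_t remaining = end - top;
  if (to_allocate < MIN_FILL_WORDS) to_allocate = MIN_FILL_WORDS;
  if (to_allocate > remaining) to_allocate = remaining;
  // If even the smallest object does not fit, the region will not be
  // retained, so the race cannot occur and no fill is needed.
  return to_allocate >= MIN_FILL_WORDS ? to_allocate : 0;
}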
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -47,19 +47,21 @@ return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes); } +inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const { + return _hrs.reserved().start() + index * HeapRegion::GrainWords; +} + template <class T> -inline HeapRegion* -G1CollectedHeap::heap_region_containing_raw(const T addr) const { +inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const { assert(addr != NULL, "invariant"); - assert(_g1_reserved.contains((const void*) addr), + assert(is_in_g1_reserved((const void*) addr), err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")", - p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end()))); + p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()))); return _hrs.addr_to_region((HeapWord*) addr); } template <class T> -inline HeapRegion* -G1CollectedHeap::heap_region_containing(const T addr) const { +inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const { HeapRegion* hr = heap_region_containing_raw(addr); if (hr->continuesHumongous()) { return hr->humongous_start_region(); @@ -89,10 +91,9 @@ return r != NULL && r->in_collection_set(); } -inline HeapWord* -G1CollectedHeap::attempt_allocation(size_t word_size, - unsigned int* gc_count_before_ret, - int* gclocker_retry_count_ret) { +inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size, + unsigned int* gc_count_before_ret, + int* gclocker_retry_count_ret) { assert_heap_not_locked_and_not_at_safepoint(); assert(!isHumongous(word_size), "attempt_allocation() should not " "be called for humongous allocation requests"); @@ -252,8 +253,7 @@ } } -inline bool -G1CollectedHeap::evacuation_should_fail() { +inline bool G1CollectedHeap::evacuation_should_fail() { if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) { return false; }
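g1CollectedHeap.inline.hpp now derives region addresses purely from _hrs.reserved(): heap_region_containing_raw() maps an address to its region by shifting the offset from the reserved base, and heap_region_containing() additionally redirects "continues humongous" regions to their start region. A standalone sketch of that raw-versus-resolved lookup; representing the humongous link as a plain pointer member is a simplification for this sketch, not HotSpot's actual layout.

#include <cassert>
#include <cstddef>
#include <vector>

static const size_t LOG_REGION_BYTES = 20;   // assume 1 MB regions here

struct Region {
  Region* humongous_start;   // non-NULL for "continues humongous" regions
  Region() : humongous_start(NULL) {}
};

struct RegionTable {
  char* reserved_start;
  std::vector<Region> regions;

  // Pure index arithmetic, the heap_region_containing_raw() analogue.
  Region* addr_to_region_raw(const void* addr) {
    size_t idx = (size_t)((const char*)addr - reserved_start) >> LOG_REGION_BYTES;
    assert(idx < regions.size());
    return &regions[idx];
  }

  // Resolved lookup: objects in a humongous tail belong to the start
  // region, the heap_region_containing() analogue.
  Region* addr_to_region(const void* addr) {
    Region* r = addr_to_region_raw(addr);
    return r->humongous_start != NULL ? r->humongous_start : r;
  }
};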
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -456,7 +456,7 @@ } else { _young_list_fixed_length = _young_gen_sizer->min_desired_young_length(); } - _free_regions_at_end_of_collection = _g1->free_regions(); + _free_regions_at_end_of_collection = _g1->num_free_regions(); update_young_list_target_length(); // We may immediately start allocating regions and placing them on the @@ -829,7 +829,7 @@ record_survivor_regions(0, NULL, NULL); - _free_regions_at_end_of_collection = _g1->free_regions(); + _free_regions_at_end_of_collection = _g1->num_free_regions(); // Reset survivors SurvRateGroup. _survivor_surv_rate_group->reset(); update_young_list_target_length(); @@ -1181,7 +1181,7 @@ _in_marking_window = new_in_marking_window; _in_marking_window_im = new_in_marking_window_im; - _free_regions_at_end_of_collection = _g1->free_regions(); + _free_regions_at_end_of_collection = _g1->num_free_regions(); update_young_list_target_length(); // Note that _mmu_tracker->max_gc_time() returns the time in seconds. @@ -1203,7 +1203,7 @@ _survivor_used_bytes_before_gc = young_list->survivor_used_bytes(); _heap_capacity_bytes_before_gc = _g1->capacity(); _heap_used_bytes_before_gc = _g1->used(); - _cur_collection_pause_used_regions_at_start = _g1->used_regions(); + _cur_collection_pause_used_regions_at_start = _g1->num_used_regions(); _eden_capacity_bytes_before_gc = (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc; @@ -1618,7 +1618,7 @@ G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { _collectionSetChooser->clear(); - uint region_num = _g1->n_regions(); + uint region_num = _g1->num_regions(); if (G1CollectedHeap::use_parallel_gc_threads()) { const uint OverpartitionFactor = 4; uint WorkUnit; @@ -1639,7 +1639,7 @@ MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor), MinWorkUnit); } - _collectionSetChooser->prepare_for_par_region_addition(_g1->n_regions(), + _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(), WorkUnit); ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, (int) WorkUnit); @@ -1936,7 +1936,7 @@ // of them are available. G1CollectedHeap* g1h = G1CollectedHeap::heap(); - const size_t region_num = g1h->n_regions(); + const size_t region_num = g1h->num_regions(); const size_t perc = (size_t) G1OldCSetRegionThresholdPercent; size_t result = region_num * perc / 100; // emulate ceiling
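The g1CollectorPolicy.cpp changes are mostly mechanical renames (free_regions() to num_free_regions(), n_regions() to num_regions()), but the cleanup path they touch shows the work-unit sizing for parallel region scanning: the region array is split into chunks roughly OverpartitionFactor (4) times smaller than an even split across GC threads, floored at MinWorkUnit. The floor's value is elided in the hunk above, so the sketch below assumes one for illustration.

#include <algorithm>
#include <cstddef>

size_t cleanup_work_unit(size_t num_regions, size_t num_workers) {
  const size_t OverpartitionFactor = 4;  // from record_concurrent_mark_cleanup_end
  const size_t MinWorkUnit = 4;          // assumed floor, not shown in the hunk
  // Over-partitioning gives fast workers extra chunks to claim, which
  // smooths out imbalance when regions vary in processing cost.
  return std::max(num_regions / (num_workers * OverpartitionFactor),
                  MinWorkUnit);
}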
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -33,7 +33,7 @@ G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h): _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {} -void G1HotCardCache::initialize() { +void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) { if (default_use_cache()) { _use_cache = true; @@ -49,7 +49,7 @@ _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers); _hot_cache_par_claimed_idx = 0; - _card_counts.initialize(); + _card_counts.initialize(card_counts_storage); } } @@ -135,11 +135,8 @@ // above, are discarded prior to re-enabling the cache near the end of the GC. } -void G1HotCardCache::resize_card_counts(size_t heap_capacity) { - _card_counts.resize(heap_capacity); -} - void G1HotCardCache::reset_card_counts(HeapRegion* hr) { + assert(!hr->isHumongous(), "Should have been cleared"); _card_counts.clear_region(hr); }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -78,7 +78,7 @@ G1HotCardCache(G1CollectedHeap* g1h); ~G1HotCardCache(); - void initialize(); + void initialize(G1RegionToSpaceMapper* card_counts_storage); bool use_cache() { return _use_cache; } @@ -115,9 +115,6 @@ bool hot_cache_is_empty() { return _n_hot == 0; } - // Resizes the card counts table to match the given capacity - void resize_card_counts(size_t heap_capacity); - // Zeros the values in the card counts table for entire committed heap void reset_card_counts();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp" +#include "oops/markOop.hpp" +#include "oops/oop.inline.hpp" +#include "services/memTracker.hpp" +#ifdef TARGET_OS_FAMILY_linux +# include "os_linux.inline.hpp" +#endif +#ifdef TARGET_OS_FAMILY_solaris +# include "os_solaris.inline.hpp" +#endif +#ifdef TARGET_OS_FAMILY_windows +# include "os_windows.inline.hpp" +#endif +#ifdef TARGET_OS_FAMILY_aix +# include "os_aix.inline.hpp" +#endif +#ifdef TARGET_OS_FAMILY_bsd +# include "os_bsd.inline.hpp" +#endif +#include "utilities/bitMap.inline.hpp" + +G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL), + _high_boundary(NULL), _committed(), _page_size(0), _special(false), _executable(false) { +} + +bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) { + if (!rs.is_reserved()) { + return false; // Allocation failed. + } + assert(_low_boundary == NULL, "VirtualSpace already initialized"); + assert(page_size > 0, "Granularity must be non-zero."); + + _low_boundary = rs.base(); + _high_boundary = _low_boundary + rs.size(); + + _special = rs.special(); + _executable = rs.executable(); + + _page_size = page_size; + + assert(_committed.size() == 0, "virtual space initialized more than once"); + uintx size_in_bits = rs.size() / page_size; + _committed.resize(size_in_bits, /* in_resource_area */ false); + + return true; +} + + +G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() { + release(); +} + +void G1PageBasedVirtualSpace::release() { + // This does not release memory it never reserved. 
+ // Caller must release via rs.release(); + _low_boundary = NULL; + _high_boundary = NULL; + _special = false; + _executable = false; + _page_size = 0; + _committed.resize(0, false); +} + +size_t G1PageBasedVirtualSpace::committed_size() const { + return _committed.count_one_bits() * _page_size; +} + +size_t G1PageBasedVirtualSpace::reserved_size() const { + return pointer_delta(_high_boundary, _low_boundary, sizeof(char)); +} + +size_t G1PageBasedVirtualSpace::uncommitted_size() const { + return reserved_size() - committed_size(); +} + +uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const { + return (addr - _low_boundary) / _page_size; +} + +bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const { + uintptr_t end = start + size_in_pages; + return _committed.get_next_zero_offset(start, end) >= end; +} + +bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const { + uintptr_t end = start + size_in_pages; + return _committed.get_next_one_offset(start, end) >= end; +} + +char* G1PageBasedVirtualSpace::page_start(uintptr_t index) { + return _low_boundary + index * _page_size; +} + +size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) { + return num * _page_size; +} + +MemRegion G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) { + // We need to make sure to commit all pages covered by the given area. + guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted"); + + if (!_special) { + os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable, + err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages)); + } + _committed.set_range(start, start + size_in_pages); + + MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize); + return result; +} + +MemRegion G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) { + guarantee(is_area_committed(start, size_in_pages), "checking"); + + if (!_special) { + os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages)); + } + + _committed.clear_range(start, start + size_in_pages); + + MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize); + return result; +} + +bool G1PageBasedVirtualSpace::contains(const void* p) const { + return _low_boundary <= (const char*) p && (const char*) p < _high_boundary; +} + +#ifndef PRODUCT +void G1PageBasedVirtualSpace::print_on(outputStream* out) { + out->print ("Virtual space:"); + if (special()) out->print(" (pinned in memory)"); + out->cr(); + out->print_cr(" - committed: " SIZE_FORMAT, committed_size()); + out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size()); + out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary)); +} + +void G1PageBasedVirtualSpace::print() { + print_on(tty); +} +#endif
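The mechanism in g1PageBasedVirtualSpace.cpp above reduces to a bitmap with one bit per OS page: commit() must find its range fully clear, flip the bits, and ask the OS for backing memory unless the space is _special (pre-committed and pinned); uncommit() is the mirror image. Below is a minimal standalone sketch of that bookkeeping, not HotSpot code: std::vector<bool> stands in for the BitMap, and os_commit/os_uncommit are hypothetical stand-ins for os::commit_memory_or_exit and os::uncommit_memory.

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-ins for os::commit_memory_or_exit / os::uncommit_memory.
void os_commit(char* addr, size_t bytes)   { /* mmap/VirtualAlloc goes here */ }
void os_uncommit(char* addr, size_t bytes) { /* madvise/VirtualFree goes here */ }

class PageBasedSpace {
  char*  _low;                   // base of the reserved range
  size_t _page_size;             // commit granularity in bytes
  bool   _special;               // pre-committed and pinned: skip the OS calls
  std::vector<bool> _committed;  // one bit per page, like G1's _committed BitMap
 public:
  PageBasedSpace(char* low, size_t pages, size_t page_size, bool special)
    : _low(low), _page_size(page_size), _special(special), _committed(pages, false) {}

  char* page_start(size_t idx) { return _low + idx * _page_size; }

  void commit(size_t start, size_t n) {
    // As in G1PageBasedVirtualSpace::commit: double-commit is a bug, not a no-op.
    for (size_t i = start; i < start + n; i++) assert(!_committed[i]);
    if (!_special) os_commit(page_start(start), n * _page_size);
    for (size_t i = start; i < start + n; i++) _committed[i] = true;
  }

  void uncommit(size_t start, size_t n) {
    for (size_t i = start; i < start + n; i++) assert(_committed[i]);
    if (!_special) os_uncommit(page_start(start), n * _page_size);
    for (size_t i = start; i < start + n; i++) _committed[i] = false;
  }
};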
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP + +#include "memory/allocation.hpp" +#include "memory/memRegion.hpp" +#include "runtime/virtualspace.hpp" +#include "utilities/bitMap.hpp" + +// Virtual space management helper for a virtual space with an OS page allocation +// granularity. +// (De-)Allocation requests are always OS page aligned by passing a page index +// and multiples of pages. +// The implementation gives an error when trying to commit or uncommit pages that +// have already been committed or uncommitted. +class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC { + friend class VMStructs; + private: + // Reserved area addresses. + char* _low_boundary; + char* _high_boundary; + + // The commit/uncommit granularity in bytes. + size_t _page_size; + + // Bitmap used for verification of commit/uncommit operations. + BitMap _committed; + + // Indicates that the entire space has been committed and pinned in memory, + // os::commit_memory() or os::uncommit_memory() have no function. + bool _special; + + // Indicates whether the committed space should be executable. + bool _executable; + + // Returns the index of the page which contains the given address. + uintptr_t addr_to_page_index(char* addr) const; + // Returns the address of the given page index. + char* page_start(uintptr_t index); + // Returns the byte size of the given number of pages. + size_t byte_size_for_pages(size_t num); + + // Returns true if the entire area is backed by committed memory. + bool is_area_committed(uintptr_t start, size_t size_in_pages) const; + // Returns true if the entire area is not backed by committed memory. + bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const; + + public: + + // Commit the given area of pages starting at start being size_in_pages large. + MemRegion commit(uintptr_t start, size_t size_in_pages); + + // Uncommit the given area of pages starting at start being size_in_pages large. 
+ MemRegion uncommit(uintptr_t start, size_t size_in_pages); + + bool special() const { return _special; } + + // Initialization + G1PageBasedVirtualSpace(); + bool initialize_with_granularity(ReservedSpace rs, size_t page_size); + + // Destruction + ~G1PageBasedVirtualSpace(); + + // Amount of reserved memory. + size_t reserved_size() const; + // Memory used in this virtual space. + size_t committed_size() const; + // Memory left to use/expand in this virtual space. + size_t uncommitted_size() const; + + bool contains(const void* p) const; + + MemRegion reserved() { + MemRegion x((HeapWord*)_low_boundary, reserved_size() / HeapWordSize); + return x; + } + + void release(); + + void check_for_contiguity() PRODUCT_RETURN; + + // Debugging + void print_on(outputStream* out) PRODUCT_RETURN; + void print(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PAGEBASEDVIRTUALSPACE_HPP
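The is_area_committed/is_area_uncommitted declarations above are implemented as bitmap searches (see the .cpp earlier): a range is fully committed exactly when the first zero bit at or after start lies at or past end. A sketch of that test under the same std::vector<bool> stand-in, with next_zero playing the role of BitMap::get_next_zero_offset:

#include <cstddef>
#include <vector>

// First clear bit in [start, end), or end if there is none.
size_t next_zero(const std::vector<bool>& bm, size_t start, size_t end) {
  while (start < end && bm[start]) start++;
  return start;
}

// No clear bit in the window means every page in it is committed.
bool is_area_committed(const std::vector<bool>& bm, size_t start, size_t n) {
  return next_zero(bm, start, start + n) >= start + n;
}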
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/g1/g1BiasedArray.hpp" +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" +#include "memory/allocation.inline.hpp" +#include "runtime/virtualspace.hpp" +#include "services/memTracker.hpp" +#include "utilities/bitMap.inline.hpp" + +G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type) : + _storage(), + _commit_granularity(commit_granularity), + _region_granularity(region_granularity), + _listener(NULL), + _commit_map() { + guarantee(is_power_of_2(commit_granularity), "must be"); + guarantee(is_power_of_2(region_granularity), "must be"); + _storage.initialize_with_granularity(rs, commit_granularity); + + MemTracker::record_virtual_memory_type((address)rs.base(), type); +} + +// G1RegionToSpaceMapper implementation where the region granularity is larger than +// or the same as the commit granularity. +// Basically, the space corresponding to one region spans several OS pages.
+class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper { + private: + size_t _pages_per_region; + + public: + G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs, + size_t os_commit_granularity, + size_t alloc_granularity, + size_t commit_factor, + MemoryType type) : + G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type), + _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) { + + guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity"); + _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false); + } + + virtual void commit_regions(uintptr_t start_idx, size_t num_regions) { + _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region); + _commit_map.set_range(start_idx, start_idx + num_regions); + fire_on_commit(start_idx, num_regions); + } + + virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) { + _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region); + _commit_map.clear_range(start_idx, start_idx + num_regions); + } +}; + +// G1RegionToSpaceMapper implementation where the region granularity is smaller +// than the commit granularity. +// Basically, the contents of one OS page span several regions. +class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper { + private: + class CommitRefcountArray : public G1BiasedMappedArray<uint> { + protected: + virtual uint default_value() const { return 0; } + }; + + size_t _regions_per_page; + + CommitRefcountArray _refcounts; + + uintptr_t region_idx_to_page_idx(uint region) const { + return region / _regions_per_page; + } + + public: + G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs, + size_t os_commit_granularity, + size_t alloc_granularity, + size_t commit_factor, + MemoryType type) : + G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type), + _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() { + + guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity"); + _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity); + _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false); + } + + virtual void commit_regions(uintptr_t start_idx, size_t num_regions) { + for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) { + assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i)); + uintptr_t idx = region_idx_to_page_idx(i); + uint old_refcount = _refcounts.get_by_index(idx); + if (old_refcount == 0) { + _storage.commit(idx, 1); + } + _refcounts.set_by_index(idx, old_refcount + 1); + _commit_map.set_bit(i); + fire_on_commit(i, 1); + } + } + + virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) { + for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) { + assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i)); + uintptr_t idx = region_idx_to_page_idx(i); + uint old_refcount = _refcounts.get_by_index(idx); + assert(old_refcount > 0, "must be"); + if (old_refcount == 1) { + _storage.uncommit(idx, 1); + } + _refcounts.set_by_index(idx, old_refcount - 1); + _commit_map.clear_bit(i); + } + } +}; + +void G1RegionToSpaceMapper::fire_on_commit(uint 
start_idx, size_t num_regions) { + if (_listener != NULL) { + _listener->on_commit(start_idx, num_regions); + } +} + +G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs, + size_t os_commit_granularity, + size_t region_granularity, + size_t commit_factor, + MemoryType type) { + + if (region_granularity >= (os_commit_granularity * commit_factor)) { + return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type); + } else { + return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type); + } +}
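G1RegionsSmallerThanCommitSizeMapper above is, at heart, page refcounting: several regions share one OS page, so the page is committed when its refcount goes from 0 to 1 and uncommitted when it drops back to 0. A standalone sketch of that invariant (names hypothetical, OS calls stubbed):

#include <cassert>
#include <cstddef>
#include <vector>

void commit_page(size_t idx)   { /* commit OS page idx */ }
void uncommit_page(size_t idx) { /* uncommit OS page idx */ }

class RefcountedPages {
  size_t _regions_per_page;
  std::vector<unsigned> _refcounts;  // one counter per OS page (CommitRefcountArray)
  std::vector<bool>     _committed;  // one bit per region (the _commit_map)
 public:
  RefcountedPages(size_t num_regions, size_t regions_per_page)
    : _regions_per_page(regions_per_page),
      _refcounts((num_regions + regions_per_page - 1) / regions_per_page, 0),
      _committed(num_regions, false) {}

  void commit_region(size_t r) {
    assert(!_committed[r]);                          // committing twice is a bug
    size_t page = r / _regions_per_page;             // region_idx_to_page_idx
    if (_refcounts[page]++ == 0) commit_page(page);  // first user commits the page
    _committed[r] = true;
  }

  void uncommit_region(size_t r) {
    assert(_committed[r]);
    size_t page = r / _regions_per_page;
    assert(_refcounts[page] > 0);
    if (--_refcounts[page] == 0) uncommit_page(page); // last user releases the page
    _committed[r] = false;
  }
};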
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP + +#include "gc_implementation/g1/g1PageBasedVirtualSpace.hpp" +#include "memory/allocation.hpp" +#include "utilities/debug.hpp" + +class G1MappingChangedListener VALUE_OBJ_CLASS_SPEC { + public: + // Fired after commit of the memory, i.e. the memory this listener is registered + // for can be accessed. + virtual void on_commit(uint start_idx, size_t num_regions) = 0; +}; + +// Maps region based commit/uncommit requests to the underlying page sized virtual +// space. +class G1RegionToSpaceMapper : public CHeapObj<mtGC> { + private: + G1MappingChangedListener* _listener; + protected: + // Backing storage. + G1PageBasedVirtualSpace _storage; + size_t _commit_granularity; + size_t _region_granularity; + // Mapping management + BitMap _commit_map; + + G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type); + + void fire_on_commit(uint start_idx, size_t num_regions); + public: + MemRegion reserved() { return _storage.reserved(); } + + void set_mapping_changed_listener(G1MappingChangedListener* listener) { _listener = listener; } + + virtual ~G1RegionToSpaceMapper() { + _commit_map.resize(0, /* in_resource_area */ false); + } + + bool is_committed(uintptr_t idx) const { + return _commit_map.at(idx); + } + + virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0; + virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0; + + // Creates an appropriate G1RegionToSpaceMapper for the given parameters. + // The byte_translation_factor defines how many bytes in a region correspond to + // a single byte in the data structure this mapper is for. + // Eg. in the card table, this value corresponds to the size a single card + // table entry corresponds to. + static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs, + size_t os_commit_granularity, + size_t region_granularity, + size_t byte_translation_factor, + MemoryType type); +}; + +#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1REGIONTOSPACEMAPPER_HPP */
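G1MappingChangedListener above is a plain observer hook: after a mapper commits regions, fire_on_commit tells the registered listener which indices just became accessible, so auxiliary structures can reinitialize their slices; the card table uses exactly this in g1SATBCardTableModRefBS.cpp below. A minimal sketch of the pattern, with hypothetical names:

#include <cstddef>
#include <cstdio>

class MappingChangedListener {              // cf. G1MappingChangedListener
 public:
  virtual ~MappingChangedListener() {}
  virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
};

class Mapper {
  MappingChangedListener* _listener = nullptr;
 public:
  void set_listener(MappingChangedListener* l) { _listener = l; }
  void commit_regions(unsigned start, size_t n) {
    // ... commit backing pages, update the commit map ...
    if (_listener != nullptr) _listener->on_commit(start, n);  // fire_on_commit
  }
};

// A card-table-like client that must clear its slice of freshly mapped memory.
class ClearOnCommit : public MappingChangedListener {
  void on_commit(unsigned start, size_t n) override {
    std::printf("clearing aux data for regions [%u, %u)\n", start, (unsigned)(start + n));
  }
};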
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -540,6 +540,12 @@ bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i, bool check_for_refs_into_cset) { + assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)), + err_msg("Card at "PTR_FORMAT" index "SIZE_FORMAT" representing heap at "PTR_FORMAT" (%u) must be in committed heap", + p2i(card_ptr), + _ct_bs->index_for(_ct_bs->addr_for(card_ptr)), + _ct_bs->addr_for(card_ptr), + _g1->addr_to_region(_ct_bs->addr_for(card_ptr)))); // If the card is no longer dirty, nothing to do. if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/satbQueue.hpp" @@ -38,7 +39,6 @@ _kind = G1SATBCT; } - void G1SATBCardTableModRefBS::enqueue(oop pre_val) { // Nulls should have been already filtered. assert(pre_val->is_oop(true), "Error"); @@ -125,13 +125,52 @@ } #endif +void G1SATBCardTableLoggingModRefBSChangedListener::on_commit(uint start_idx, size_t num_regions) { + MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords); + _card_table->clear(mr); +} + G1SATBCardTableLoggingModRefBS:: G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, int max_covered_regions) : G1SATBCardTableModRefBS(whole_heap, max_covered_regions), - _dcqs(JavaThread::dirty_card_queue_set()) + _dcqs(JavaThread::dirty_card_queue_set()), + _listener() { _kind = G1SATBCTLogging; + _listener.set_card_table(this); +} + +void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) { + mapper->set_mapping_changed_listener(&_listener); + + _byte_map_size = mapper->reserved().byte_size(); + + _guard_index = cards_required(_whole_heap.word_size()) - 1; + _last_valid_index = _guard_index - 1; + + HeapWord* low_bound = _whole_heap.start(); + HeapWord* high_bound = _whole_heap.end(); + + _cur_covered_regions = 1; + _covered[0] = _whole_heap; + + _byte_map = (jbyte*) mapper->reserved().start(); + byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift); + assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); + assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map"); + + if (TraceCardTableModRefBS) { + gclog_or_tty->print_cr("G1SATBCardTableModRefBS::G1SATBCardTableModRefBS: "); + gclog_or_tty->print_cr(" " + " &_byte_map[0]: " INTPTR_FORMAT + " &_byte_map[_last_valid_index]: " INTPTR_FORMAT, + p2i(&_byte_map[0]), + p2i(&_byte_map[_last_valid_index])); + gclog_or_tty->print_cr(" " + " byte_map_base: " INTPTR_FORMAT, + p2i(byte_map_base)); + } } void
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" #include "memory/cardTableModRefBS.hpp" #include "memory/memRegion.hpp" #include "oops/oop.inline.hpp" @@ -33,6 +34,7 @@ #if INCLUDE_ALL_GCS class DirtyCardQueueSet; +class G1SATBCardTableLoggingModRefBS; // This barrier is specialized to use a logging barrier to support // snapshot-at-the-beginning marking. @@ -126,18 +128,40 @@ jbyte val = _byte_map[card_index]; return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val(); } +}; +class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener { + private: + G1SATBCardTableLoggingModRefBS* _card_table; + public: + G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { } + + void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; } + + virtual void on_commit(uint start_idx, size_t num_regions); }; // Adds card-table logging to the post-barrier. // Usual invariant: all dirty cards are logged in the DirtyCardQueueSet. class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS { + friend class G1SATBCardTableLoggingModRefBSChangedListener; private: + G1SATBCardTableLoggingModRefBSChangedListener _listener; DirtyCardQueueSet& _dcqs; public: + static size_t compute_size(size_t mem_region_size_in_words) { + size_t number_of_slots = (mem_region_size_in_words / card_size_in_words); + return ReservedSpace::allocation_align_size_up(number_of_slots); + } + G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, int max_covered_regions); + virtual void initialize() { } + virtual void initialize(G1RegionToSpaceMapper* mapper); + + virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); } + bool is_a(BarrierSet::Name bsn) { return bsn == BarrierSet::G1SATBCTLogging || G1SATBCardTableModRefBS::is_a(bsn); @@ -154,8 +178,6 @@ void write_region_work(MemRegion mr) { invalidate(mr); } void write_ref_array_work(MemRegion mr) { invalidate(mr); } - - };
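compute_size() above is one line of arithmetic: one card-table byte per card_size_in_words heap words, rounded up to the reservation alignment; initialize() then biases the map base so that locating a card is a single shift and add (byte_map_base = _byte_map - (low_bound >> card_shift)). A hedged standalone sketch of both, assuming the usual 512-byte cards (card_shift == 9); the biased-base subtraction is formally out-of-range pointer arithmetic, but on a flat address space it is exactly the trick the VM relies on:

#include <cassert>
#include <cstddef>
#include <cstdint>

const int    card_shift = 9;                      // 512-byte cards
const size_t card_size  = size_t(1) << card_shift;

// One table byte per card; the real code also aligns up to the reservation size.
size_t compute_size(size_t heap_bytes) { return heap_bytes / card_size; }

alignas(4096) char heap[1 << 20];                 // card-aligned stand-in "heap"
uint8_t byte_map[(1 << 20) >> card_shift];

int main() {
  assert(compute_size(sizeof(heap)) == sizeof(byte_map));

  // Bias the base so byte_for(p) needs no subtraction of the heap bottom.
  uint8_t* byte_map_base =
      byte_map - (reinterpret_cast<uintptr_t>(heap) >> card_shift);

  char* p = heap + 12345;
  uint8_t* entry = byte_map_base + (reinterpret_cast<uintptr_t>(p) >> card_shift);
  assert(entry == &byte_map[12345 >> card_shift]); // same card either way
  return 0;
}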
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -345,11 +345,6 @@ return low; } -#ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away -#pragma warning( disable:4355 ) // 'this' : used in base member initializer list -#endif // _MSC_VER - - HeapRegion::HeapRegion(uint hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) : @@ -361,7 +356,7 @@ _claimed(InitialClaimValue), _evacuation_failed(false), _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0), _young_type(NotYoung), _next_young_region(NULL), - _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), _pending_removal(false), + _next_dirty_cards_region(NULL), _next(NULL), _prev(NULL), #ifdef ASSERT _containing_set(NULL), #endif // ASSERT @@ -370,14 +365,20 @@ _predicted_bytes_to_copy(0) { _rem_set = new HeapRegionRemSet(sharedOffsetArray, this); + assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant."); + + initialize(mr); +} + +void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) { + assert(_rem_set->is_empty(), "Remembered set must be empty"); + + G1OffsetTableContigSpace::initialize(mr, clear_space, mangle_space); + _orig_end = mr.end(); - // Note that initialize() will set the start of the unmarked area of the - // region. hr_clear(false /*par*/, false /*clear_space*/); set_top(bottom()); record_top_and_timestamp(); - - assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant."); } CompactibleSpace* HeapRegion::next_compaction_space() const { @@ -905,7 +906,7 @@ } // If it returns false, verify_for_object() will output the - // appropriate messasge. + // appropriate message. if (do_bot_verify && !g1->is_obj_dead(obj, this) && !_offsets.verify_for_object(p, obj_size)) { @@ -1036,8 +1037,7 @@ set_top(bottom()); set_saved_mark_word(bottom()); CompactibleSpace::clear(mangle_space); - _offsets.zero_bottom_entry(); - _offsets.initialize_threshold(); + reset_bot(); } void G1OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { @@ -1127,9 +1127,11 @@ _gc_time_stamp(0) { _offsets.set_space(this); - // false ==> we'll do the clearing if there's clearing to be done. - CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle); +} + +void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) { + CompactibleSpace::initialize(mr, clear_space, mangle_space); _top = bottom(); - _offsets.zero_bottom_entry(); - _offsets.initialize_threshold(); + reset_bot(); } +
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -62,7 +62,7 @@ p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end()) // sentinel value for hrs_index -#define G1_NULL_HRS_INDEX ((uint) -1) +#define G1_NO_HRS_INDEX ((uint) -1) // A dirty card to oop closure for heap regions. It // knows how to get the G1 heap and how to use the bitmap @@ -146,6 +146,9 @@ HeapWord* top() const { return _top; } protected: + // Reset the G1OffsetTableContigSpace. + virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space); + HeapWord** top_addr() { return &_top; } // Allocation helpers (return NULL if full). inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value); @@ -200,8 +203,7 @@ virtual void print() const; void reset_bot() { - _offsets.zero_bottom_entry(); - _offsets.initialize_threshold(); + _offsets.reset_bot(); } void update_bot_for_object(HeapWord* start, size_t word_size) { @@ -264,7 +266,6 @@ #ifdef ASSERT HeapRegionSetBase* _containing_set; #endif // ASSERT - bool _pending_removal; // For parallel heapRegion traversal. jint _claimed; @@ -333,6 +334,12 @@ G1BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr); + // Initializing the HeapRegion not only resets the data structure, but also + // resets the BOT for that heap region. + // The default values for clear_space means that we will do the clearing if + // there's clearing to be done ourselves. We also always mangle the space. + virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle); + static int LogOfHRGrainBytes; static int LogOfHRGrainWords; @@ -553,26 +560,6 @@ // to provide a dummy version of it. #endif // ASSERT - // If we want to remove regions from a list in bulk we can simply tag - // them with the pending_removal tag and call the - // remove_all_pending() method on the list. - - bool pending_removal() { return _pending_removal; } - - void set_pending_removal(bool pending_removal) { - if (pending_removal) { - assert(!_pending_removal && containing_set() != NULL, - "can only set pending removal to true if it's false and " - "the region belongs to a region set"); - } else { - assert( _pending_removal && containing_set() == NULL, - "can only set pending removal to false if it's true and " - "the region does not belong to a region set"); - } - - _pending_removal = pending_removal; - } - HeapRegion* get_next_young_region() { return _next_young_region; } void set_next_young_region(HeapRegion* hr) { _next_young_region = hr;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -373,17 +373,17 @@ _max_regions, &_static_mem_size); - for (uint i = 0; i < n_par_rs; i++) { - for (uint j = 0; j < _max_regions; j++) { - set(i, j, InvalidCard); - } - } + invalidate(0, _max_regions); } -void FromCardCache::shrink(uint new_num_regions) { +void FromCardCache::invalidate(uint start_idx, size_t new_num_regions) { + guarantee((size_t)start_idx + new_num_regions <= max_uintx, + err_msg("Trying to invalidate beyond maximum region, from %u size "SIZE_FORMAT, + start_idx, new_num_regions)); for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) { - assert(new_num_regions <= _max_regions, "Must be within max."); - for (uint j = new_num_regions; j < _max_regions; j++) { + uint end_idx = (start_idx + (uint)new_num_regions); + assert(end_idx <= _max_regions, "Must be within max."); + for (uint j = start_idx; j < end_idx; j++) { set(i, j, InvalidCard); } } @@ -407,12 +407,12 @@ } } -void OtherRegionsTable::init_from_card_cache(uint max_regions) { +void OtherRegionsTable::initialize(uint max_regions) { FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions); } -void OtherRegionsTable::shrink_from_card_cache(uint new_num_regions) { - FromCardCache::shrink(new_num_regions); +void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) { + FromCardCache::invalidate(start_idx, num_regions); } void OtherRegionsTable::print_from_card_cache() { @@ -841,7 +841,7 @@ HeapRegion* hr) : _bosa(bosa), _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true), - _code_roots(), _other_regions(hr, &_m) { + _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) { reset_for_par_iteration(); }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Wed Jul 05 19:59:00 2017 +0200 @@ -84,7 +84,7 @@ static void initialize(uint n_par_rs, uint max_num_regions); - static void shrink(uint new_num_regions); + static void invalidate(uint start_idx, size_t num_regions); static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN; @@ -213,11 +213,11 @@ // Declare the heap size (in # of regions) to the OtherRegionsTable. // (Uses it to initialize from_card_cache). - static void init_from_card_cache(uint max_regions); + static void initialize(uint max_regions); - // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. - // Make sure any entries for higher regions are invalid. - static void shrink_from_card_cache(uint new_num_regions); + // Declares that regions between start_idx <= i < start_idx + num_regions are + // not in use. Make sure that any entries for these regions are invalid. + static void invalidate(uint start_idx, size_t num_regions); static void print_from_card_cache(); }; @@ -400,12 +400,11 @@ // Declare the heap size (in # of regions) to the HeapRegionRemSet(s). // (Uses it to initialize from_card_cache). static void init_heap(uint max_regions) { - OtherRegionsTable::init_from_card_cache(max_regions); + OtherRegionsTable::initialize(max_regions); } - // Declares that only regions i s.t. 0 <= i < new_n_regs are in use. - static void shrink_heap(uint new_n_regs) { - OtherRegionsTable::shrink_from_card_cache(new_n_regs); + static void invalidate(uint start_idx, uint num_regions) { + OtherRegionsTable::invalidate(start_idx, num_regions); } #ifndef PRODUCT
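The rename from shrink to invalidate above reflects the new heap shape: once regions can be uncommitted anywhere, the from-card cache can no longer just truncate its tail, it must wipe an arbitrary window [start_idx, start_idx + num_regions) in every per-thread row. The loop structure, sketched standalone over a 2-D table:

#include <cstddef>
#include <vector>

const int InvalidCard = -1;  // sentinel, as in FromCardCache

// One row per parallel remembered set, one entry per region.
using FromCardTable = std::vector<std::vector<int>>;

// Mirrors FromCardCache::invalidate: clear a window in every row, rather than
// the old shrink() which could only drop entries above a new length.
void invalidate(FromCardTable& cache, size_t start_idx, size_t num_regions) {
  const size_t end_idx = start_idx + num_regions;
  for (std::vector<int>& row : cache) {
    for (size_t j = start_idx; j < end_idx && j < row.size(); j++) {
      row[j] = InvalidCard;
    }
  }
}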
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Fri Aug 29 11:57:50 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Wed Jul 05 19:59:00 2017 +0200 @@ -25,163 +25,204 @@ #include "precompiled.hpp" #include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegionSeq.inline.hpp" -#include "gc_implementation/g1/heapRegionSet.hpp" +#include "gc_implementation/g1/heapRegionSet.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" +#include "gc_implementation/g1/concurrentG1Refine.hpp" #include "memory/allocation.hpp" -// Private +void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage, + G1RegionToSpaceMapper* prev_bitmap, + G1RegionToSpaceMapper* next_bitmap, + G1RegionToSpaceMapper* bot, + G1RegionToSpaceMapper* cardtable, + G1RegionToSpaceMapper* card_counts) { + _allocated_heapregions_length = 0; + + _heap_mapper = heap_storage; + + _prev_bitmap_mapper = prev_bitmap; + _next_bitmap_mapper = next_bitmap; + + _bot_mapper = bot; + _cardtable_mapper = cardtable; + + _card_counts_mapper = card_counts; + + MemRegion reserved = heap_storage->reserved(); + _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes); + + _available_map.resize(_regions.length(), false); + _available_map.clear(); +} -uint HeapRegionSeq::find_contiguous_from(uint from, uint num) { - uint len = length(); - assert(num > 1, "use this only for sequences of length 2 or greater"); - assert(from <= len, - err_msg("from: %u should be valid and <= than %u", from, len)); +bool HeapRegionSeq::is_available(uint region) const { + return _available_map.at(region); +} + +#ifdef ASSERT +bool HeapRegionSeq::is_free(HeapRegion* hr) const { + return _free_list.contains(hr); +} +#endif + +HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) { + HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrs_index); + MemRegion mr(bottom, bottom + HeapRegion::GrainWords); + assert(reserved().contains(mr), "invariant"); + return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr); +} + +void HeapRegionSeq::commit_regions(uint index, size_t num_regions) { + guarantee(num_regions > 0, "Must commit more than zero regions"); + guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions"); + + _num_committed += (uint)num_regions; + + _heap_mapper->commit_regions(index, num_regions); + + // Also commit auxiliary data + _prev_bitmap_mapper->commit_regions(index, num_regions); + _next_bitmap_mapper->commit_regions(index, num_regions); - uint curr = from; - uint first = G1_NULL_HRS_INDEX; - uint num_so_far = 0; - while (curr < len && num_so_far < num) { - if (at(curr)->is_empty()) { - if (first == G1_NULL_HRS_INDEX) { - first = curr; - num_so_far = 1; - } else { - num_so_far += 1; - } - } else { - first = G1_NULL_HRS_INDEX; - num_so_far = 0; + _bot_mapper->commit_regions(index, num_regions); + _cardtable_mapper->commit_regions(index, num_regions); + + _card_counts_mapper->commit_regions(index, num_regions); +} + +void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) { + guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start)); + guarantee(_num_committed >= num_regions, "pre-condition"); + + // Print before uncommitting. 
+ if (G1CollectedHeap::heap()->hr_printer()->is_active()) { + for (uint i = start; i < start + num_regions; i++) { + HeapRegion* hr = at(i); + G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end()); } - curr += 1; } - assert(num_so_far <= num, "post-condition"); - if (num_so_far == num) { - // we found enough space for the humongous object - assert(from <= first && first < len, "post-condition"); - assert(first < curr && (curr - first) == num, "post-condition"); - for (uint i = first; i < first + num; ++i) { - assert(at(i)->is_empty(), "post-condition"); + + _num_committed -= (uint)num_regions; + + _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range); + _heap_mapper->uncommit_regions(start, num_regions); + + // Also uncommit auxiliary data + _prev_bitmap_mapper->uncommit_regions(start, num_regions); + _next_bitmap_mapper->uncommit_regions(start, num_regions); + + _bot_mapper->uncommit_regions(start, num_regions); + _cardtable_mapper->uncommit_regions(start, num_regions); + + _card_counts_mapper->uncommit_regions(start, num_regions); +} + +void HeapRegionSeq::make_regions_available(uint start, uint num_regions) { + guarantee(num_regions > 0, "No point in calling this for zero regions"); + commit_regions(start, num_regions); + for (uint i = start; i < start + num_regions; i++) { + if (_regions.get_by_index(i) == NULL) { + HeapRegion* new_hr = new_heap_region(i); + _regions.set_by_index(i, new_hr); + _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1); } - return first; - } else { - // we failed to find enough space for the humongous object - return G1_NULL_HRS_INDEX; + } + + _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range); + + for (uint i = start; i < start + num_regions; i++) { + assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i)); + HeapRegion* hr = at(i); + if (G1CollectedHeap::heap()->hr_printer()->is_active()) { + G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end()); + } + HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i); + MemRegion mr(bottom, bottom + HeapRegion::GrainWords); + + hr->initialize(mr); + insert_into_free_list(at(i)); } } -// Public +uint HeapRegionSeq::expand_by(uint num_regions) { + return expand_at(0, num_regions); +} + +uint HeapRegionSeq::expand_at(uint start, uint num_regions) { + if (num_regions == 0) { + return 0; + } + + uint cur = start; + uint idx_last_found = 0; + uint num_last_found = 0; -void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) { - assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0, - "bottom should be heap region aligned"); - assert((uintptr_t) end % HeapRegion::GrainBytes == 0, - "end should be heap region aligned"); + uint expanded = 0; - _next_search_index = 0; - _allocated_length = 0; + while (expanded < num_regions && + (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) { + uint to_expand = MIN2(num_regions - expanded, num_last_found); + make_regions_available(idx_last_found, to_expand); + expanded += to_expand; + cur = idx_last_found + num_last_found + 1; + } - _regions.initialize(bottom, end, HeapRegion::GrainBytes); + verify_optional(); + return expanded; } -MemRegion HeapRegionSeq::expand_by(HeapWord* old_end, - HeapWord* new_end, - FreeRegionList* list) { - assert(old_end < new_end, "don't call it otherwise"); - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - - HeapWord* next_bottom = old_end; - assert(heap_bottom() <= 
next_bottom, "invariant"); - while (next_bottom < new_end) { - assert(next_bottom < heap_end(), "invariant"); - uint index = length(); - - assert(index < max_length(), "otherwise we cannot expand further"); - if (index == 0) { - // We have not allocated any regions so far - assert(next_bottom == heap_bottom(), "invariant"); - } else { - // next_bottom should match the end of the last/previous region - assert(next_bottom == at(index - 1)->end(), "invariant"); - } +uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) { + uint found = 0; + size_t length_found = 0; + uint cur = 0; - if (index == _allocated_length) { - // We have to allocate a new HeapRegion. - HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom); - if (new_hr == NULL) { - // allocation failed, we bail out and return what we have done so far - return MemRegion(old_end, next_bottom); - } - assert(_regions.get_by_index(index) == NULL, "invariant"); - _regions.set_by_index(index, new_hr); - increment_allocated_length(); + while (length_found < num && cur < max_length()) { + HeapRegion* hr = _regions.get_by_index(cur); + if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) { + // This region is a potential candidate for allocation into. + length_found++; + } else { + // This region is not a candidate. The next region is the next possible one. + found = cur + 1; + length_found = 0; } - // Have to increment the length first, otherwise we will get an - // assert failure at(index) below. - increment_length(); - HeapRegion* hr = at(index); - list->add_as_tail(hr); + cur++; + } - next_bottom = hr->end(); + if (length_found == num) { + for (uint i = found; i < (found + num); i++) { + HeapRegion* hr = _regions.get_by_index(i); + // sanity check + guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()), + err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT + " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr))); + } + return found; + } else { + return G1_NO_HRS_INDEX; } - assert(next_bottom == new_end, "post-condition"); - return MemRegion(old_end, next_bottom); } -uint HeapRegionSeq::free_suffix() { - uint res = 0; - uint index = length(); - while (index > 0) { - index -= 1; - if (!at(index)->is_empty()) { - break; +HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const { + guarantee(r != NULL, "Start region must be a valid region"); + guarantee(is_available(r->hrs_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrs_index())); + for (uint i = r->hrs_index() + 1; i < _allocated_heapregions_length; i++) { + HeapRegion* hr = _regions.get_by_index(i); + if (is_available(i)) { + return hr; } - res += 1; } - return res; -} - -uint HeapRegionSeq::find_contiguous(uint num) { - assert(num > 1, "use this only for sequences of length 2 or greater"); - assert(_next_search_index <= length(), - err_msg("_next_search_index: %u should be valid and <= than %u", - _next_search_index, length())); - - uint start = _next_search_index; - uint res = find_contiguous_from(start, num); - if (res == G1_NULL_HRS_INDEX && start > 0) { - // Try starting from the beginning. If _next_search_index was 0, - // no point in doing this again. 
- res = find_contiguous_from(0, num); - } - if (res != G1_NULL_HRS_INDEX) { - assert(res < length(), err_msg("res: %u should be valid", res)); - _next_search_index = res + num; - assert(_next_search_index <= length(), - err_msg("_next_search_index: %u should be valid and <= than %u", - _next_search_index, length())); - } - return res; + return NULL; } void HeapRegionSeq::iterate(HeapRegionClosure* blk) const { - iterate_from((HeapRegion*) NULL, blk); -} - -void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const { - uint hr_index = 0; - if (hr != NULL) { - hr_index = hr->hrs_index(); - } + uint len = max_length(); - uint len = length(); - for (uint i = hr_index; i < len; i += 1) { - bool res = blk->doHeapRegion(at(i)); - if (res) { - blk->incomplete(); - return; + for (uint i = 0; i < len; i++) { + if (!is_available(i)) { + continue; } - } - for (uint i = 0; i < hr_index; i += 1) { + guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i)); bool res = blk->doHeapRegion(at(i)); if (res) { blk->incomplete(); @@ -190,71 +231,220 @@ } } +uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) const { + guarantee(res_idx != NULL, "checking"); + guarantee(start_idx <= (max_length() + 1), "checking"); + + uint num_regions = 0; + + uint cur = start_idx; + while (cur < max_length() && is_available(cur)) { + cur++; + } + if (cur == max_length()) { + return num_regions; + } + *res_idx = cur; + while (cur < max_length() && !is_available(cur)) { + cur++; + } + num_regions = cur - *res_idx; +#ifdef ASSERT + for (uint i = *res_idx; i < (*res_idx + num_regions); i++) { + assert(!is_available(i), "just checking"); + } + assert(cur == max_length() || num_regions == 0 || is_available(cur), + err_msg("The region at the current position %u must be available or at the end of the heap.", cur)); +#endif + return num_regions; +} + +uint HeapRegionSeq::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const { + return num_regions * worker_i / num_workers; +} + +void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const { + const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length); + + // Every worker will actually look at all regions, skipping over regions that + // are currently not committed. + // This also (potentially) iterates over regions newly allocated during GC. This + // is no problem except for some extra work. + for (uint count = 0; count < _allocated_heapregions_length; count++) { + const uint index = (start_index + count) % _allocated_heapregions_length; + assert(0 <= index && index < _allocated_heapregions_length, "sanity"); + // Skip over unavailable regions + if (!is_available(index)) { + continue; + } + HeapRegion* r = _regions.get_by_index(index); + // We'll ignore "continues humongous" regions (we'll process them + // when we come across their corresponding "start humongous" + // region) and regions already claimed. + if (r->claim_value() == claim_value || r->continuesHumongous()) { + continue; + } + // OK, try to claim it + if (!r->claimHeapRegion(claim_value)) { + continue; + } + // Success! + if (r->startsHumongous()) { + // If the region is "starts humongous" we'll iterate over its + // "continues humongous" first; in fact we'll do them + // first. The order is important. 
In one case, calling the + // closure on the "starts humongous" region might de-allocate + // and clear all its "continues humongous" regions and, as a + // result, we might end up processing them twice. So, we'll do + // them first (note: most closures will ignore them anyway) and + // then we'll do the "starts humongous" region. + for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) { + HeapRegion* chr = _regions.get_by_index(ch_index); + + assert(chr->continuesHumongous(), "Must be humongous region"); + assert(chr->humongous_start_region() == r, + err_msg("Must work on humongous continuation of the original start region " + PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr))); + assert(chr->claim_value() != claim_value, + "Must not have been claimed yet because claiming of humongous continuation first claims the start region"); + + bool claim_result = chr->claimHeapRegion(claim_value); + // We should always be able to claim it; no one else should + // be trying to claim this region. + guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object"); + + bool res2 = blk->doHeapRegion(chr); + if (res2) { + return; + } + + // Right now, this holds (i.e., no closure that actually + // does something with "continues humongous" regions + // clears them). We might have to weaken it in the future, + // but let's leave these two asserts here for extra safety. + assert(chr->continuesHumongous(), "should still be the case"); + assert(chr->humongous_start_region() == r, "sanity"); + } + } + + bool res = blk->doHeapRegion(r); + if (res) { + return; + } + } +} + uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) { - // Reset this in case it's currently pointing into the regions that - // we just removed. - _next_search_index = 0; - assert(length() > 0, "the region sequence should not be empty"); - assert(length() <= _allocated_length, "invariant"); - assert(_allocated_length > 0, "we should have at least one region committed"); + assert(length() <= _allocated_heapregions_length, "invariant"); + assert(_allocated_heapregions_length > 0, "we should have at least one region committed"); assert(num_regions_to_remove < length(), "We should never remove all regions"); - uint i = 0; - for (; i < num_regions_to_remove; i++) { - HeapRegion* cur = at(length() - 1); + if (num_regions_to_remove == 0) { + return 0; + } + + uint removed = 0; + uint cur = _allocated_heapregions_length - 1; + uint idx_last_found = 0; + uint num_last_found = 0; - if (!cur->is_empty()) { - // We have to give up if the region can not be moved - break; + while ((removed < num_regions_to_remove) && + (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) { + // Only allow uncommit from the end of the heap. 
+ if ((idx_last_found + num_last_found) != _allocated_heapregions_length) { + return 0; + } + uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found); + + uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove); + + cur -= num_last_found; + removed += to_remove; } - assert(!cur->isHumongous(), "Humongous regions should not be empty"); - decrement_length(); - } - return i; + verify_optional(); + + return removed; } -#ifndef PRODUCT -void HeapRegionSeq::verify_optional() { - guarantee(length() <= _allocated_length, +uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const { + guarantee(start_idx < _allocated_heapregions_length, "checking"); + guarantee(res_idx != NULL, "checking"); + + uint num_regions_found = 0; + + jlong cur = start_idx; + while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) { + cur--; + } + if (cur == -1) { + return num_regions_found; + } + jlong old_cur = cur; + // cur indexes the first empty region + while (cur != -1 && is_available(cur) && at(cur)->is_empty()) { + cur--; + } + *res_idx = cur + 1; + num_regions_found = old_cur - cur; +
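find_empty_from_idx_reverse, whose tail is cut off above, walks backwards from start_idx: skip the run of regions that are unavailable or non-empty, then consume the run of committed empty regions, reporting where that run starts and how long it is; shrink_by only acts when the run ends flush with the top of the committed heap, because G1 uncommits from the end only. The scan, sketched over a flat array where true means "committed and empty":

#include <cstddef>
#include <vector>

// Mirrors the reverse scan: 'empty[i]' plays the role of
// 'is_available(i) && at(i)->is_empty()'.
size_t find_empty_from_idx_reverse(const std::vector<bool>& empty,
                                   size_t start_idx, size_t* res_idx) {
  long cur = static_cast<long>(start_idx);   // signed, so it can run past index 0
  while (cur != -1 && !empty[cur]) cur--;    // skip the non-empty tail
  if (cur == -1) return 0;                   // nothing empty at or below start_idx
  long old_cur = cur;
  while (cur != -1 && empty[cur]) cur--;     // consume the empty run
  *res_idx = static_cast<size_t>(cur + 1);   // first region of the run
  return static_cast<size_t>(old_cur - cur); // number of empty regions found
}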