changeset 51166:97f4558b287f
Merge
author | prr |
date | Thu, 12 Jul 2018 11:09:23 -0700 |
parents | 6d59a6d025e8 6c449bdee4fa |
children | d03b04e7569a |
files | src/java.base/share/classes/sun/net/NetworkServer.java src/java.base/share/classes/sun/net/URLCanonicalizer.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/AltHashing.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/SymbolTable.java test/hotspot/jtreg/runtime/SharedArchiveFile/SASymbolTableTest.java test/hotspot/jtreg/runtime/SharedArchiveFile/SASymbolTableTestAgent.java test/hotspot/jtreg/serviceability/sa/ClhsdbSymbol.java test/hotspot/jtreg/serviceability/sa/ClhsdbSymbolTable.java test/hotspot/jtreg/vmTestbase/nsk/jvmti/AttachOnDemand/attach024/java.base/java/util/ServiceConfigurationError.java test/jdk/java/lang/System/SetProperties.java test/jdk/sun/tools/jhsdb/AlternateHashingTest.java test/jdk/sun/tools/jhsdb/LingeredAppWithAltHashing.java test/langtools/tools/javac/6558548/T6558548_6.out test/langtools/tools/javac/8013179/T8013179.java test/langtools/tools/javac/8013179/T8013179.out test/langtools/tools/javac/StringsInSwitch/BadlyTypedLabel1_6.out test/langtools/tools/javac/StringsInSwitch/BadlyTypedLabel2_6.out test/langtools/tools/javac/StringsInSwitch/NonConstantLabel6.out test/langtools/tools/javac/StringsInSwitch/OneCaseSwitches.out test/langtools/tools/javac/StringsInSwitch/RSCL1_6.out test/langtools/tools/javac/StringsInSwitch/RSCL2_6.out test/langtools/tools/javac/TryWithResources/BadTwr6.out test/langtools/tools/javac/TryWithResources/BadTwrSyntax6.out test/langtools/tools/javac/TryWithResources/PlainTry6.out test/langtools/tools/javac/TryWithResources/TwrOnNonResource6.out test/langtools/tools/javac/annotations/repeatingAnnotations/WrongVersion6.out test/langtools/tools/javac/annotations/typeAnnotations/failures/AnnotationVersion.out test/langtools/tools/javac/defaultMethods/static/StaticInvokeQualified6.out test/langtools/tools/javac/defaultMethods/static/StaticInvokeSimple6.out test/langtools/tools/javac/literals/BadBinaryLiterals.6.out test/langtools/tools/javac/literals/BadUnderscoreLiterals.6.out test/langtools/tools/javac/types/CastObjectToPrimitiveTest.out |
diffstat | 599 files changed, 15946 insertions(+), 6511 deletions(-) |
--- a/.hgtags Tue Jul 03 16:09:25 2018 +0530 +++ b/.hgtags Thu Jul 12 11:09:23 2018 -0700 @@ -490,9 +490,9 @@ 02934b0d661b82b7fe1052a04998d2091352e08d jdk-11+16 64e4b1686141e57a681936a8283983341484676e jdk-11+17 e1b3def126240d5433902f3cb0e91a4c27f6db50 jdk-11+18 -fb8b3f4672774e15654958295558a1af1b576919 jdk-11+19 -fb8b3f4672774e15654958295558a1af1b576919 jdk-11+19 36ca515343e00b021dcfc902e986d26ec994a2e5 jdk-11+19 -c9cd3ec6a0ebaf373b6ff0071c396cc657f7c4bd jdk-12+0 -c9cd3ec6a0ebaf373b6ff0071c396cc657f7c4bd jdk-12+0 95aad0c785e497f1bade3955c4e4a677b629fa9d jdk-12+0 +9816d7cc655e53ba081f938b656e31971b8f097a jdk-11+20 +14708e1acdc3974f4539027cbbcfa6d69f83cf51 jdk-11+21 +00b16d0457e43d23f6ca5ade6b243edce62750a0 jdk-12+1 +69b438908512d3dfef5852c6a843a5778333a309 jdk-12+2
--- a/make/Docs.gmk Tue Jul 03 16:09:25 2018 +0530 +++ b/make/Docs.gmk Thu Jul 12 11:09:23 2018 -0700 @@ -274,6 +274,8 @@ $1_INDIRECT_EXPORTS := $$(call FindTransitiveIndirectDepsForModules, $$($1_MODULES)) $1_ALL_MODULES := $$(sort $$($1_MODULES) $$($1_INDIRECT_EXPORTS)) + $1_JAVA_ARGS := -Dextlink.spec.version=$$(VERSION_SPECIFICATION) + ifeq ($$(ENABLE_FULL_DOCS), true) # Tell the ModuleGraph taglet to generate html links to soon-to-be-created # png files with module graphs. @@ -327,9 +329,10 @@ ) ifeq ($$($1_JAVADOC_CMD), ) - $1_JAVADOC_CMD := $$(JAVA) -Djava.awt.headless=true \ - -Dextlink.spec.version=$$(VERSION_SPECIFICATION) $$($1_JAVA_ARGS) \ + $1_JAVADOC_CMD := $$(JAVA) -Djava.awt.headless=true $$($1_JAVA_ARGS) \ $$(NEW_JAVADOC) + else + $1_OPTIONS += $$(addprefix -J, $$($1_JAVA_ARGS)) endif $1_VARDEPS := $$($1_JAVA_ARGS) $$($1_OPTIONS) $$(MODULES_SOURCE_PATH) \ @@ -463,7 +466,9 @@ # Setup generation of the reference Java SE API documentation (javadoc + modulegraph) # The reference javadoc is just the same as javase, but using the BootJDK javadoc -# and a stable set of javadoc options. +# and a stable set of javadoc options. Typically it is used for generating +# diffs between the reference javadoc and a javadoc bundle of a specific build +# generated in the same way. $(eval $(call SetupApiDocsGeneration, REFERENCE_API, \ MODULES := $(JAVASE_MODULES), \ @@ -497,10 +502,9 @@ JDK_INDEX_TARGETS += $(COPY_GLOBAL_RESOURCES) # Copy the legal notices distributed with the docs bundle -DOCS_LEGAL_NOTICES := jquery.md jszip.md pako.md $(eval $(call SetupCopyFiles, COPY_DOCS_LEGAL_NOTICES, \ SRC := $(TOPDIR)/src/jdk.javadoc/share/legal, \ - FILES := $(DOCS_LEGAL_NOTICES), \ + FILES := $(wildcard $(TOPDIR)/src/jdk.javadoc/share/legal/*), \ DEST := $(DOCS_OUTPUTDIR)/legal, \ )) JDK_INDEX_TARGETS += $(COPY_DOCS_LEGAL_NOTICES)
--- a/make/Main.gmk Tue Jul 03 16:09:25 2018 +0530 +++ b/make/Main.gmk Thu Jul 12 11:09:23 2018 -0700 @@ -487,6 +487,14 @@ +($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f JtregNativeJdk.gmk \ test-image-jdk-jtreg-native) +build-test-hotspot-jtreg-graal: + +($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f JtregGraalUnit.gmk \ + build-test-hotspot-jtreg-graal) + +test-image-hotspot-jtreg-graal: + +($(CD) $(TOPDIR)/make/test && $(MAKE) $(MAKE_ARGS) -f JtregGraalUnit.gmk \ + test-image-hotspot-jtreg-graal) + run-test: +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f RunTests.gmk run-test TEST="$(TEST)") @@ -523,6 +531,7 @@ test-image-hotspot-jtreg-native build-test-jdk-jtreg-native \ test-image-jdk-jtreg-native build-test-lib build-test-failure-handler \ test-failure-handler test-image-failure-handler test-image-hotspot-gtest \ + test-image-hotspot-jtreg-graal build-test-hotspot-jtreg-graal \ run-test exploded-run-test ################################################################################ @@ -853,10 +862,14 @@ build-test-jdk-jtreg-native: buildtools-jdk java.base-libs + build-test-hotspot-jtreg-graal: exploded-image-optimize + test-image-hotspot-jtreg-native: build-test-hotspot-jtreg-native test-image-jdk-jtreg-native: build-test-jdk-jtreg-native + test-image-hotspot-jtreg-graal: build-test-hotspot-jtreg-graal + test-image-hotspot-gtest: hotspot test-hotspot-internal: exploded-image @@ -993,6 +1006,10 @@ # If not already set, set the JVM specific targets to build the test image JVM_TEST_IMAGE_TARGETS ?= test-image-hotspot-jtreg-native test-image-hotspot-gtest +ifeq ($(INCLUDE_GRAAL), true) + JVM_TEST_IMAGE_TARGETS += test-image-hotspot-jtreg-graal +endif + # This target builds the test image test-image: prepare-test-image \ test-image-jdk-jtreg-native test-image-failure-handler \
--- a/make/RunTests.gmk Tue Jul 03 16:09:25 2018 +0530 +++ b/make/RunTests.gmk Thu Jul 12 11:09:23 2018 -0700 @@ -535,6 +535,8 @@ $1_JTREG_BASIC_OPTIONS += -cpa:$$(JIB_JAR) endif + $1_JTREG_BASIC_OPTIONS += -e:TEST_IMAGE_GRAAL_DIR=${TEST_IMAGE_DIR}/hotspot/jtreg/graal + ifneq ($$(JTREG_FAILURE_HANDLER_OPTIONS), ) $1_JTREG_LAUNCHER_OPTIONS += -Djava.library.path="$(JTREG_FAILURE_HANDLER_DIR)" endif
--- a/make/autoconf/hotspot.m4 Tue Jul 03 16:09:25 2018 +0530 +++ b/make/autoconf/hotspot.m4 Thu Jul 12 11:09:23 2018 -0700 @@ -201,8 +201,6 @@ ENABLE_AOT="true" elif test "x$enable_aot" = "xno"; then ENABLE_AOT="false" - AC_MSG_CHECKING([if aot should be enabled]) - AC_MSG_RESULT([no, forced]) else AC_MSG_ERROR([Invalid value for --enable-aot: $enable_aot]) fi @@ -228,7 +226,7 @@ else ENABLE_AOT="false" if test "x$enable_aot" = "xyes"; then - AC_MSG_ERROR([AOT is currently only supported on x86_64. Remove --enable-aot.]) + AC_MSG_ERROR([AOT is currently only supported on x86_64 and aarch64. Remove --enable-aot.]) fi fi fi @@ -374,57 +372,106 @@ fi fi - # Only enable jvmci on x86_64, sparcv9 and aarch64. - if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \ - test "x$OPENJDK_TARGET_CPU" = "xsparcv9" || \ - test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then - JVM_FEATURES_jvmci="jvmci" + AC_MSG_CHECKING([if jvmci module jdk.internal.vm.ci should be built]) + # Check if jvmci is diabled + DISABLE_JVMCI=`$ECHO $DISABLED_JVM_FEATURES | $GREP jvmci` + if test "x$DISABLE_JVMCI" = "xjvmci"; then + AC_MSG_RESULT([no, forced]) + JVM_FEATURES_jvmci="" + INCLUDE_JVMCI="false" else - JVM_FEATURES_jvmci="" + # Only enable jvmci on x86_64, sparcv9 and aarch64 + if test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \ + test "x$OPENJDK_TARGET_CPU" = "xsparcv9" || \ + test "x$OPENJDK_TARGET_CPU" = "xaarch64" ; then + AC_MSG_RESULT([yes]) + JVM_FEATURES_jvmci="jvmci" + INCLUDE_JVMCI="true" + else + AC_MSG_RESULT([no]) + JVM_FEATURES_jvmci="" + INCLUDE_JVMCI="false" + if HOTSPOT_CHECK_JVM_FEATURE(jvmci); then + AC_MSG_ERROR([JVMCI is currently not supported on this platform.]) + fi + fi fi - AC_MSG_CHECKING([if jdk.internal.vm.compiler should be built]) - if HOTSPOT_CHECK_JVM_FEATURE(graal); then - AC_MSG_RESULT([yes, forced]) - if test "x$JVM_FEATURES_jvmci" != "xjvmci" ; then - AC_MSG_ERROR([Specified JVM feature 'graal' requires feature 'jvmci']) - fi - INCLUDE_GRAAL="true" + AC_SUBST(INCLUDE_JVMCI) + + AC_MSG_CHECKING([if graal module jdk.internal.vm.compiler should be built]) + # Check if graal is diabled + DISABLE_GRAAL=`$ECHO $DISABLED_JVM_FEATURES | $GREP graal` + if test "x$DISABLE_GRAAL" = "xgraal"; then + AC_MSG_RESULT([no, forced]) + JVM_FEATURES_graal="" + INCLUDE_GRAAL="false" else - # By default enable graal build on x64 or where AOT is available. - # graal build requires jvmci. - if test "x$JVM_FEATURES_jvmci" = "xjvmci" && \ - (test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \ - test "x$ENABLE_AOT" = "xtrue") ; then - AC_MSG_RESULT([yes]) + if HOTSPOT_CHECK_JVM_FEATURE(graal); then + AC_MSG_RESULT([yes, forced]) + if test "x$JVM_FEATURES_jvmci" != "xjvmci" ; then + AC_MSG_ERROR([Specified JVM feature 'graal' requires feature 'jvmci']) + fi JVM_FEATURES_graal="graal" INCLUDE_GRAAL="true" else - AC_MSG_RESULT([no]) - JVM_FEATURES_graal="" - INCLUDE_GRAAL="false" + # By default enable graal build on x64 or where AOT is available. + # graal build requires jvmci. 
+ if test "x$JVM_FEATURES_jvmci" = "xjvmci" && \ + (test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \ + test "x$ENABLE_AOT" = "xtrue") ; then + AC_MSG_RESULT([yes]) + JVM_FEATURES_graal="graal" + INCLUDE_GRAAL="true" + else + AC_MSG_RESULT([no]) + JVM_FEATURES_graal="" + INCLUDE_GRAAL="false" + fi fi fi AC_SUBST(INCLUDE_GRAAL) + # Disable aot with '--with-jvm-features=-aot' + DISABLE_AOT=`$ECHO $DISABLED_JVM_FEATURES | $GREP aot` + if test "x$DISABLE_AOT" = "xaot"; then + ENABLE_AOT="false" + fi + AC_MSG_CHECKING([if aot should be enabled]) if test "x$ENABLE_AOT" = "xtrue"; then - if test "x$enable_aot" = "xyes"; then - AC_MSG_RESULT([yes, forced]) + if test "x$JVM_FEATURES_graal" != "xgraal"; then + if test "x$enable_aot" = "xyes" || HOTSPOT_CHECK_JVM_FEATURE(aot); then + AC_MSG_RESULT([yes, forced]) + AC_MSG_ERROR([Specified JVM feature 'aot' requires feature 'graal']) + else + AC_MSG_RESULT([no]) + fi + JVM_FEATURES_aot="" + ENABLE_AOT="false" else - AC_MSG_RESULT([yes]) + if test "x$enable_aot" = "xyes" || HOTSPOT_CHECK_JVM_FEATURE(aot); then + AC_MSG_RESULT([yes, forced]) + else + AC_MSG_RESULT([yes]) + fi + JVM_FEATURES_aot="aot" fi - JVM_FEATURES_aot="aot" else - if test "x$enable_aot" = "xno"; then + if test "x$enable_aot" = "xno" || "x$DISABLE_AOT" = "xaot"; then AC_MSG_RESULT([no, forced]) else AC_MSG_RESULT([no]) fi JVM_FEATURES_aot="" + if HOTSPOT_CHECK_JVM_FEATURE(aot); then + AC_MSG_ERROR([To enable aot, you must use --enable-aot]) + fi fi + AC_SUBST(ENABLE_AOT) + if test "x$OPENJDK_TARGET_CPU" = xarm ; then # Default to use link time optimizations on minimal on arm JVM_FEATURES_link_time_opt="link-time-opt"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/autoconf/lib-tests.m4 Thu Jul 12 11:09:23 2018 -0700 @@ -0,0 +1,57 @@ +# +# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + +############################################################################### +# +# Check for graalunit libs, needed for running graalunit tests. +# +AC_DEFUN_ONCE([LIB_TESTS_SETUP_GRAALUNIT], +[ + AC_ARG_WITH(graalunit-lib, [AS_HELP_STRING([--with-graalunit-lib], + [specify location of 3rd party libraries used by Graal unit tests])]) + + GRAALUNIT_LIB= + if test "x${with_graalunit_lib}" != x; then + AC_MSG_CHECKING([for graalunit libs]) + if test "x${with_graalunit_lib}" = xno; then + AC_MSG_RESULT([disabled, graalunit tests can not be run]) + elif test "x${with_graalunit_lib}" = xyes; then + AC_MSG_RESULT([not specified]) + AC_MSG_ERROR([You must specify the path to 3rd party libraries used by Graal unit tests]) + else + GRAALUNIT_LIB="${with_graalunit_lib}" + if test ! -d "${GRAALUNIT_LIB}"; then + AC_MSG_RESULT([no]) + AC_MSG_ERROR([Could not find graalunit 3rd party libraries as specified. (${with_graalunit_lib})]) + else + AC_MSG_RESULT([$GRAALUNIT_LIB]) + fi + fi + fi + + BASIC_FIXUP_PATH([GRAALUNIT_LIB]) + AC_SUBST(GRAALUNIT_LIB) +]) +
--- a/make/autoconf/libraries.m4 Tue Jul 03 16:09:25 2018 +0530 +++ b/make/autoconf/libraries.m4 Thu Jul 12 11:09:23 2018 -0700 @@ -32,6 +32,7 @@ m4_include([lib-std.m4]) m4_include([lib-x11.m4]) m4_include([lib-fontconfig.m4]) +m4_include([lib-tests.m4]) ################################################################################ # Determine which libraries are needed for this configuration @@ -101,6 +102,7 @@ LIB_SETUP_BUNDLED_LIBS LIB_SETUP_MISC_LIBS LIB_SETUP_SOLARIS_STLPORT + LIB_TESTS_SETUP_GRAALUNIT if test "x$TOOLCHAIN_TYPE" = xsolstudio; then GLOBAL_LIBS="-lc"
--- a/make/autoconf/spec.gmk.in Tue Jul 03 16:09:25 2018 +0530 +++ b/make/autoconf/spec.gmk.in Thu Jul 12 11:09:23 2018 -0700 @@ -347,6 +347,7 @@ LIBFFI_CFLAGS:=@LIBFFI_CFLAGS@ ENABLE_LIBFFI_BUNDLING:=@ENABLE_LIBFFI_BUNDLING@ LIBFFI_LIB_FILE:=@LIBFFI_LIB_FILE@ +GRAALUNIT_LIB := @GRAALUNIT_LIB@ PACKAGE_PATH=@PACKAGE_PATH@ @@ -813,6 +814,7 @@ INCLUDE_SA=@INCLUDE_SA@ INCLUDE_GRAAL=@INCLUDE_GRAAL@ +INCLUDE_JVMCI=@INCLUDE_JVMCI@ OS_VERSION_MAJOR:=@OS_VERSION_MAJOR@ OS_VERSION_MINOR:=@OS_VERSION_MINOR@
--- a/make/common/Modules.gmk Tue Jul 03 16:09:25 2018 +0530 +++ b/make/common/Modules.gmk Thu Jul 12 11:09:23 2018 -0700 @@ -205,7 +205,14 @@ endif ################################################################################ -# Filter out Graal specific modules if Graal build is disabled +# Filter out jvmci specific modules if jvmci is disabled + +ifeq ($(INCLUDE_JVMCI), false) + MODULES_FILTER += jdk.internal.vm.ci +endif + +################################################################################ +# Filter out Graal specific modules if Graal is disabled ifeq ($(INCLUDE_GRAAL), false) MODULES_FILTER += jdk.internal.vm.compiler
--- a/make/conf/jib-profiles.js Tue Jul 03 16:09:25 2018 +0530 +++ b/make/conf/jib-profiles.js Thu Jul 12 11:09:23 2018 -0700 @@ -239,7 +239,7 @@ // These are the base setttings for all the main build profiles. common.main_profile_base = { - dependencies: ["boot_jdk", "gnumake", "jtreg", "jib"], + dependencies: ["boot_jdk", "gnumake", "jtreg", "jib", "autoconf"], default_make_targets: ["product-bundles", "test-bundles"], configure_args: concat(["--enable-jtreg-failure-handler"], "--with-exclude-translations=de,es,fr,it,ko,pt_BR,sv,ca,tr,cs,sk,ja_JP_A,ja_JP_HA,ja_JP_HI,ja_JP_I", @@ -378,7 +378,7 @@ "linux-x64": { target_os: "linux", target_cpu: "x64", - dependencies: ["devkit", "autoconf", "graphviz", "pandoc"], + dependencies: ["devkit", "graphviz", "pandoc", "graalunit_lib"], configure_args: concat(common.configure_args_64bit, "--enable-full-docs", "--with-zlib=system"), default_make_targets: ["docs-bundles"], @@ -388,7 +388,7 @@ target_os: "linux", target_cpu: "x86", build_cpu: "x64", - dependencies: ["devkit", "autoconf"], + dependencies: ["devkit"], configure_args: concat(common.configure_args_32bit, "--with-jvm-variants=minimal,server", "--with-zlib=system"), }, @@ -396,7 +396,7 @@ "macosx-x64": { target_os: "macosx", target_cpu: "x64", - dependencies: ["devkit", "autoconf"], + dependencies: ["devkit", "graalunit_lib"], configure_args: concat(common.configure_args_64bit, "--with-zlib=system", "--with-macosx-version-max=10.9.0"), }, @@ -404,7 +404,7 @@ "solaris-x64": { target_os: "solaris", target_cpu: "x64", - dependencies: ["devkit", "autoconf", "cups"], + dependencies: ["devkit", "cups"], configure_args: concat(common.configure_args_64bit, "--with-zlib=system", "--enable-dtrace"), }, @@ -412,7 +412,7 @@ "solaris-sparcv9": { target_os: "solaris", target_cpu: "sparcv9", - dependencies: ["devkit", "autoconf", "cups"], + dependencies: ["devkit", "cups"], configure_args: concat(common.configure_args_64bit, "--with-zlib=system", "--enable-dtrace"), }, @@ -420,7 +420,7 @@ "windows-x64": { target_os: "windows", target_cpu: "x64", - dependencies: ["devkit", "autoconf"], + dependencies: ["devkit", "graalunit_lib"], configure_args: concat(common.configure_args_64bit), }, @@ -428,7 +428,7 @@ target_os: "windows", target_cpu: "x86", build_cpu: "x64", - dependencies: ["devkit", "autoconf"], + dependencies: ["devkit"], configure_args: concat(common.configure_args_32bit), }, @@ -436,7 +436,7 @@ target_os: "linux", target_cpu: "aarch64", build_cpu: "x64", - dependencies: ["devkit", "autoconf", "build_devkit", "cups"], + dependencies: ["devkit", "build_devkit", "cups"], configure_args: [ "--openjdk-target=aarch64-linux-gnu", "--with-freetype=bundled", "--disable-warnings-as-errors", "--with-cpu-port=aarch64", @@ -447,7 +447,7 @@ target_os: "linux", target_cpu: "aarch64", build_cpu: "x64", - dependencies: ["devkit", "autoconf", "build_devkit", "cups", "headless_stubs"], + dependencies: ["devkit", "build_devkit", "cups", "headless_stubs"], configure_args: [ "--with-cpu-port=arm64", "--with-jvm-variants=server", @@ -460,7 +460,7 @@ target_os: "linux", target_cpu: "arm", build_cpu: "x64", - dependencies: ["devkit", "autoconf", "build_devkit", "cups"], + dependencies: ["devkit", "build_devkit", "cups"], configure_args: [ "--openjdk-target=arm-linux-gnueabihf", "--with-freetype=bundled", "--with-abi-profile=arm-vfp-hflt", "--disable-warnings-as-errors" @@ -471,7 +471,7 @@ target_os: "linux", target_cpu: "arm", build_cpu: "x64", - dependencies: ["devkit", "autoconf", "build_devkit", "cups"], + dependencies: 
["devkit", "build_devkit", "cups"], configure_args: [ "--with-jvm-variants=minimal1,client", "--with-x=" + input.get("devkit", "install_path") + "/arm-linux-gnueabihf/libc/usr/X11R6-PI", @@ -972,6 +972,14 @@ configure_args: "", }, + graalunit_lib: { + organization: common.organization, + ext: "zip", + revision: "619_Apr_12_2018", + module: "graalunit-lib", + configure_args: "--with-graalunit-lib=" + input.get("graalunit_lib", "install_path"), + environment_name: "GRAALUNIT_LIB" + }, }; // Need to add a value for the Visual Studio tools variable to make
--- a/make/data/publicsuffixlist/VERSION Tue Jul 03 16:09:25 2018 +0530 +++ b/make/data/publicsuffixlist/VERSION Thu Jul 12 11:09:23 2018 -0700 @@ -1,2 +1,2 @@ Github: https://raw.githubusercontent.com/publicsuffix/list/2225db8d9f4a2a27ec697c883360632fa0c16261/public_suffix_list.dat -Date: 2018-05-09 +Date: 2018-05-24
--- a/make/hotspot/lib/JvmFeatures.gmk Tue Jul 03 16:09:25 2018 +0530 +++ b/make/hotspot/lib/JvmFeatures.gmk Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ # -# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -109,6 +109,7 @@ classListParser.cpp \ classLoaderExt.cpp \ filemap.cpp \ + heapShared.cpp \ metaspaceShared.cpp \ metaspaceShared_$(HOTSPOT_TARGET_CPU).cpp \ metaspaceShared_$(HOTSPOT_TARGET_CPU_ARCH).cpp \
--- a/make/hotspot/symbols/symbols-unix Tue Jul 03 16:09:25 2018 +0530 +++ b/make/hotspot/symbols/symbols-unix Thu Jul 12 11:09:23 2018 -0700 @@ -136,6 +136,7 @@ JVM_InitProperties JVM_InitStackTraceElement JVM_InitStackTraceElementArray +JVM_InitializeFromArchive JVM_InternString JVM_Interrupt JVM_InvokeMethod
--- a/make/scripts/compare.sh Tue Jul 03 16:09:25 2018 +0530 +++ b/make/scripts/compare.sh Thu Jul 12 11:09:23 2018 -0700 @@ -1385,26 +1385,22 @@ echo " $OTHER_SPARKLE_DIR" fi - if [ -d "$OTHER/images" ]; then - OTHER_SEC_DIR="$OTHER/images" - else - OTHER_SEC_DIR="$OTHER/tmp" - fi - if [ -f "$THIS_SEC_DIR/sec-bin.zip" ]; then + THIS_SEC_DIR="$THIS/images" + OTHER_SEC_DIR="$OTHER/images" + if [ -f "$THIS_SEC_DIR/sec-bin.zip" ] && [ -f "$OTHER_SEC_DIR/sec-bin.zip" ]; then OTHER_SEC_BIN="$OTHER_SEC_DIR/sec-bin.zip" - THIS_SEC_DIR="$THIS/images" THIS_SEC_BIN="$THIS_SEC_DIR/sec-bin.zip" - fi - if [ "$OPENJDK_TARGET_OS" = "windows" ]; then - if [ "$OPENJDK_TARGET_CPU" = "x86_64" ]; then - JGSS_WINDOWS_BIN="jgss-windows-x64-bin.zip" - else - JGSS_WINDOWS_BIN="jgss-windows-i586-bin.zip" + if [ "$OPENJDK_TARGET_OS" = "windows" ]; then + if [ "$OPENJDK_TARGET_CPU" = "x86_64" ]; then + JGSS_WINDOWS_BIN="jgss-windows-x64-bin.zip" + else + JGSS_WINDOWS_BIN="jgss-windows-i586-bin.zip" + fi + OTHER_SEC_WINDOWS_BIN="$OTHER_SEC_DIR/sec-windows-bin.zip" + OTHER_JGSS_WINDOWS_BIN="$OTHER_SEC_DIR/$JGSS_WINDOWS_BIN" + THIS_SEC_WINDOWS_BIN="$THIS_SEC_DIR/sec-windows-bin.zip" + THIS_JGSS_WINDOWS_BIN="$THIS_SEC_DIR/$JGSS_WINDOWS_BIN" fi - OTHER_SEC_WINDOWS_BIN="$OTHER_SEC_DIR/sec-windows-bin.zip" - OTHER_JGSS_WINDOWS_BIN="$OTHER_SEC_DIR/$JGSS_WINDOWS_BIN" - THIS_SEC_WINDOWS_BIN="$THIS_SEC_DIR/sec-windows-bin.zip" - THIS_JGSS_WINDOWS_BIN="$THIS_SEC_DIR/$JGSS_WINDOWS_BIN" fi if [ -d "$THIS/images/docs" ] && [ -d "$OTHER/images/docs" ]; then
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/test/JtregGraalUnit.gmk Thu Jul 12 11:09:23 2018 -0700 @@ -0,0 +1,144 @@ +# +# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + +################################################################################ +# This file builds Graal component of the JTReg tests for Hotspot. +# It also covers the test-image part, where the built files are copied to the +# test image. +################################################################################ + +default: all + +include $(SPEC) +include MakeBase.gmk +include SetupJavaCompilers.gmk + +TARGETS_BUILD := +TARGETS_IMAGE := +TARGETS_EXTRA_LIB := + +ifeq ($(INCLUDE_GRAAL), true) + ifneq ($(GRAALUNIT_LIB), ) + SRC_DIR := $(TOPDIR)/src/jdk.internal.vm.compiler/share/classes + TEST_DIR := $(TOPDIR)/test/hotspot/jtreg/compiler/graalunit + COMPILE_OUTPUTDIR := $(SUPPORT_OUTPUTDIR)/test/graalunit + LIB_OUTPUTDIR := $(TEST_IMAGE_DIR)/hotspot/jtreg/graal + + ### Copy 3rd party libs + $(eval $(call SetupCopyFiles, COPY_GRAALUNIT_LIBS, \ + FILES := $(wildcard $(GRAALUNIT_LIB)/*.jar), \ + DEST := $(LIB_OUTPUTDIR), \ + )) + + TARGETS_EXTRA_LIB += $(COPY_GRAALUNIT_LIBS) + + ### Compile and build graalunit tests + $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_TESTS, \ + SETUP := GENERATE_USINGJDKBYTECODE, \ + SRC := \ + $(SRC_DIR)/jdk.internal.vm.compiler.collections.test/src \ + $(SRC_DIR)/org.graalvm.compiler.api.directives.test/src \ + $(SRC_DIR)/org.graalvm.compiler.api.test/src \ + $(SRC_DIR)/org.graalvm.compiler.asm.aarch64.test/src \ + $(SRC_DIR)/org.graalvm.compiler.asm.amd64.test/src \ + $(SRC_DIR)/org.graalvm.compiler.asm.sparc.test/src \ + $(SRC_DIR)/org.graalvm.compiler.asm.test/src \ + $(SRC_DIR)/org.graalvm.compiler.core.amd64.test/src \ + $(SRC_DIR)/org.graalvm.compiler.core.test/src \ + $(SRC_DIR)/org.graalvm.compiler.debug.test/src \ + $(SRC_DIR)/org.graalvm.compiler.graph.test/src \ + $(SRC_DIR)/org.graalvm.compiler.hotspot.amd64.test/src \ + $(SRC_DIR)/org.graalvm.compiler.hotspot.lir.test/src \ + $(SRC_DIR)/org.graalvm.compiler.hotspot.sparc.test/src \ + $(SRC_DIR)/org.graalvm.compiler.hotspot.test/src \ + $(SRC_DIR)/org.graalvm.compiler.lir.test/src \ + $(SRC_DIR)/org.graalvm.compiler.loop.test/src \ + $(SRC_DIR)/org.graalvm.compiler.nodes.test/src \ + $(SRC_DIR)/org.graalvm.compiler.options.test/src \ + 
$(SRC_DIR)/org.graalvm.compiler.phases.common.test/src \ + $(SRC_DIR)/org.graalvm.compiler.replacements.test/src \ + $(SRC_DIR)/org.graalvm.compiler.test/src \ + $(SRC_DIR)/org.graalvm.util.test/src \ + $(SRC_DIR)/org.graalvm.compiler.jtt/src \ + $(SRC_DIR)/org.graalvm.compiler.lir.jtt/src \ + , \ + BIN := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \ + JAR := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests.jar, \ + CLASSPATH := \ + $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.compiler \ + $(JDK_OUTPUTDIR)/modules/jdk.internal.vm.ci \ + $(LIB_OUTPUTDIR)/junit-4.12.jar \ + $(LIB_OUTPUTDIR)/asm-5.0.4.jar \ + $(LIB_OUTPUTDIR)/asm-tree-5.0.4.jar \ + $(LIB_OUTPUTDIR)/java-allocation-instrumenter.jar \ + $(LIB_OUTPUTDIR)/hamcrest-core-1.3.jar \ + , \ + ADD_JAVAC_FLAGS := \ + -Xlint:none -processorpath \ + $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar \ + --add-exports jdk.unsupported/sun.misc=ALL-UNNAMED \ + , \ + )) + + TARGETS_BUILD += $(BUILD_VM_COMPILER_TESTS) + + + ### Compile and build mxtool + $(eval $(call SetupJavaCompilation, BUILD_MXTOOL, \ + SETUP := GENERATE_USINGJDKBYTECODE, \ + SRC := $(TEST_DIR)/com.oracle.mxtool.junit, \ + BIN := $(COMPILE_OUTPUTDIR)/com.oracle.mxtool.junit, \ + JAR := $(COMPILE_OUTPUTDIR)/com.oracle.mxtool.junit.jar, \ + CLASSPATH := $(LIB_OUTPUTDIR)/junit-4.12.jar, \ + )) + + TARGETS_BUILD += $(BUILD_MXTOOL) + + + ################################################################################ + # Targets for building test-image. + ################################################################################ + + # Copy to hotspot jtreg test image + $(eval $(call SetupCopyFiles, COPY_HOTSPOT_JTREG_GRAAL, \ + SRC := $(COMPILE_OUTPUTDIR), \ + DEST := $(LIB_OUTPUTDIR), \ + FILES := jdk.vm.compiler.tests.jar com.oracle.mxtool.junit.jar, \ + )) + + TARGETS_IMAGE += $(COPY_HOTSPOT_JTREG_GRAAL) + else + $(info Skip building of Graal unit tests because 3rd party libraries directory is not specified) + endif +endif + +$(TARGETS_BUILD): $(TARGETS_EXTRA_LIB) +build-test-hotspot-jtreg-graal: $(TARGETS_BUILD) +test-image-hotspot-jtreg-graal: $(TARGETS_IMAGE) + +all: build-test-hotspot-jtreg-graal +test-image: test-image-hotspot-jtreg-graal + +.PHONY: default all build-test-hotspot-jtreg-graal test-image-hotspot-jtreg-graal test-image
--- a/make/test/JtregNativeHotspot.gmk Tue Jul 03 16:09:25 2018 +0530 +++ b/make/test/JtregNativeHotspot.gmk Thu Jul 12 11:09:23 2018 -0700 @@ -836,6 +836,10 @@ ################################################################################ +ifeq ($(TOOLCHAIN_TYPE), solstudio) + BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libji06t001 += -erroff=E_END_OF_LOOP_CODE_NOT_REACHED +endif + # Platform specific setup ifneq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_ARCH), solaris-sparc) BUILD_HOTSPOT_JTREG_EXCLUDE += liboverflow.c exeThreadSignalMask.c @@ -858,9 +862,13 @@ BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exesigtest := -ljvm +ifeq ($(OPENJDK_TARGET_OS), solaris) + BUILD_HOTSPOT_JTREG_EXCLUDE += libterminatedThread.c +endif + ifeq ($(OPENJDK_TARGET_OS), windows) BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT - BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c + BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c libterminatedThread.c else BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libbootclssearch_agent += -lpthread @@ -1494,6 +1502,7 @@ BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libvmdeath001 += -lpthread BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libgetphase001 += -lpthread BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libgetphase002 += -lpthread + BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libterminatedThread += -lpthread endif $(eval $(call SetupTestFilesCompilation, BUILD_HOTSPOT_JTREG_LIBRARIES, \
--- a/src/hotspot/cpu/aarch64/aarch64.ad Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/aarch64/aarch64.ad Thu Jul 12 11:09:23 2018 -0700 @@ -1471,7 +1471,7 @@ // Ctl+Mem to a StoreB node (which does the actual card mark). // // n.b. a StoreCM node will only appear in this configuration when - // using CMS. StoreCM differs from a normal card mark write (StoreB) + // using CMS or G1. StoreCM differs from a normal card mark write (StoreB) // because it implies a requirement to order visibility of the card // mark (StoreCM) relative to the object put (StoreP/N) using a // StoreStore memory barrier (arguably this ought to be represented @@ -1481,16 +1481,12 @@ // the sequence // // dmb ishst - // stlrb - // - // However, in the case of a volatile put if we can recognise this - // configuration and plant an stlr for the object write then we can - // omit the dmb and just plant an strb since visibility of the stlr - // is ordered before visibility of subsequent stores. StoreCM nodes - // also arise when using G1 or using CMS with conditional card - // marking. In these cases (as we shall see) we don't need to insert - // the dmb when translating StoreCM because there is already an - // intervening StoreLoad barrier between it and the StoreP/N. + // strb + // + // However, when using G1 or CMS with conditional card marking (as + // we shall see) we don't need to insert the dmb when translating + // StoreCM because there is already an intervening StoreLoad barrier + // between it and the StoreP/N. // // It is also possible to perform the card mark conditionally on it // currently being unmarked in which case the volatile put graph @@ -2868,50 +2864,17 @@ { assert(storecm->Opcode() == Op_StoreCM, "expecting a StoreCM"); - // we only ever need to generate a dmb ishst between an object put - // and the associated card mark when we are using CMS without - // conditional card marking - - if (!UseConcMarkSweepGC || UseCondCardMark) { - return true; - } - - // if we are implementing volatile puts using barriers then the - // object put is an str so we must insert the dmb ishst - - if (UseBarriersForVolatile) { + // we need to generate a dmb ishst between an object put and the + // associated card mark when we are using CMS without conditional + // card marking + + if (UseConcMarkSweepGC && !UseCondCardMark) { return false; } - // we can omit the dmb ishst if this StoreCM is part of a volatile - // put because in thta case the put will be implemented by stlr - // - // we need to check for a normal subgraph feeding this StoreCM. - // that means the StoreCM must be fed Memory from a leading membar, - // either a MemBarRelease or its dependent MemBarCPUOrder, and the - // leading membar must be part of a normal subgraph - - Node *x = storecm->in(StoreNode::Memory); - - if (!x->is_Proj()) { - return false; - } - - x = x->in(0); - - if (!x->is_MemBar()) { - return false; - } - - MemBarNode *leading = x->as_MemBar(); - - // reject invalid candidates - if (!leading_membar(leading)) { - return false; - } - - // we can omit the StoreStore if it is the head of a normal subgraph - return (leading_to_normal(leading) != NULL); + // a storestore is unnecesary in all other cases + + return true; }
--- a/src/hotspot/cpu/ppc/vm_version_ext_ppc.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/ppc/vm_version_ext_ppc.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -25,19 +25,30 @@ #include "jvm.h" #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" +#include "runtime/vm_version.hpp" #include "vm_version_ext_ppc.hpp" // VM_Version_Ext statics int VM_Version_Ext::_no_of_threads = 0; int VM_Version_Ext::_no_of_cores = 0; int VM_Version_Ext::_no_of_sockets = 0; +bool VM_Version_Ext::_initialized = false; char VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0}; char VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0}; // get cpu information. -bool VM_Version_Ext::initialize_cpu_information(void) { - // Not yet implemented. - return false; +void VM_Version_Ext::initialize_cpu_information(void) { + // do nothing if cpu info has been initialized + if (_initialized) { + return; + } + + _no_of_cores = os::processor_count(); + _no_of_threads = _no_of_cores; + _no_of_sockets = _no_of_cores; + snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE, "PowerPC POWER%lu", PowerArchitecturePPC64); + snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "PPC %s", features_string()); + _initialized = true; } int VM_Version_Ext::number_of_threads(void) { @@ -56,9 +67,7 @@ } const char* VM_Version_Ext::cpu_name(void) { - if (!initialize_cpu_information()) { - return NULL; - } + initialize_cpu_information(); char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing); if (NULL == tmp) { return NULL; @@ -68,9 +77,7 @@ } const char* VM_Version_Ext::cpu_description(void) { - if (!initialize_cpu_information()) { - return NULL; - } + initialize_cpu_information(); char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing); if (NULL == tmp) { return NULL;
--- a/src/hotspot/cpu/ppc/vm_version_ext_ppc.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/ppc/vm_version_ext_ppc.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -43,10 +43,11 @@ static int _no_of_threads; static int _no_of_cores; static int _no_of_sockets; + static bool _initialized; static char _cpu_name[CPU_TYPE_DESC_BUF_SIZE]; static char _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE]; - static bool initialize_cpu_information(void); + static void initialize_cpu_information(void); public:
--- a/src/hotspot/cpu/s390/s390.ad Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/s390/s390.ad Thu Jul 12 11:09:23 2018 -0700 @@ -9839,7 +9839,7 @@ match(Set index (PartialSubtypeCheck sub super)); effect(KILL pcc, KILL scratch1, KILL scratch2); ins_cost(10 * DEFAULT_COST); - size(12); + // TODO: s390 port size(FIXED_SIZE); format %{ " CALL PartialSubtypeCheck\n" %} ins_encode %{ AddressLiteral stub_address(StubRoutines::zarch::partial_subtype_check());
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -3636,7 +3636,7 @@ NearLabel subtype, no_such_interface; - __ check_klass_subtype(klass, interface, Z_tmp_2, Z_tmp_3, subtype); + __ check_klass_subtype(klass, interface, Z_tmp_2, flags/*scratch*/, subtype); // If we get here the typecheck failed __ z_bru(no_such_interface); __ bind(subtype); @@ -3649,7 +3649,6 @@ __ bind(notVFinal); // Get receiver klass into klass - also a null check. - __ restore_locals(); __ load_klass(klass, receiver); __ lookup_interface_method(klass, interface, noreg, noreg, /*temp*/Z_ARG1, @@ -3680,7 +3679,7 @@ // interpreter entry point and a conditional jump to it in case of a null // method. __ compareU64_and_branch(method2, (intptr_t) 0, - Assembler::bcondZero, no_such_method); + Assembler::bcondZero, no_such_method); __ profile_arguments_type(Z_tmp_1, method2, Z_tmp_2, true); @@ -3695,8 +3694,6 @@ __ bind(no_such_method); // Throw exception. - __ restore_bcp(); // Bcp must be correct for exception handler (was destroyed). - __ restore_locals(); // Make sure locals pointer is correct as well (was destroyed). // Pass arguments for generating a verbose error message. __ z_lgr(Z_tmp_1, method); // Prevent register clash. __ call_VM(noreg, @@ -3709,8 +3706,6 @@ __ bind(no_such_interface); // Throw exception. - __ restore_bcp(); // Bcp must be correct for exception handler (was destroyed). - __ restore_locals(); // Make sure locals pointer is correct as well (was destroyed). // Pass arguments for generating a verbose error message. __ call_VM(noreg, CAST_FROM_FN_PTR(address,
--- a/src/hotspot/cpu/s390/vm_version_ext_s390.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/s390/vm_version_ext_s390.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -31,13 +31,23 @@ int VM_Version_Ext::_no_of_threads = 0; int VM_Version_Ext::_no_of_cores = 0; int VM_Version_Ext::_no_of_sockets = 0; +bool VM_Version_Ext::_initialized = false; char VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0}; char VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0}; // get cpu information. -bool VM_Version_Ext::initialize_cpu_information(void) { - // Not yet implemented. - return false; +void VM_Version_Ext::initialize_cpu_information(void) { + // do nothing if cpu info has been initialized + if (_initialized) { + return; + } + + _no_of_cores = os::processor_count(); + _no_of_threads = _no_of_cores; + _no_of_sockets = _no_of_cores; + snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE, "s390 %s", VM_Version::get_model_string()); + snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "zArch %s", features_string()); + _initialized = true; } int VM_Version_Ext::number_of_threads(void) { @@ -56,9 +66,7 @@ } const char* VM_Version_Ext::cpu_name(void) { - if (!initialize_cpu_information()) { - return NULL; - } + initialize_cpu_information(); char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing); if (NULL == tmp) { return NULL; @@ -68,9 +76,7 @@ } const char* VM_Version_Ext::cpu_description(void) { - if (!initialize_cpu_information()) { - return NULL; - } + initialize_cpu_information(); char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing); if (NULL == tmp) { return NULL;
--- a/src/hotspot/cpu/s390/vm_version_ext_s390.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/s390/vm_version_ext_s390.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -43,10 +43,11 @@ static int _no_of_threads; static int _no_of_cores; static int _no_of_sockets; + static bool _initialized; static char _cpu_name[CPU_TYPE_DESC_BUF_SIZE]; static char _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE]; - static bool initialize_cpu_information(void); + static void initialize_cpu_information(void); public:
--- a/src/hotspot/cpu/s390/vm_version_s390.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/s390/vm_version_s390.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2017 SAP SE. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,6 +36,7 @@ # include <sys/sysinfo.h> bool VM_Version::_is_determine_features_test_running = false; +const char* VM_Version::_model_string; unsigned long VM_Version::_features[_features_buffer_len] = {0, 0, 0, 0}; unsigned long VM_Version::_cipher_features[_features_buffer_len] = {0, 0, 0, 0}; @@ -210,6 +211,10 @@ FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); } + if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA, false); + } + if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) { FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true); } @@ -244,32 +249,40 @@ void VM_Version::set_features_string() { unsigned int ambiguity = 0; + _model_string = z_name[0]; if (is_z13()) { _features_string = "System z G7-z13 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB, Cond Load/Store, Interlocked Update, TxM, VectorInstr)"; + _model_string = z_name[7]; ambiguity++; } if (is_ec12()) { _features_string = "System z G6-EC12 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB, Cond Load/Store, Interlocked Update, TxM)"; + _model_string = z_name[6]; ambiguity++; } if (is_z196()) { _features_string = "System z G5-z196 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB, Cond Load/Store, Interlocked Update)"; + _model_string = z_name[5]; ambiguity++; } if (is_z10()) { _features_string = "System z G4-z10 (LDISP_fast, ExtImm, PCrel Load/Store, CmpB)"; + _model_string = z_name[4]; ambiguity++; } if (is_z9()) { _features_string = "System z G3-z9 (LDISP_fast, ExtImm), out-of-support as of 2016-04-01"; + _model_string = z_name[3]; ambiguity++; } if (is_z990()) { _features_string = "System z G2-z990 (LDISP_fast), out-of-support as of 2014-07-01"; + _model_string = z_name[2]; ambiguity++; } if (is_z900()) { _features_string = "System z G1-z900 (LDISP), out-of-support as of 2014-07-01"; + _model_string = z_name[1]; ambiguity++; }
--- a/src/hotspot/cpu/s390/vm_version_s390.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/s390/vm_version_s390.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016 SAP SE. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -131,6 +131,7 @@ static unsigned int _Dcache_lineSize; static unsigned int _Icache_lineSize; static bool _is_determine_features_test_running; + static const char* _model_string; static bool test_feature_bit(unsigned long* featureBuffer, int featureNum, unsigned int bufLen); static void set_features_string(); @@ -346,6 +347,7 @@ static bool is_determine_features_test_running() { return _is_determine_features_test_running; } // CPU feature query functions + static const char* get_model_string() { return _model_string; } static bool has_StoreFacilityListExtended() { return (_features[0] & StoreFacilityListExtendedMask) == StoreFacilityListExtendedMask; } static bool has_Crypto() { return (_features[0] & CryptoFacilityMask) == CryptoFacilityMask; } static bool has_ETF2() { return (_features[0] & ETF2Mask) == ETF2Mask; }
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -440,8 +440,6 @@ verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3); } - BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); - Register temp3_intf = temp3; __ load_heap_oop(temp3_intf, member_clazz); load_klass_from_Class(_masm, temp3_intf);
--- a/src/hotspot/os/aix/os_aix.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os/aix/os_aix.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -576,7 +576,9 @@ } } Arguments::set_java_home(buf); - set_boot_path('/', ':'); + if (!set_boot_path('/', ':')) { + vm_exit_during_initialization("Failed setting boot class path.", NULL); + } } // Where to look for native libraries. @@ -1208,22 +1210,6 @@ ::abort(); } -// This method is a copy of JDK's sysGetLastErrorString -// from src/solaris/hpi/src/system_md.c - -size_t os::lasterror(char *buf, size_t len) { - if (errno == 0) return 0; - - const char *s = os::strerror(errno); - size_t n = ::strlen(s); - if (n >= len) { - n = len - 1; - } - ::strncpy(buf, s, n); - buf[n] = '\0'; - return n; -} - intx os::current_thread_id() { return (intx)pthread_self(); }
--- a/src/hotspot/os/bsd/os_bsd.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os/bsd/os_bsd.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -372,7 +372,9 @@ } } Arguments::set_java_home(buf); - set_boot_path('/', ':'); + if (!set_boot_path('/', ':')) { + vm_exit_during_initialization("Failed setting boot class path.", NULL); + } } // Where to look for native libraries. @@ -1081,22 +1083,6 @@ ::abort(); } -// This method is a copy of JDK's sysGetLastErrorString -// from src/solaris/hpi/src/system_md.c - -size_t os::lasterror(char *buf, size_t len) { - if (errno == 0) return 0; - - const char *s = os::strerror(errno); - size_t n = ::strlen(s); - if (n >= len) { - n = len - 1; - } - ::strncpy(buf, s, n); - buf[n] = '\0'; - return n; -} - // Information of current thread in variety of formats pid_t os::Bsd::gettid() { int retval = -1;
--- a/src/hotspot/os/linux/os_linux.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os/linux/os_linux.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -367,7 +367,9 @@ } } Arguments::set_java_home(buf); - set_boot_path('/', ':'); + if (!set_boot_path('/', ':')) { + vm_exit_during_initialization("Failed setting boot class path.", NULL); + } } // Where to look for native libraries. @@ -1419,23 +1421,6 @@ ::abort(); } - -// This method is a copy of JDK's sysGetLastErrorString -// from src/solaris/hpi/src/system_md.c - -size_t os::lasterror(char *buf, size_t len) { - if (errno == 0) return 0; - - const char *s = os::strerror(errno); - size_t n = ::strlen(s); - if (n >= len) { - n = len - 1; - } - ::strncpy(buf, s, n); - buf[n] = '\0'; - return n; -} - // thread_id is kernel thread id (similar to Solaris LWP id) intx os::current_thread_id() { return os::Linux::gettid(); } int os::current_process_id() { @@ -5572,14 +5557,18 @@ static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time); -static clockid_t thread_cpu_clockid(Thread* thread) { - pthread_t tid = thread->osthread()->pthread_id(); - clockid_t clockid; - - // Get thread clockid - int rc = os::Linux::pthread_getcpuclockid(tid, &clockid); - assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code"); - return clockid; +static jlong fast_cpu_time(Thread *thread) { + clockid_t clockid; + int rc = os::Linux::pthread_getcpuclockid(thread->osthread()->pthread_id(), + &clockid); + if (rc == 0) { + return os::Linux::fast_thread_cpu_time(clockid); + } else { + // It's possible to encounter a terminated native thread that failed + // to detach itself from the VM - which should result in ESRCH. + assert_status(rc == ESRCH, rc, "pthread_getcpuclockid failed"); + return -1; + } } // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) @@ -5601,7 +5590,7 @@ jlong os::thread_cpu_time(Thread* thread) { // consistent with what current_thread_cpu_time() returns if (os::Linux::supports_fast_thread_cpu_time()) { - return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread)); + return fast_cpu_time(thread); } else { return slow_thread_cpu_time(thread, true /* user + sys */); } @@ -5617,7 +5606,7 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { if (user_sys_cpu_time && os::Linux::supports_fast_thread_cpu_time()) { - return os::Linux::fast_thread_cpu_time(thread_cpu_clockid(thread)); + return fast_cpu_time(thread); } else { return slow_thread_cpu_time(thread, user_sys_cpu_time); }
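The new fast_cpu_time() helper above folds the clockid lookup and the fast-path read together and degrades gracefully when the target thread has already terminated. A minimal, self-contained sketch (not HotSpot code) of the underlying POSIX pattern it relies on, pthread_getcpuclockid() plus clock_gettime(), with illustrative error handling only:

    #include <pthread.h>
    #include <time.h>
    #include <stdio.h>

    // Returns the CPU time consumed by 'thread' in nanoseconds, or -1 if the
    // per-thread clock cannot be resolved (e.g. ESRCH for a native thread
    // that has already exited without detaching).
    static long long thread_cpu_time_ns(pthread_t thread) {
      clockid_t clockid;
      if (pthread_getcpuclockid(thread, &clockid) != 0) {
        return -1;
      }
      struct timespec ts;
      if (clock_gettime(clockid, &ts) != 0) {
        return -1;
      }
      return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    int main() {
      printf("self cpu time: %lld ns\n", thread_cpu_time_ns(pthread_self()));
      return 0;
    }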
--- a/src/hotspot/os/posix/os_posix.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os/posix/os_posix.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -150,6 +150,19 @@ return errno; } +size_t os::lasterror(char *buf, size_t len) { + if (errno == 0) return 0; + + const char *s = os::strerror(errno); + size_t n = ::strlen(s); + if (n >= len) { + n = len - 1; + } + ::strncpy(buf, s, n); + buf[n] = '\0'; + return n; +} + bool os::is_debugger_attached() { // not implemented return false;
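With this hunk, os::lasterror() is shared by all POSIX ports from os_posix.cpp instead of being duplicated per platform. A hedged, standalone sketch of the kind of call site it serves; the local lasterror() below simply mirrors the body shown in the hunk so the example compiles on its own and is not HotSpot's actual entry point:

    #include <cerrno>
    #include <cstring>
    #include <cstdio>
    #include <fcntl.h>

    // Copy a bounded, NUL-terminated description of errno into 'buf'.
    static size_t lasterror(char* buf, size_t len) {
      if (errno == 0) return 0;
      const char* s = strerror(errno);
      size_t n = strlen(s);
      if (n >= len) {
        n = len - 1;
      }
      strncpy(buf, s, n);
      buf[n] = '\0';
      return n;
    }

    int main() {
      if (::open("/no/such/path", O_RDONLY) == -1) {
        char ebuf[256];
        lasterror(ebuf, sizeof(ebuf));
        printf("open failed: %s\n", ebuf);
      }
      return 0;
    }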
--- a/src/hotspot/os/solaris/os_solaris.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os/solaris/os_solaris.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -580,7 +580,9 @@ } } Arguments::set_java_home(buf); - set_boot_path('/', ':'); + if (!set_boot_path('/', ':')) { + vm_exit_during_initialization("Failed setting boot class path.", NULL); + } } // Where to look for native libraries. @@ -2010,23 +2012,6 @@ // no suffix required } -// This method is a copy of JDK's sysGetLastErrorString -// from src/solaris/hpi/src/system_md.c - -size_t os::lasterror(char *buf, size_t len) { - if (errno == 0) return 0; - - const char *s = os::strerror(errno); - size_t n = ::strlen(s); - if (n >= len) { - n = len - 1; - } - ::strncpy(buf, s, n); - buf[n] = '\0'; - return n; -} - - // sun.misc.Signal extern "C" {
--- a/src/hotspot/os/windows/os_windows.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os/windows/os_windows.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -230,7 +230,7 @@ FREE_C_HEAP_ARRAY(char, dll_path); if (!set_boot_path('\\', ';')) { - return; + vm_exit_during_initialization("Failed setting boot class path.", NULL); } }
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -28,7 +28,6 @@ #include "gc/z/zErrno.hpp" #include "gc/z/zLargePages.inline.hpp" #include "logging/log.hpp" -#include "runtime/init.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" @@ -47,10 +46,6 @@ // Sysfs file for transparent huge page on tmpfs #define ZFILENAME_SHMEM_ENABLED "/sys/kernel/mm/transparent_hugepage/shmem_enabled" -// Default mount points -#define ZMOUNTPOINT_TMPFS "/dev/shm" -#define ZMOUNTPOINT_HUGETLBFS "/hugepages" - // Java heap filename #define ZFILENAME_HEAP "java_heap" @@ -79,13 +74,30 @@ #define HUGETLBFS_MAGIC 0x958458f6 #endif +// Preferred tmpfs mount points, ordered by priority +static const char* z_preferred_tmpfs_mountpoints[] = { + "/dev/shm", + "/run/shm", + NULL +}; + +// Preferred hugetlbfs mount points, ordered by priority +static const char* z_preferred_hugetlbfs_mountpoints[] = { + "/dev/hugepages", + "/hugepages", + NULL +}; + static int z_memfd_create(const char *name, unsigned int flags) { return syscall(__NR_memfd_create, name, flags); } +bool ZBackingFile::_hugetlbfs_mmap_retry = true; + ZBackingFile::ZBackingFile() : _fd(-1), _filesystem(0), + _available(0), _initialized(false) { // Create backing file @@ -94,39 +106,47 @@ return; } - // Get filesystem type + // Get filesystem statistics struct statfs statfs_buf; if (fstatfs(_fd, &statfs_buf) == -1) { ZErrno err; - log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", err.to_string()); + log_error(gc, init)("Failed to determine filesystem type for backing file (%s)", + err.to_string()); return; } + _filesystem = statfs_buf.f_type; + _available = statfs_buf.f_bavail * statfs_buf.f_bsize; // Make sure we're on a supported filesystem if (!is_tmpfs() && !is_hugetlbfs()) { - log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS); + log_error(gc, init)("Backing file must be located on a %s or a %s filesystem", + ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS); return; } // Make sure the filesystem type matches requested large page type if (ZLargePages::is_transparent() && !is_tmpfs()) { - log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem", ZFILESYSTEM_TMPFS); + log_error(gc, init)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem", + ZFILESYSTEM_TMPFS); return; } if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) { - log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", ZFILESYSTEM_TMPFS); + log_error(gc, init)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel", + ZFILESYSTEM_TMPFS); return; } if (ZLargePages::is_explicit() && !is_hugetlbfs()) { - log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS); + log_error(gc, init)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled when using a %s filesystem", + ZFILESYSTEM_HUGETLBFS); return; } if (!ZLargePages::is_explicit() && is_hugetlbfs()) { - log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", ZFILESYSTEM_HUGETLBFS); + log_error(gc, init)("-XX:+UseLargePages must be enabled when using a %s filesystem", + 
ZFILESYSTEM_HUGETLBFS); return; } @@ -149,17 +169,21 @@ return -1; } - log_debug(gc, init)("Heap backed by file /memfd:%s", filename); + log_info(gc, init)("Heap backed by file: /memfd:%s", filename); return fd; } int ZBackingFile::create_file_fd(const char* name) const { - const char* const filesystem = ZLargePages::is_explicit() ? ZFILESYSTEM_HUGETLBFS : ZFILESYSTEM_TMPFS; - const char* const mountpoint = ZLargePages::is_explicit() ? ZMOUNTPOINT_HUGETLBFS : ZMOUNTPOINT_TMPFS; + const char* const filesystem = ZLargePages::is_explicit() + ? ZFILESYSTEM_HUGETLBFS + : ZFILESYSTEM_TMPFS; + const char** const preferred_mountpoints = ZLargePages::is_explicit() + ? z_preferred_hugetlbfs_mountpoints + : z_preferred_tmpfs_mountpoints; // Find mountpoint - ZBackingPath path(filesystem, mountpoint); + ZBackingPath path(filesystem, preferred_mountpoints); if (path.get() == NULL) { log_error(gc, init)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem); return -1; @@ -181,7 +205,7 @@ return -1; } - log_debug(gc, init)("Heap backed by file %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino); + log_info(gc, init)("Heap backed by file: %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino); return fd_anon; } @@ -207,7 +231,7 @@ return -1; } - log_debug(gc, init)("Heap backed by file %s", filename); + log_info(gc, init)("Heap backed by file: %s", filename); return fd; } @@ -238,6 +262,10 @@ return _fd; } +size_t ZBackingFile::available() const { + return _available; +} + bool ZBackingFile::is_tmpfs() const { return _filesystem == TMPFS_MAGIC; } @@ -292,12 +320,12 @@ return true; } -bool ZBackingFile::expand_tmpfs(size_t offset, size_t length) const { +bool ZBackingFile::try_expand_tmpfs(size_t offset, size_t length) const { assert(is_tmpfs(), "Wrong filesystem"); return try_expand_tmpfs(offset, length, os::vm_page_size()); } -bool ZBackingFile::expand_hugetlbfs(size_t offset, size_t length) const { +bool ZBackingFile::try_expand_hugetlbfs(size_t offset, size_t length) const { assert(is_hugetlbfs(), "Wrong filesystem"); // Prior to kernel 4.3, hugetlbfs did not support posix_fallocate(). @@ -320,11 +348,11 @@ // process being returned to the huge page pool and made available for new // allocations. void* addr = MAP_FAILED; - const int max_attempts = 3; + const int max_attempts = 5; for (int attempt = 1; attempt <= max_attempts; attempt++) { addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset); - if (addr != MAP_FAILED || is_init_completed()) { - // Mapping was successful or initialization phase has completed + if (addr != MAP_FAILED || !_hugetlbfs_mmap_retry) { + // Mapping was successful or mmap retry is disabled break; } @@ -337,6 +365,11 @@ sleep(1); } + // Disable mmap retry from now on + if (_hugetlbfs_mmap_retry) { + _hugetlbfs_mmap_retry = false; + } + if (addr == MAP_FAILED) { // Not enough huge pages left ZErrno err; @@ -355,6 +388,39 @@ return true; } -bool ZBackingFile::expand(size_t offset, size_t length) const { - return is_hugetlbfs() ? expand_hugetlbfs(offset, length) : expand_tmpfs(offset, length); +bool ZBackingFile::try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const { + assert(is_aligned(offset, alignment), "Invalid offset"); + assert(is_aligned(length, alignment), "Invalid length"); + + log_debug(gc)("Expanding heap from " SIZE_FORMAT "M to " SIZE_FORMAT "M", offset / M, (offset + length) / M); + + return is_hugetlbfs() ? 
try_expand_hugetlbfs(offset, length) : try_expand_tmpfs(offset, length); } + +size_t ZBackingFile::try_expand(size_t offset, size_t length, size_t alignment) const { + size_t start = offset; + size_t end = offset + length; + + // Try to expand + if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) { + // Success + return end; + } + + // Failed, try to expand as much as possible + for (;;) { + length = align_down((end - start) / 2, alignment); + if (length < alignment) { + // Done, don't expand more + return start; + } + + if (try_expand_tmpfs_or_hugetlbfs(start, length, alignment)) { + // Success, try expand more + start += length; + } else { + // Failed, try expand less + end -= length; + } + } +}
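The rewritten ZBackingFile::try_expand above replaces the old all-or-nothing expand(): it first attempts the full request and, on failure, repeatedly halves the remaining range (aligned to the granule size) and keeps whatever it managed to commit. Below is a minimal standalone sketch of that back-off loop; commit_range() is a hypothetical stand-in for the filesystem-specific expansion call, and the simulated 96M limit is purely illustrative.

#include <cstddef>
#include <cstdio>

// Round down to a multiple of 'alignment' (assumed to be a power of two).
static size_t align_down(size_t value, size_t alignment) {
  return value & ~(alignment - 1);
}

// Hypothetical stand-in for the filesystem-specific expansion call
// (fallocate/ftruncate plus touching the pages); it pretends that only
// 96M of backing memory are available.
static bool commit_range(size_t offset, size_t length) {
  const size_t simulated_limit = 96 * 1024 * 1024;
  return offset + length <= simulated_limit;
}

// Try to expand [offset, offset + length). On failure, back off by halving
// the remaining range until a piece commits or the piece drops below the
// alignment. Returns the end of the committed range.
static size_t try_expand(size_t offset, size_t length, size_t alignment) {
  size_t start = offset;
  size_t end = offset + length;

  if (commit_range(start, length)) {
    return end;                      // Whole request succeeded
  }

  for (;;) {
    length = align_down((end - start) / 2, alignment);
    if (length < alignment) {
      return start;                  // Done, don't expand more
    }
    if (commit_range(start, length)) {
      start += length;               // Success, try to expand more
    } else {
      end -= length;                 // Failure, try a smaller piece
    }
  }
}

int main() {
  const size_t M = 1024 * 1024;
  const size_t committed_end = try_expand(0, 256 * M, 2 * M);
  printf("expanded to %zuM\n", committed_end / M);
  return 0;
}

Run against a 256M request with 2M alignment, the sketch settles on 96M, mirroring how try_expand() reports the largest aligned capacity it could actually back with memory so the heap size can be capped instead of failing outright.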
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,8 +28,11 @@ class ZBackingFile { private: + static bool _hugetlbfs_mmap_retry; + int _fd; uint64_t _filesystem; + size_t _available; bool _initialized; int create_mem_fd(const char* name) const; @@ -42,9 +45,9 @@ bool try_split_and_expand_tmpfs(size_t offset, size_t length, size_t alignment) const; bool try_expand_tmpfs(size_t offset, size_t length, size_t alignment) const; - bool expand_tmpfs(size_t offset, size_t length) const; - - bool expand_hugetlbfs(size_t offset, size_t length) const; + bool try_expand_tmpfs(size_t offset, size_t length) const; + bool try_expand_hugetlbfs(size_t offset, size_t length) const; + bool try_expand_tmpfs_or_hugetlbfs(size_t offset, size_t length, size_t alignment) const; public: ZBackingFile(); @@ -52,7 +55,9 @@ bool is_initialized() const; int fd() const; - bool expand(size_t offset, size_t length) const; + size_t available() const; + + size_t try_expand(size_t offset, size_t length, size_t alignment) const; }; #endif // OS_CPU_LINUX_X86_ZBACKINGFILE_LINUX_X86_HPP
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -33,13 +33,13 @@ // Mount information, see proc(5) for more details. #define PROC_SELF_MOUNTINFO "/proc/self/mountinfo" -ZBackingPath::ZBackingPath(const char* filesystem, const char* preferred_path) { +ZBackingPath::ZBackingPath(const char* filesystem, const char** preferred_mountpoints) { if (ZPath != NULL) { // Use specified path _path = strdup(ZPath); } else { // Find suitable path - _path = find_mountpoint(filesystem, preferred_path); + _path = find_mountpoint(filesystem, preferred_mountpoints); } } @@ -52,8 +52,8 @@ char* line_mountpoint = NULL; char* line_filesystem = NULL; - // Parse line and return a newly allocated string containing the mountpoint if - // the line contains a matching filesystem and the mountpoint is accessible by + // Parse line and return a newly allocated string containing the mount point if + // the line contains a matching filesystem and the mount point is accessible by // the current user. if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 || strcmp(line_filesystem, filesystem) != 0 || @@ -68,7 +68,7 @@ return line_mountpoint; } -void ZBackingPath::get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const { +void ZBackingPath::get_mountpoints(const char* filesystem, ZArray<char*>* mountpoints) const { FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r"); if (fd == NULL) { ZErrno err; @@ -98,37 +98,45 @@ mountpoints->clear(); } -char* ZBackingPath::find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const { +char* ZBackingPath::find_preferred_mountpoint(const char* filesystem, + ZArray<char*>* mountpoints, + const char** preferred_mountpoints) const { + // Find preferred mount point + ZArrayIterator<char*> iter1(mountpoints); + for (char* mountpoint; iter1.next(&mountpoint);) { + for (const char** preferred = preferred_mountpoints; *preferred != NULL; preferred++) { + if (!strcmp(mountpoint, *preferred)) { + // Preferred mount point found + return strdup(mountpoint); + } + } + } + + // Preferred mount point not found + log_error(gc, init)("More than one %s filesystem found:", filesystem); + ZArrayIterator<char*> iter2(mountpoints); + for (char* mountpoint; iter2.next(&mountpoint);) { + log_error(gc, init)(" %s", mountpoint); + } + + return NULL; +} + +char* ZBackingPath::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const { char* path = NULL; ZArray<char*> mountpoints; - get_mountpoints(&mountpoints, filesystem); + get_mountpoints(filesystem, &mountpoints); if (mountpoints.size() == 0) { - // No filesystem found + // No mount point found log_error(gc, init)("Failed to find an accessible %s filesystem", filesystem); } else if (mountpoints.size() == 1) { - // One filesystem found + // One mount point found path = strdup(mountpoints.at(0)); - } else if (mountpoints.size() > 1) { - // More than one filesystem found - ZArrayIterator<char*> iter(&mountpoints); - for (char* mountpoint; iter.next(&mountpoint);) { - if (!strcmp(mountpoint, preferred_mountpoint)) { - // Preferred mount point found - path = strdup(mountpoint); - break; - } - } - - if (path == NULL) { - // Preferred mount point not found - log_error(gc, init)("More than one %s filesystem found:", filesystem); - ZArrayIterator<char*> iter2(&mountpoints); - for (char* mountpoint; iter2.next(&mountpoint);) { - 
log_error(gc, init)(" %s", mountpoint); - } - } + } else { + // More than one mount point found + path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints); } free_mountpoints(&mountpoints);
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingPath_linux_x86.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,13 +31,19 @@ private: char* _path; - char* get_mountpoint(const char* line, const char* filesystem) const; - void get_mountpoints(ZArray<char*>* mountpoints, const char* filesystem) const; + char* get_mountpoint(const char* line, + const char* filesystem) const; + void get_mountpoints(const char* filesystem, + ZArray<char*>* mountpoints) const; void free_mountpoints(ZArray<char*>* mountpoints) const; - char* find_mountpoint(const char* filesystem, const char* preferred_mountpoint) const; + char* find_preferred_mountpoint(const char* filesystem, + ZArray<char*>* mountpoints, + const char** preferred_mountpoints) const; + char* find_mountpoint(const char* filesystem, + const char** preferred_mountpoints) const; public: - ZBackingPath(const char* filesystem, const char* preferred_path); + ZBackingPath(const char* filesystem, const char** preferred_mountpoints); ~ZBackingPath(); const char* get() const;
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -52,8 +52,15 @@ _file(), _granule_size(granule_size) { - // Check and warn if max map count seems too low + if (!_file.is_initialized()) { + return; + } + + // Check and warn if max map count is too low check_max_map_count(max_capacity, granule_size); + + // Check and warn if available space on filesystem is too low + check_available_space_on_filesystem(max_capacity); } void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const { @@ -61,7 +68,7 @@ FILE* const file = fopen(filename, "r"); if (file == NULL) { // Failed to open file, skip check - log_debug(gc)("Failed to open %s", filename); + log_debug(gc, init)("Failed to open %s", filename); return; } @@ -70,7 +77,7 @@ fclose(file); if (result != 1) { // Failed to read file, skip check - log_debug(gc)("Failed to read %s", filename); + log_debug(gc, init)("Failed to read %s", filename); return; } @@ -81,15 +88,43 @@ // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory. const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2; if (actual_max_map_count < required_max_map_count) { - log_warning(gc)("The system limit on number of memory mappings " - "per process might be too low for the given"); - log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please " - "adjust %s to allow for at least", max_capacity / M, filename); - log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). " - "Continuing execution with the current limit could", - required_max_map_count, actual_max_map_count); - log_warning(gc)("lead to a fatal error down the line, due to failed " - "attempts to map memory."); + log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****"); + log_warning(gc, init)("The system limit on number of memory mappings per process might be too low " + "for the given"); + log_warning(gc, init)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at", + max_capacity / M, filename); + log_warning(gc, init)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing " + "execution with the current", required_max_map_count, actual_max_map_count); + log_warning(gc, init)("limit could lead to a fatal error, due to failure to map memory."); + } +} + +void ZPhysicalMemoryBacking::check_available_space_on_filesystem(size_t max_capacity) const { + // Note that the available space on a tmpfs or a hugetlbfs filesystem + // will be zero if no size limit was specified when it was mounted. + const size_t available = _file.available(); + if (available == 0) { + // No size limit set, skip check + log_info(gc, init)("Available space on backing filesystem: N/A"); + return; + } + + log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", + available / M); + + // Warn if the filesystem doesn't currently have enough space available to hold + // the max heap size. The max heap size will be capped if we later hit this limit + // when trying to expand the heap. + if (available < max_capacity) { + log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****"); + log_warning(gc, init)("Not enough space available on the backing filesystem to hold the current " + "max Java heap"); + log_warning(gc, init)("size (" SIZE_FORMAT "M). 
Please adjust the size of the backing filesystem " + "accordingly (available", max_capacity / M); + log_warning(gc, init)("space is currently " SIZE_FORMAT "M). Continuing execution with the current " + "filesystem size could", available / M); + log_warning(gc, init)("lead to a premature OutOfMemoryError being thrown, due to failure to map " + "memory."); } } @@ -97,18 +132,16 @@ return _file.is_initialized(); } -bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) { - const size_t size = to - from; +size_t ZPhysicalMemoryBacking::try_expand(size_t old_capacity, size_t new_capacity) { + assert(old_capacity < new_capacity, "Invalid old/new capacity"); - // Expand - if (!_file.expand(from, size)) { - return false; + const size_t capacity = _file.try_expand(old_capacity, new_capacity - old_capacity, _granule_size); + if (capacity > old_capacity) { + // Add expanded capacity to free list + _manager.free(old_capacity, capacity - old_capacity); } - // Add expanded space to free list - _manager.free(from, size); - - return true; + return capacity; } ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
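The new _available value comes from fstatfs(): f_bavail * f_bsize is the space left on the backing filesystem, and check_available_space_on_filesystem() treats zero as "no size limit configured" and otherwise warns when it is smaller than the max heap size. Below is a minimal Linux-only sketch of that calculation; the /dev/shm path and the 4G heap size are assumptions for illustration only.

#include <fcntl.h>
#include <sys/vfs.h>     // fstatfs() on Linux
#include <unistd.h>
#include <cstdio>

// Space still available on the filesystem backing 'fd', in bytes. As noted
// in the change, a tmpfs or hugetlbfs mounted without a size limit reports 0.
static size_t available_on_filesystem(int fd) {
  struct statfs buf;
  if (fstatfs(fd, &buf) == -1) {
    perror("fstatfs");
    return 0;
  }
  return (size_t)buf.f_bavail * (size_t)buf.f_bsize;
}

int main() {
  const int fd = open("/dev/shm", O_RDONLY);      // Illustrative path only
  if (fd == -1) {
    perror("open");
    return 1;
  }
  const size_t M = 1024 * 1024;
  const size_t max_capacity = 4096 * M;           // Pretend -Xmx4g
  const size_t available = available_on_filesystem(fd);
  if (available == 0) {
    printf("Available space on backing filesystem: N/A\n");
  } else if (available < max_capacity) {
    printf("Warning: only %zuM available for a %zuM max heap\n",
           available / M, max_capacity / M);
  } else {
    printf("Available space on backing filesystem: %zuM\n", available / M);
  }
  close(fd);
  return 0;
}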
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/os_cpu/linux_x86/gc/z/zPhysicalMemoryBacking_linux_x86.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ const size_t _granule_size; void check_max_map_count(size_t max_capacity, size_t granule_size) const; + void check_available_space_on_filesystem(size_t max_capacity) const; void map_failed(ZErrno err) const; void advise_view(uintptr_t addr, size_t size) const; @@ -49,7 +50,8 @@ bool is_initialized() const; - bool expand(size_t from, size_t to); + size_t try_expand(size_t old_capacity, size_t new_capacity); + ZPhysicalMemory alloc(size_t size); void free(ZPhysicalMemory pmem);
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/aot/aotCompiledMethod.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -272,6 +272,7 @@ if (md != _method) f(md); } } else if (iter.type() == relocInfo::virtual_call_type) { + ResourceMark rm; // Check compiledIC holders associated with this nmethod CompiledIC *ic = CompiledIC_at(&iter); if (ic->is_icholder_call()) { @@ -444,6 +445,7 @@ return; } + ResourceMark rm; RelocIterator iter(this); while (iter.next()) { iter.reloc()->clear_inline_cache();
--- a/src/hotspot/share/ci/ciEnv.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/ci/ciEnv.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -938,9 +938,9 @@ _inc_decompile_count_on_failure = false; record_failure("call site target change"); } else if (Dependencies::is_klass_type(result)) { - record_failure("invalid non-klass dependency"); + record_failure("concurrent class loading"); } else { - record_failure("concurrent class loading"); + record_failure("invalid non-klass dependency"); } } }
--- a/src/hotspot/share/ci/ciStreams.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/ci/ciStreams.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -255,8 +255,7 @@ // constant. constantTag ciBytecodeStream::get_constant_pool_tag(int index) const { VM_ENTRY_MARK; - BasicType bt = _method->get_Method()->constants()->basic_type_for_constant_at(index); - return constantTag::ofBasicType(bt); + return _method->get_Method()->constants()->constant_tag_at(index); } // ------------------------------------------------------------------
--- a/src/hotspot/share/classfile/classLoader.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/classfile/classLoader.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -298,8 +298,6 @@ char *copy = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass); strcpy(copy, zip_name); _zip_name = copy; - _is_boot_append = is_boot_append; - _multi_versioned = _unknown; } ClassPathZipEntry::~ClassPathZipEntry() { @@ -338,95 +336,11 @@ return buffer; } -#if INCLUDE_CDS -u1* ClassPathZipEntry::open_versioned_entry(const char* name, jint* filesize, TRAPS) { - u1* buffer = NULL; - if (DumpSharedSpaces && !_is_boot_append) { - // We presume default is multi-release enabled - const char* multi_ver = Arguments::get_property("jdk.util.jar.enableMultiRelease"); - const char* verstr = Arguments::get_property("jdk.util.jar.version"); - bool is_multi_ver = (multi_ver == NULL || - strcmp(multi_ver, "true") == 0 || - strcmp(multi_ver, "force") == 0) && - is_multiple_versioned(THREAD); - // command line version setting - int version = 0; - const int base_version = 8; // JDK8 - int cur_ver = JDK_Version::current().major_version(); - if (verstr != NULL) { - version = atoi(verstr); - if (version < base_version || version > cur_ver) { - // If the specified version is lower than the base version, the base - // entry will be used; if the version is higher than the current - // jdk version, the highest versioned entry will be used. - if (version < base_version) { - is_multi_ver = false; - } - // print out warning, do not use assertion here since it will continue to look - // for proper version. - warning("JDK%d is not supported in multiple version jars", version); - } - } - - if (is_multi_ver) { - int n; - const char* version_entry = "META-INF/versions/"; - // 10 is the max length of a decimal 32-bit non-negative number - // 2 includes the '/' and trailing zero - size_t entry_name_len = strlen(version_entry) + 10 + strlen(name) + 2; - char* entry_name = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, entry_name_len); - if (version > 0) { - n = jio_snprintf(entry_name, entry_name_len, "%s%d/%s", version_entry, version, name); - entry_name[n] = '\0'; - buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL); - if (buffer == NULL) { - warning("Could not find %s in %s, try to find highest version instead", entry_name, _zip_name); - } - } - if (buffer == NULL) { - for (int i = cur_ver; i >= base_version; i--) { - n = jio_snprintf(entry_name, entry_name_len, "%s%d/%s", version_entry, i, name); - entry_name[n] = '\0'; - buffer = open_entry((const char*)entry_name, filesize, false, CHECK_NULL); - if (buffer != NULL) { - break; - } - } - } - FREE_RESOURCE_ARRAY(char, entry_name, entry_name_len); - } - } - return buffer; -} - -bool ClassPathZipEntry::is_multiple_versioned(TRAPS) { - assert(DumpSharedSpaces, "called only at dump time"); - if (_multi_versioned != _unknown) { - return (_multi_versioned == _yes) ? 
true : false; - } - jint size; - char* buffer = (char*)open_entry("META-INF/MANIFEST.MF", &size, true, CHECK_false); - if (buffer != NULL) { - char* p = buffer; - for ( ; *p; ++p) *p = tolower(*p); - if (strstr(buffer, "multi-release: true") != NULL) { - _multi_versioned = _yes; - return true; - } - } - _multi_versioned = _no; - return false; -} -#endif // INCLUDE_CDS - ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) { jint filesize; - u1* buffer = open_versioned_entry(name, &filesize, CHECK_NULL); + u1* buffer = open_entry(name, &filesize, false, CHECK_NULL); if (buffer == NULL) { - buffer = open_entry(name, &filesize, false, CHECK_NULL); - if (buffer == NULL) { - return NULL; - } + return NULL; } if (UsePerfData) { ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize); @@ -635,6 +549,7 @@ void ClassLoader::setup_bootstrap_search_path() { const char* sys_class_path = Arguments::get_sysclasspath(); + assert(sys_class_path != NULL, "System boot class path must not be NULL"); if (PrintSharedArchiveAndExit) { // Don't print sys_class_path - this is the bootcp of this current VM process, not necessarily // the same as the bootcp of the shared archive.
--- a/src/hotspot/share/classfile/classLoader.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/classfile/classLoader.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -94,17 +94,9 @@ } jzentry; class ClassPathZipEntry: public ClassPathEntry { - enum { - _unknown = 0, - _yes = 1, - _no = 2 - }; private: jzfile* _zip; // The zip archive const char* _zip_name; // Name of zip archive - bool _is_boot_append; // entry coming from -Xbootclasspath/a - u1 _multi_versioned; // indicates if the jar file has multi-versioned entries. - // It can have value of "_unknown", "_yes", or "_no" public: bool is_modules_image() const { return false; } bool is_jar_file() const { return true; } @@ -113,10 +105,8 @@ ClassPathZipEntry(jzfile* zip, const char* zip_name, bool is_boot_append); virtual ~ClassPathZipEntry(); u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS); - u1* open_versioned_entry(const char* name, jint* filesize, TRAPS) NOT_CDS_RETURN_(NULL); ClassFileStream* open_stream(const char* name, TRAPS); void contents_do(void f(const char* name, void* context), void* context); - bool is_multiple_versioned(TRAPS) NOT_CDS_RETURN_(false); // Debugging NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);) };
--- a/src/hotspot/share/classfile/javaClasses.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/classfile/javaClasses.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1051,8 +1051,9 @@ ResetMirrorField reset(archived_mirror_h); InstanceKlass::cast(k)->do_nonstatic_fields(&reset); - log_trace(cds, mirror)("Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT, - type2name((BasicType)t), p2i(Universe::_mirrors[t]), p2i(archived_m)); + log_trace(cds, heap, mirror)( + "Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT, + type2name((BasicType)t), p2i(Universe::_mirrors[t]), p2i(archived_m)); Universe::_mirrors[t] = archived_m; } @@ -1133,8 +1134,9 @@ k->set_has_raw_archived_mirror(); ResourceMark rm; - log_trace(cds, mirror)("Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT, - k->external_name(), p2i(mirror), p2i(archived_mirror)); + log_trace(cds, heap, mirror)( + "Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT, + k->external_name(), p2i(mirror), p2i(archived_mirror)); return archived_mirror; } @@ -1186,8 +1188,9 @@ // klass. Updated the field in the archived mirror to point to the relocated // klass in the archive. Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror)); - log_debug(cds, mirror)("Relocate mirror metadata field at _klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT, - p2i(as_Klass(mirror)), p2i(reloc_k)); + log_debug(cds, heap, mirror)( + "Relocate mirror metadata field at _klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT, + p2i(as_Klass(mirror)), p2i(reloc_k)); archived_mirror->metadata_field_put(_klass_offset, reloc_k); // The field at _array_klass_offset is pointing to the original one dimension @@ -1195,8 +1198,9 @@ Klass *arr = array_klass_acquire(mirror); if (arr != NULL) { Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr); - log_debug(cds, mirror)("Relocate mirror metadata field at _array_klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT, - p2i(arr), p2i(reloc_arr)); + log_debug(cds, heap, mirror)( + "Relocate mirror metadata field at _array_klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT, + p2i(arr), p2i(reloc_arr)); archived_mirror->metadata_field_put(_array_klass_offset, reloc_arr); } return archived_mirror; @@ -1247,7 +1251,8 @@ set_mirror_module_field(k, mirror, module, THREAD); ResourceMark rm; - log_trace(cds, mirror)("Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror())); + log_trace(cds, heap, mirror)( + "Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror())); return true; } @@ -4273,6 +4278,9 @@ int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset; int reflect_ConstantPool::_oop_offset; int reflect_UnsafeStaticFieldAccessorImpl::_base_offset; +int jdk_internal_module_ArchivedModuleGraph::_archivedSystemModules_offset; +int jdk_internal_module_ArchivedModuleGraph::_archivedModuleFinder_offset; +int jdk_internal_module_ArchivedModuleGraph::_archivedMainModule_offset; #define STACKTRACEELEMENT_FIELDS_DO(macro) \ macro(declaringClassObject_offset, k, "declaringClassObject", class_signature, false); \ @@ -4435,6 +4443,23 @@ return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes(); } +#define MODULEBOOTSTRAP_FIELDS_DO(macro) \ + macro(_archivedSystemModules_offset, k, "archivedSystemModules", systemModules_signature, true); \ + macro(_archivedModuleFinder_offset, k, "archivedModuleFinder", moduleFinder_signature, true); \ + macro(_archivedMainModule_offset, k, "archivedMainModule", 
string_signature, true) + +void jdk_internal_module_ArchivedModuleGraph::compute_offsets() { + InstanceKlass* k = SystemDictionary::ArchivedModuleGraph_klass(); + assert(k != NULL, "must be loaded"); + MODULEBOOTSTRAP_FIELDS_DO(FIELD_COMPUTE_OFFSET); +} + +#if INCLUDE_CDS +void jdk_internal_module_ArchivedModuleGraph::serialize(SerializeClosure* f) { + MODULEBOOTSTRAP_FIELDS_DO(FIELD_SERIALIZE_OFFSET); +} +#endif + // Compute hard-coded offsets // Invoked before SystemDictionary::initialize, so pre-loaded classes // are not available to determine the offset_of_static_fields. @@ -4493,6 +4518,8 @@ java_lang_LiveStackFrameInfo::compute_offsets(); java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets(); + jdk_internal_module_ArchivedModuleGraph::compute_offsets(); + // generated interpreter code wants to know about the offsets we just computed: AbstractAssembler::update_delayed_values(); }
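The ArchivedModuleGraph plumbing above reuses the javaClasses idiom of listing the fields once in a FIELDS_DO macro and expanding it with different arguments for offset computation and CDS serialization. The sketch below is a generic, hedged illustration of that X-macro technique with made-up offsets; it is not the HotSpot FIELD_COMPUTE_OFFSET/FIELD_SERIALIZE_OFFSET machinery.

#include <cstdio>

// One list of (field, illustrative offset) pairs, expanded several ways.
#define ARCHIVED_GRAPH_FIELDS_DO(macro) \
  macro(archivedSystemModules, 0)       \
  macro(archivedModuleFinder, 8)        \
  macro(archivedMainModule, 16)

// Expansion 1: declare an offset variable per field.
#define DECLARE_OFFSET(name, offs) static int name##_offset = -1;
ARCHIVED_GRAPH_FIELDS_DO(DECLARE_OFFSET)

// Expansion 2: "compute" the offsets (here just the illustrative constants).
#define COMPUTE_OFFSET(name, offs) name##_offset = (offs);
static void compute_offsets() {
  ARCHIVED_GRAPH_FIELDS_DO(COMPUTE_OFFSET)
}

// Expansion 3: dump each offset, reusing the very same field list.
#define DUMP_OFFSET(name, offs) printf(#name "_offset = %d\n", name##_offset);
static void serialize_offsets() {
  ARCHIVED_GRAPH_FIELDS_DO(DUMP_OFFSET)
}

int main() {
  compute_offsets();
  serialize_offsets();
  return 0;
}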
--- a/src/hotspot/share/classfile/javaClasses.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/classfile/javaClasses.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1491,6 +1491,19 @@ static void serialize(SerializeClosure* f) NOT_CDS_RETURN; }; +class jdk_internal_module_ArchivedModuleGraph: AllStatic { + private: + static int _archivedSystemModules_offset; + static int _archivedModuleFinder_offset; + static int _archivedMainModule_offset; + public: + static int archivedSystemModules_offset() { return _archivedSystemModules_offset; } + static int archivedModuleFinder_offset() { return _archivedModuleFinder_offset; } + static int archivedMainModule_offset() { return _archivedMainModule_offset; } + static void compute_offsets(); + static void serialize(SerializeClosure* f) NOT_CDS_RETURN; +}; + // Use to declare fields that need to be injected into Java classes // for the JVM to use. The name_index and signature_index are // declared in vmSymbols. The may_be_java flag is used to declare
--- a/src/hotspot/share/classfile/stringTable.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/classfile/stringTable.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -785,6 +785,10 @@ oop StringTable::create_archived_string(oop s, Thread* THREAD) { assert(DumpSharedSpaces, "this function is only used with -Xshare:dump"); + if (MetaspaceShared::is_archive_object(s)) { + return s; + } + oop new_s = NULL; typeArrayOop v = java_lang_String::value_no_keepalive(s); typeArrayOop new_v =
--- a/src/hotspot/share/classfile/systemDictionary.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/classfile/systemDictionary.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -187,6 +187,7 @@ do_klass(jdk_internal_loader_ClassLoaders_AppClassLoader_klass, jdk_internal_loader_ClassLoaders_AppClassLoader, Pre ) \ do_klass(jdk_internal_loader_ClassLoaders_PlatformClassLoader_klass, jdk_internal_loader_ClassLoaders_PlatformClassLoader, Pre ) \ do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \ + do_klass(ArchivedModuleGraph_klass, jdk_internal_module_ArchivedModuleGraph, Pre ) \ \ do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \ \
--- a/src/hotspot/share/classfile/vmSymbols.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/classfile/vmSymbols.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -124,6 +124,7 @@ template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \ template(jdk_internal_vm_PostVMInitHook, "jdk/internal/vm/PostVMInitHook") \ template(sun_net_www_ParseUtil, "sun/net/www/ParseUtil") \ + template(jdk_internal_module_ArchivedModuleGraph, "jdk/internal/module/ArchivedModuleGraph") \ \ template(jdk_internal_loader_ClassLoaders_AppClassLoader, "jdk/internal/loader/ClassLoaders$AppClassLoader") \ template(jdk_internal_loader_ClassLoaders_PlatformClassLoader, "jdk/internal/loader/ClassLoaders$PlatformClassLoader") \ @@ -652,6 +653,8 @@ template(url_void_signature, "(Ljava/net/URL;)V") \ template(toFileURL_name, "toFileURL") \ template(toFileURL_signature, "(Ljava/lang/String;)Ljava/net/URL;") \ + template(moduleFinder_signature, "Ljava/lang/module/ModuleFinder;") \ + template(systemModules_signature, "Ljdk/internal/module/SystemModules;") \ \ /*end*/
--- a/src/hotspot/share/code/compiledMethod.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/code/compiledMethod.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -325,6 +325,7 @@ // Clear ICStubs of all compiled ICs void CompiledMethod::clear_ic_stubs() { assert_locked_or_safepoint(CompiledIC_lock); + ResourceMark rm; RelocIterator iter(this); while(iter.next()) { if (iter.type() == relocInfo::virtual_call_type) { @@ -547,6 +548,7 @@ bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) { assert_locked_or_safepoint(CompiledIC_lock); bool postponed = false; + ResourceMark rm; // Find all calls in an nmethod and clear the ones that point to non-entrant, // zombie and unloaded nmethods.
--- a/src/hotspot/share/compiler/compileBroker.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/compiler/compileBroker.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -530,7 +530,6 @@ void CompileBroker::print_compile_queues(outputStream* st) { st->print_cr("Current compiles: "); - MutexLocker locker(MethodCompileQueue_lock); char buf[2000]; int buflen = sizeof(buf); @@ -546,7 +545,7 @@ } void CompileQueue::print(outputStream* st) { - assert(MethodCompileQueue_lock->owned_by_self(), "must own lock"); + assert_locked_or_safepoint(MethodCompileQueue_lock); st->print_cr("%s:", name()); CompileTask* task = _first; if (task == NULL) { @@ -1638,12 +1637,6 @@ * out to be a problem. */ void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { - // Free buffer blob, if allocated - if (thread->get_buffer_blob() != NULL) { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - CodeCache::free(thread->get_buffer_blob()); - } - if (comp->should_perform_shutdown()) { // There are two reasons for shutting down the compiler // 1) compiler runtime initialization failed
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/compiler/compilerDefinitions.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -253,17 +253,18 @@ if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) { FLAG_SET_DEFAULT(OnStackReplacePercentage, 933); } + // JVMCI needs values not less than defaults if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) { - FLAG_SET_DEFAULT(ReservedCodeCacheSize, 64*M); + FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize)); } if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) { - FLAG_SET_DEFAULT(InitialCodeCacheSize, 16*M); + FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize)); } if (FLAG_IS_DEFAULT(MetaspaceSize)) { - FLAG_SET_DEFAULT(MetaspaceSize, 12*M); + FLAG_SET_DEFAULT(MetaspaceSize, MAX2(12*M, MetaspaceSize)); } if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) { - FLAG_SET_DEFAULT(NewSizeThreadIncrease, 4*K); + FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease)); } if (TieredStopAtLevel != CompLevel_full_optimization) { // Currently JVMCI compiler can only work at the full optimization level
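Per the added comment, JVMCI needs values not less than the defaults, so the ergonomics above now raise each flag only when the platform default is smaller, rather than overwriting it unconditionally. A tiny hedged sketch of that clamp, with an illustrative flag name and starting value rather than the real globals:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t M = 1024 * 1024;

  // Pretend the platform default was already larger than the JVMCI minimum
  // (both the 240M value and the variable name are illustrative).
  size_t reserved_code_cache_size = 240 * M;
  const bool flag_is_default = true;     // The user did not set it explicitly

  // The old behavior overwrote the value with 64M; the fix keeps whichever
  // of the JVMCI minimum and the current default is larger.
  if (flag_is_default) {
    reserved_code_cache_size = std::max(64 * M, reserved_code_cache_size);
  }

  printf("ReservedCodeCacheSize = %zuM\n", reserved_code_cache_size / M);
  return 0;
}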
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -121,6 +121,11 @@ safe_object_iterate(cl); } + // Object pinning support: every object is implicitly pinned + virtual bool supports_object_pinning() const { return true; } + virtual oop pin_object(JavaThread* thread, oop obj) { return obj; } + virtual void unpin_object(JavaThread* thread, oop obj) { } + // No support for block parsing. virtual HeapWord* block_start(const void* addr) const { return NULL; } virtual size_t block_size(const HeapWord* addr) const { return 0; }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1024,11 +1024,17 @@ uint _num_regions_selected_for_rebuild; // The number of regions actually selected for rebuild. - void update_remset_before_rebuild(HeapRegion * hr) { + void update_remset_before_rebuild(HeapRegion* hr) { G1RemSetTrackingPolicy* tracking_policy = _g1h->g1_policy()->remset_tracker(); - size_t const live_bytes = _cm->liveness(hr->hrm_index()) * HeapWordSize; - bool selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes); + bool selected_for_rebuild; + if (hr->is_humongous()) { + bool const is_live = _cm->liveness(hr->humongous_start_region()->hrm_index()) > 0; + selected_for_rebuild = tracking_policy->update_humongous_before_rebuild(hr, is_live); + } else { + size_t const live_bytes = _cm->liveness(hr->hrm_index()); + selected_for_rebuild = tracking_policy->update_before_rebuild(hr, live_bytes); + } if (selected_for_rebuild) { _num_regions_selected_for_rebuild++; }
--- a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -29,10 +29,6 @@ #include "gc/g1/heapRegionRemSet.hpp" #include "runtime/safepoint.hpp" -bool G1RemSetTrackingPolicy::is_interesting_humongous_region(HeapRegion* r) const { - return r->is_humongous() && oop(r->humongous_start_region()->bottom())->is_typeArray(); -} - bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const { // All non-free, non-young, non-closed archive regions need to be scanned for references; // At every gc we gather references to other regions in young, and closed archive @@ -64,51 +60,81 @@ /* nothing to do */ } +static void print_before_rebuild(HeapRegion* r, bool selected_for_rebuild, size_t total_live_bytes, size_t live_bytes) { + log_trace(gc, remset, tracking)("Before rebuild region %u " + "(ntams: " PTR_FORMAT ") " + "total_live_bytes " SIZE_FORMAT " " + "selected %s " + "(live_bytes " SIZE_FORMAT " " + "next_marked " SIZE_FORMAT " " + "marked " SIZE_FORMAT " " + "type %s)", + r->hrm_index(), + p2i(r->next_top_at_mark_start()), + total_live_bytes, + BOOL_TO_STR(selected_for_rebuild), + live_bytes, + r->next_marked_bytes(), + r->marked_bytes(), + r->get_type_str()); +} + +bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r, bool is_live) { + assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); + assert(r->is_humongous(), "Region %u should be humongous", r->hrm_index()); + + if (r->is_archive()) { + return false; + } + + assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index()); + + bool selected_for_rebuild = false; + // For humongous regions, to be of interest for rebuilding the remembered set the following must apply: + // - We always try to update the remembered sets of humongous regions containing + // type arrays as they might have been reset after full gc. + if (is_live && oop(r->humongous_start_region()->bottom())->is_typeArray() && !r->rem_set()->is_tracked()) { + r->rem_set()->set_state_updating(); + selected_for_rebuild = true; + } + + size_t const live_bytes = is_live ? HeapRegion::GrainBytes : 0; + print_before_rebuild(r, selected_for_rebuild, live_bytes, live_bytes); + + return selected_for_rebuild; +} + bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_bytes) { assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint"); - - bool selected_for_rebuild = false; + assert(!r->is_humongous(), "Region %u is humongous", r->hrm_index()); // Only consider updating the remembered set for old gen regions - excluding archive regions // which never move (but are "Old" regions). - if (r->is_old_or_humongous() && !r->is_archive()) { - size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize; - size_t total_live_bytes = live_bytes + between_ntams_and_top; - // Completely free regions after rebuild are of no interest wrt rebuilding the - // remembered set. - assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index()); - // To be of interest for rebuilding the remembered set the following must apply: - // - They must contain some live data in them. - // - We always try to update the remembered sets of humongous regions containing - // type arrays if they are empty as they might have been reset after full gc. 
- // - Only need to rebuild non-complete remembered sets. - // - Otherwise only add those old gen regions which occupancy is low enough that there - // is a chance that we will ever evacuate them in the mixed gcs. - if ((total_live_bytes > 0) && - (is_interesting_humongous_region(r) || CollectionSetChooser::region_occupancy_low_enough_for_evac(total_live_bytes)) && - !r->rem_set()->is_tracked()) { + if (!r->is_old() || r->is_archive()) { + return false; + } + + assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index()); + + size_t between_ntams_and_top = (r->top() - r->next_top_at_mark_start()) * HeapWordSize; + size_t total_live_bytes = live_bytes + between_ntams_and_top; - r->rem_set()->set_state_updating(); - selected_for_rebuild = true; - } - log_trace(gc, remset, tracking)("Before rebuild region %u " - "(ntams: " PTR_FORMAT ") " - "total_live_bytes " SIZE_FORMAT " " - "selected %s " - "(live_bytes " SIZE_FORMAT " " - "next_marked " SIZE_FORMAT " " - "marked " SIZE_FORMAT " " - "type %s)", - r->hrm_index(), - p2i(r->next_top_at_mark_start()), - total_live_bytes, - BOOL_TO_STR(selected_for_rebuild), - live_bytes, - r->next_marked_bytes(), - r->marked_bytes(), - r->get_type_str()); + bool selected_for_rebuild = false; + // For old regions, to be of interest for rebuilding the remembered set the following must apply: + // - They must contain some live data in them. + // - Only need to rebuild non-complete remembered sets. + // - Otherwise only add those old gen regions which occupancy is low enough that there + // is a chance that we will ever evacuate them in the mixed gcs. + if ((total_live_bytes > 0) && + CollectionSetChooser::region_occupancy_low_enough_for_evac(total_live_bytes) && + !r->rem_set()->is_tracked()) { + + r->rem_set()->set_state_updating(); + selected_for_rebuild = true; } + print_before_rebuild(r, selected_for_rebuild, total_live_bytes, live_bytes); + return selected_for_rebuild; } @@ -149,4 +175,3 @@ r->rem_set()->mem_size()); } } -
--- a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -33,10 +33,6 @@ // the remembered set, ie. when it should be tracked, and if/when the remembered // set is complete. class G1RemSetTrackingPolicy : public CHeapObj<mtGC> { -private: - // Is the given region an interesting humongous region to start remembered set tracking - // for? - bool is_interesting_humongous_region(HeapRegion* r) const; public: // Do we need to scan the given region to get all outgoing references for remembered // set rebuild? @@ -45,6 +41,9 @@ // called at any time. The caller makes sure that the changes to the remembered // set state are visible to other threads. void update_at_allocate(HeapRegion* r); + // Update remembered set tracking state for humongous regions before we are going to + // rebuild remembered sets. Called at safepoint in the remark pause. + bool update_humongous_before_rebuild(HeapRegion* r, bool is_live); // Update remembered set tracking state before we are going to rebuild remembered // sets. Called at safepoint in the remark pause. bool update_before_rebuild(HeapRegion* r, size_t live_bytes);
--- a/src/hotspot/share/gc/shared/barrierSet.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/barrierSet.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -199,7 +199,7 @@ template <typename T> static T atomic_cmpxchg_in_heap_at(T new_value, oop base, ptrdiff_t offset, T compare_value) { - return Raw::oop_atomic_cmpxchg_at(new_value, base, offset, compare_value); + return Raw::atomic_cmpxchg_at(new_value, base, offset, compare_value); } template <typename T>
--- a/src/hotspot/share/gc/shared/oopStorage.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/oopStorage.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -45,47 +45,47 @@ #include "utilities/ostream.hpp" #include "utilities/spinYield.hpp" -OopStorage::AllocateEntry::AllocateEntry() : _prev(NULL), _next(NULL) {} +OopStorage::AllocationListEntry::AllocationListEntry() : _prev(NULL), _next(NULL) {} -OopStorage::AllocateEntry::~AllocateEntry() { +OopStorage::AllocationListEntry::~AllocationListEntry() { assert(_prev == NULL, "deleting attached block"); assert(_next == NULL, "deleting attached block"); } -OopStorage::AllocateList::AllocateList() : _head(NULL), _tail(NULL) {} +OopStorage::AllocationList::AllocationList() : _head(NULL), _tail(NULL) {} -OopStorage::AllocateList::~AllocateList() { +OopStorage::AllocationList::~AllocationList() { // ~OopStorage() empties its lists before destroying them. assert(_head == NULL, "deleting non-empty block list"); assert(_tail == NULL, "deleting non-empty block list"); } -void OopStorage::AllocateList::push_front(const Block& block) { +void OopStorage::AllocationList::push_front(const Block& block) { const Block* old = _head; if (old == NULL) { assert(_tail == NULL, "invariant"); _head = _tail = █ } else { - block.allocate_entry()._next = old; - old->allocate_entry()._prev = █ + block.allocation_list_entry()._next = old; + old->allocation_list_entry()._prev = █ _head = █ } } -void OopStorage::AllocateList::push_back(const Block& block) { +void OopStorage::AllocationList::push_back(const Block& block) { const Block* old = _tail; if (old == NULL) { assert(_head == NULL, "invariant"); _head = _tail = █ } else { - old->allocate_entry()._next = █ - block.allocate_entry()._prev = old; + old->allocation_list_entry()._next = █ + block.allocation_list_entry()._prev = old; _tail = █ } } -void OopStorage::AllocateList::unlink(const Block& block) { - const AllocateEntry& block_entry = block.allocate_entry(); +void OopStorage::AllocationList::unlink(const Block& block) { + const AllocationListEntry& block_entry = block.allocation_list_entry(); const Block* prev_blk = block_entry._prev; const Block* next_blk = block_entry._next; block_entry._prev = NULL; @@ -96,15 +96,15 @@ _head = _tail = NULL; } else if (prev_blk == NULL) { assert(_head == &block, "invariant"); - next_blk->allocate_entry()._prev = NULL; + next_blk->allocation_list_entry()._prev = NULL; _head = next_blk; } else if (next_blk == NULL) { assert(_tail == &block, "invariant"); - prev_blk->allocate_entry()._next = NULL; + prev_blk->allocation_list_entry()._next = NULL; _tail = prev_blk; } else { - next_blk->allocate_entry()._prev = prev_blk; - prev_blk->allocate_entry()._next = next_blk; + next_blk->allocation_list_entry()._prev = prev_blk; + prev_blk->allocation_list_entry()._next = next_blk; } } @@ -210,7 +210,7 @@ _owner(owner), _memory(memory), _active_index(0), - _allocate_entry(), + _allocation_list_entry(), _deferred_updates_next(NULL), _release_refcount(0) { @@ -367,65 +367,65 @@ ////////////////////////////////////////////////////////////////////////////// // Allocation // -// Allocation involves the _allocate_list, which contains a subset of the +// Allocation involves the _allocation_list, which contains a subset of the // blocks owned by a storage object. This is a doubly-linked list, linked // through dedicated fields in the blocks. Full blocks are removed from this // list, though they are still present in the _active_array. 
Empty blocks are -// kept at the end of the _allocate_list, to make it easy for empty block +// kept at the end of the _allocation_list, to make it easy for empty block // deletion to find them. // // allocate(), and delete_empty_blocks_concurrent() lock the -// _allocate_mutex while performing any list and array modifications. +// _allocation_mutex while performing any list and array modifications. // // allocate() and release() update a block's _allocated_bitmask using CAS // loops. This prevents loss of updates even though release() performs // its updates without any locking. // -// allocate() obtains the entry from the first block in the _allocate_list, +// allocate() obtains the entry from the first block in the _allocation_list, // and updates that block's _allocated_bitmask to indicate the entry is in // use. If this makes the block full (all entries in use), the block is -// removed from the _allocate_list so it won't be considered by future +// removed from the _allocation_list so it won't be considered by future // allocations until some entries in it are released. // // release() is performed lock-free. release() first looks up the block for // the entry, using address alignment to find the enclosing block (thereby // avoiding iteration over the _active_array). Once the block has been // determined, its _allocated_bitmask needs to be updated, and its position in -// the _allocate_list may need to be updated. There are two cases: +// the _allocation_list may need to be updated. There are two cases: // // (a) If the block is neither full nor would become empty with the release of // the entry, only its _allocated_bitmask needs to be updated. But if the CAS // update fails, the applicable case may change for the retry. // -// (b) Otherwise, the _allocate_list also needs to be modified. This requires -// locking the _allocate_mutex. To keep the release() operation lock-free, -// rather than updating the _allocate_list itself, it instead performs a +// (b) Otherwise, the _allocation_list also needs to be modified. This requires +// locking the _allocation_mutex. To keep the release() operation lock-free, +// rather than updating the _allocation_list itself, it instead performs a // lock-free push of the block onto the _deferred_updates list. Entries on // that list are processed by allocate() and delete_empty_blocks_XXX(), while // they already hold the necessary lock. That processing makes the block's // list state consistent with its current _allocated_bitmask. The block is -// added to the _allocate_list if not already present and the bitmask is not -// full. The block is moved to the end of the _allocated_list if the bitmask +// added to the _allocation_list if not already present and the bitmask is not +// full. The block is moved to the end of the _allocation_list if the bitmask // is empty, for ease of empty block deletion processing. oop* OopStorage::allocate() { - MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag); + MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag); // Do some deferred update processing every time we allocate. - // Continue processing deferred updates if _allocate_list is empty, + // Continue processing deferred updates if _allocation_list is empty, // in the hope that we'll get a block from that, rather than // allocating a new block. 
- while (reduce_deferred_updates() && (_allocate_list.head() == NULL)) {} + while (reduce_deferred_updates() && (_allocation_list.head() == NULL)) {} - // Use the first block in _allocate_list for the allocation. - Block* block = _allocate_list.head(); + // Use the first block in _allocation_list for the allocation. + Block* block = _allocation_list.head(); if (block == NULL) { // No available blocks; make a new one, and add to storage. { - MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag); + MutexUnlockerEx mul(_allocation_mutex, Mutex::_no_safepoint_check_flag); block = Block::new_block(this); } if (block == NULL) { - while (_allocate_list.head() == NULL) { + while (_allocation_list.head() == NULL) { if (!reduce_deferred_updates()) { // Failed to make new block, no other thread made a block // available while the mutex was released, and didn't get @@ -448,13 +448,13 @@ return NULL; } } - // Add to end of _allocate_list. The mutex release allowed - // other threads to add blocks to the _allocate_list. We prefer + // Add to end of _allocation_list. The mutex release allowed + // other threads to add blocks to the _allocation_list. We prefer // to allocate from non-empty blocks, to allow empty blocks to // be deleted. - _allocate_list.push_back(*block); + _allocation_list.push_back(*block); } - block = _allocate_list.head(); + block = _allocation_list.head(); } // Allocate from first block. assert(block != NULL, "invariant"); @@ -471,7 +471,7 @@ // Transitioning from not full to full. // Remove full blocks from consideration by future allocates. log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block)); - _allocate_list.unlink(*block); + _allocation_list.unlink(*block); } log_info(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result)); return result; @@ -482,7 +482,7 @@ // Return true if the array was successfully expanded, false to // indicate allocation failure. bool OopStorage::expand_active_array() { - assert_lock_strong(_allocate_mutex); + assert_lock_strong(_allocation_mutex); ActiveArray* old_array = _active_array; size_t new_size = 2 * old_array->size(); log_info(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT, @@ -632,7 +632,7 @@ // (updated bitmask is empty or old bitmask was full), atomically push // this block onto the deferred updates list. Some future call to // reduce_deferred_updates will make any needed changes related to this - // block and _allocate_list. This deferral avoids list updates and the + // block and _allocation_list. This deferral avoids list updates and the // associated locking here. if ((releasing == old_allocated) || is_full_bitmask(old_allocated)) { // Log transitions. Both transitions are possible in a single update. @@ -663,7 +663,7 @@ // Process one available deferred update. Returns true if one was processed. bool OopStorage::reduce_deferred_updates() { - assert_locked_or_safepoint(_allocate_mutex); + assert_locked_or_safepoint(_allocation_mutex); // Atomically pop a block off the list, if any available. // No ABA issue because this is only called by one thread at a time. // The atomicity is wrto pushes by release(). @@ -687,20 +687,20 @@ uintx allocated = block->allocated_bitmask(); // Make membership in list consistent with bitmask state. - if ((_allocate_list.ctail() != NULL) && - ((_allocate_list.ctail() == block) || - (_allocate_list.next(*block) != NULL))) { - // Block is in the allocate list. 
+ if ((_allocation_list.ctail() != NULL) && + ((_allocation_list.ctail() == block) || + (_allocation_list.next(*block) != NULL))) { + // Block is in the _allocation_list. assert(!is_full_bitmask(allocated), "invariant"); } else if (!is_full_bitmask(allocated)) { - // Block is not in the allocate list, but now should be. - _allocate_list.push_front(*block); + // Block is not in the _allocation_list, but now should be. + _allocation_list.push_front(*block); } // Else block is full and not in list, which is correct. // Move empty block to end of list, for possible deletion. if (is_empty_bitmask(allocated)) { - _allocate_list.unlink(*block); - _allocate_list.push_back(*block); + _allocation_list.unlink(*block); + _allocation_list.push_back(*block); } log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT, @@ -759,24 +759,24 @@ const size_t initial_active_array_size = 8; OopStorage::OopStorage(const char* name, - Mutex* allocate_mutex, + Mutex* allocation_mutex, Mutex* active_mutex) : _name(dup_name(name)), _active_array(ActiveArray::create(initial_active_array_size)), - _allocate_list(), + _allocation_list(), _deferred_updates(NULL), - _allocate_mutex(allocate_mutex), + _allocation_mutex(allocation_mutex), _active_mutex(active_mutex), _allocation_count(0), _concurrent_iteration_active(false) { _active_array->increment_refcount(); - assert(_active_mutex->rank() < _allocate_mutex->rank(), - "%s: active_mutex must have lower rank than allocate_mutex", _name); + assert(_active_mutex->rank() < _allocation_mutex->rank(), + "%s: active_mutex must have lower rank than allocation_mutex", _name); assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always, "%s: active mutex requires safepoint check", _name); - assert(_allocate_mutex->_safepoint_check_required != Mutex::_safepoint_check_always, - "%s: allocate mutex requires safepoint check", _name); + assert(_allocation_mutex->_safepoint_check_required != Mutex::_safepoint_check_always, + "%s: allocation mutex requires safepoint check", _name); } void OopStorage::delete_empty_block(const Block& block) { @@ -791,8 +791,8 @@ _deferred_updates = block->deferred_updates_next(); block->set_deferred_updates_next(NULL); } - while ((block = _allocate_list.head()) != NULL) { - _allocate_list.unlink(*block); + while ((block = _allocation_list.head()) != NULL) { + _allocation_list.unlink(*block); } bool unreferenced = _active_array->decrement_refcount(); assert(unreferenced, "deleting storage while _active_array is referenced"); @@ -811,18 +811,18 @@ while (reduce_deferred_updates()) {} // Don't interfere with a concurrent iteration. if (_concurrent_iteration_active) return; - // Delete empty (and otherwise deletable) blocks from end of _allocate_list. - for (Block* block = _allocate_list.tail(); + // Delete empty (and otherwise deletable) blocks from end of _allocation_list. + for (Block* block = _allocation_list.tail(); (block != NULL) && block->is_deletable(); - block = _allocate_list.tail()) { + block = _allocation_list.tail()) { _active_array->remove(block); - _allocate_list.unlink(*block); + _allocation_list.unlink(*block); delete_empty_block(*block); } } void OopStorage::delete_empty_blocks_concurrent() { - MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag); + MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag); // Other threads could be adding to the empty block count while we // release the mutex across the block deletions. 
Set an upper bound // on how many blocks we'll try to release, so other threads can't @@ -834,7 +834,7 @@ // lock. But limit number processed to limit lock duration. reduce_deferred_updates(); - Block* block = _allocate_list.tail(); + Block* block = _allocation_list.tail(); if ((block == NULL) || !block->is_deletable()) { // No block to delete, so done. There could be more pending // deferred updates that could give us more work to do; deal with @@ -848,10 +848,10 @@ if (_concurrent_iteration_active) return; _active_array->remove(block); } - // Remove block from _allocate_list and delete it. - _allocate_list.unlink(*block); + // Remove block from _allocation_list and delete it. + _allocation_list.unlink(*block); // Release mutex while deleting block. - MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag); + MutexUnlockerEx ul(_allocation_mutex, Mutex::_no_safepoint_check_flag); delete_empty_block(*block); } } @@ -860,7 +860,7 @@ const Block* block = find_block_or_null(ptr); if (block != NULL) { // Prevent block deletion and _active_array modification. - MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag); + MutexLockerEx ml(_allocation_mutex, Mutex::_no_safepoint_check_flag); // Block could be a false positive, so get index carefully. size_t index = Block::active_index_safe(block); if ((index < _active_array->block_count()) &&
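The renames above keep the documented OopStorage design: release() never takes _allocation_mutex, and when a block's transition might require list maintenance, the block is pushed lock-free onto _deferred_updates, to be reconciled later by allocate() or the empty-block deletion paths while they already hold the lock. The following is a compact standalone sketch of that "defer list maintenance to the lock holder" pattern using std::atomic; the structure and names are illustrative, not OopStorage's internals.

#include <atomic>
#include <mutex>
#include <cstdio>

struct Block {
  int id;
  Block* deferred_next = nullptr;   // Link used only while on the deferred list
};

static std::atomic<Block*> deferred_updates{nullptr};
static std::mutex allocation_mutex;   // Protects the (omitted) allocation list

// Lock-free: called from a release() path, which must not block.
static void push_deferred(Block* block) {
  Block* head = deferred_updates.load(std::memory_order_relaxed);
  do {
    block->deferred_next = head;
  } while (!deferred_updates.compare_exchange_weak(head, block,
                                                   std::memory_order_release,
                                                   std::memory_order_relaxed));
}

// Called with allocation_mutex held: pop one block and reconcile its list
// membership with its current state. Returns true if one was processed.
static bool reduce_deferred_updates() {
  Block* block = deferred_updates.load(std::memory_order_acquire);
  while (block != nullptr &&
         !deferred_updates.compare_exchange_weak(block, block->deferred_next,
                                                 std::memory_order_acquire)) {
  }
  if (block == nullptr) {
    return false;
  }
  block->deferred_next = nullptr;
  printf("reconciling block %d with the allocation list\n", block->id);
  return true;
}

int main() {
  Block a{1}, b{2};
  push_deferred(&a);                   // e.g. from a lock-free release()
  push_deferred(&b);
  std::lock_guard<std::mutex> guard(allocation_mutex);
  while (reduce_deferred_updates()) {} // e.g. at the start of allocate()
  return 0;
}

Pushes may come from many releasing threads concurrently, while pops are serialized by the mutex; that single-popper property is what keeps the simple CAS pop safe here, matching the "only called by one thread at a time" observation in the OopStorage comments.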
--- a/src/hotspot/share/gc/shared/oopStorage.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/oopStorage.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -73,7 +73,7 @@ class OopStorage : public CHeapObj<mtGC> { public: - OopStorage(const char* name, Mutex* allocate_mutex, Mutex* active_mutex); + OopStorage(const char* name, Mutex* allocation_mutex, Mutex* active_mutex); ~OopStorage(); // These count and usage accessors are racy unless at a safepoint. @@ -94,12 +94,12 @@ ALLOCATED_ENTRY }; - // Locks _allocate_mutex. + // Locks _allocation_mutex. // precondition: ptr != NULL. EntryStatus allocation_status(const oop* ptr) const; // Allocates and returns a new entry. Returns NULL if memory allocation - // failed. Locks _allocate_mutex. + // failed. Locks _allocation_mutex. // postcondition: *result == NULL. oop* allocate(); @@ -152,7 +152,7 @@ // Block cleanup functions are for the exclusive use of the GC. // Both stop deleting if there is an in-progress concurrent iteration. - // Concurrent deletion locks both the allocate_mutex and the active_mutex. + // Concurrent deletion locks both the _allocation_mutex and the _active_mutex. void delete_empty_blocks_safepoint(); void delete_empty_blocks_concurrent(); @@ -172,20 +172,20 @@ NOT_AIX( private: ) class Block; // Fixed-size array of oops, plus bookkeeping. class ActiveArray; // Array of Blocks, plus bookkeeping. - class AllocateEntry; // Provides AllocateList links in a Block. + class AllocationListEntry; // Provides AllocationList links in a Block. // Doubly-linked list of Blocks. - class AllocateList { + class AllocationList { const Block* _head; const Block* _tail; // Noncopyable. - AllocateList(const AllocateList&); - AllocateList& operator=(const AllocateList&); + AllocationList(const AllocationList&); + AllocationList& operator=(const AllocationList&); public: - AllocateList(); - ~AllocateList(); + AllocationList(); + ~AllocationList(); Block* head(); Block* tail(); @@ -219,10 +219,10 @@ private: const char* _name; ActiveArray* _active_array; - AllocateList _allocate_list; + AllocationList _allocation_list; Block* volatile _deferred_updates; - Mutex* _allocate_mutex; + Mutex* _allocation_mutex; Mutex* _active_mutex; // Volatile for racy unlocked accesses.
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -107,10 +107,10 @@ return *block_ptr(index); } -// A Block has an embedded AllocateEntry to provide the links between -// Blocks in a AllocateList. -class OopStorage::AllocateEntry { - friend class OopStorage::AllocateList; +// A Block has an embedded AllocationListEntry to provide the links between +// Blocks in an AllocationList. +class OopStorage::AllocationListEntry { + friend class OopStorage::AllocationList; // Members are mutable, and we deal exclusively with pointers to // const, to make const blocks easier to use; a block being const @@ -119,18 +119,18 @@ mutable const Block* _next; // Noncopyable. - AllocateEntry(const AllocateEntry&); - AllocateEntry& operator=(const AllocateEntry&); + AllocationListEntry(const AllocationListEntry&); + AllocationListEntry& operator=(const AllocationListEntry&); public: - AllocateEntry(); - ~AllocateEntry(); + AllocationListEntry(); + ~AllocationListEntry(); }; // Fixed-sized array of oops, plus bookkeeping data. // All blocks are in the storage's _active_array, at the block's _active_index. -// Non-full blocks are in the storage's _allocate_list, linked through the -// block's _allocate_entry. Empty blocks are at the end of that list. +// Non-full blocks are in the storage's _allocation_list, linked through the +// block's _allocation_list_entry. Empty blocks are at the end of that list. class OopStorage::Block /* No base class, to avoid messing up alignment. */ { // _data must be the first non-static data member, for alignment. oop _data[BitsPerWord]; @@ -140,7 +140,7 @@ const OopStorage* _owner; void* _memory; // Unaligned storage containing block. 
size_t _active_index; - AllocateEntry _allocate_entry; + AllocationListEntry _allocation_list_entry; Block* volatile _deferred_updates_next; volatile uintx _release_refcount; @@ -158,7 +158,7 @@ Block& operator=(const Block&); public: - const AllocateEntry& allocate_entry() const; + const AllocationListEntry& allocation_list_entry() const; static size_t allocation_size(); static size_t allocation_alignment_shift(); @@ -197,36 +197,36 @@ template<typename F> bool iterate(F f) const; }; // class Block -inline OopStorage::Block* OopStorage::AllocateList::head() { +inline OopStorage::Block* OopStorage::AllocationList::head() { return const_cast<Block*>(_head); } -inline OopStorage::Block* OopStorage::AllocateList::tail() { +inline OopStorage::Block* OopStorage::AllocationList::tail() { return const_cast<Block*>(_tail); } -inline const OopStorage::Block* OopStorage::AllocateList::chead() const { +inline const OopStorage::Block* OopStorage::AllocationList::chead() const { return _head; } -inline const OopStorage::Block* OopStorage::AllocateList::ctail() const { +inline const OopStorage::Block* OopStorage::AllocationList::ctail() const { return _tail; } -inline OopStorage::Block* OopStorage::AllocateList::prev(Block& block) { - return const_cast<Block*>(block.allocate_entry()._prev); +inline OopStorage::Block* OopStorage::AllocationList::prev(Block& block) { + return const_cast<Block*>(block.allocation_list_entry()._prev); } -inline OopStorage::Block* OopStorage::AllocateList::next(Block& block) { - return const_cast<Block*>(block.allocate_entry()._next); +inline OopStorage::Block* OopStorage::AllocationList::next(Block& block) { + return const_cast<Block*>(block.allocation_list_entry()._next); } -inline const OopStorage::Block* OopStorage::AllocateList::prev(const Block& block) const { - return block.allocate_entry()._prev; +inline const OopStorage::Block* OopStorage::AllocationList::prev(const Block& block) const { + return block.allocation_list_entry()._prev; } -inline const OopStorage::Block* OopStorage::AllocateList::next(const Block& block) const { - return block.allocate_entry()._next; +inline const OopStorage::Block* OopStorage::AllocationList::next(const Block& block) const { + return block.allocation_list_entry()._next; } template<typename Closure> @@ -298,8 +298,8 @@ // Inline Block accesses for use in iteration loops. -inline const OopStorage::AllocateEntry& OopStorage::Block::allocate_entry() const { - return _allocate_entry; +inline const OopStorage::AllocationListEntry& OopStorage::Block::allocation_list_entry() const { + return _allocation_list_entry; } inline void OopStorage::Block::check_index(unsigned index) const {
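The AllocateList/AllocateEntry to AllocationList/AllocationListEntry rename above keeps the same intrusive-list shape: each Block embeds its list links, so linking and unlinking a block never allocates a separate node. Below is a minimal standalone C++ sketch of that pattern (toy types for illustration, not the HotSpot classes):

    #include <cassert>

    struct Block;  // forward declaration; the links only need pointers

    // Embedded links, analogous to OopStorage::AllocationListEntry.
    struct AllocationListEntry {
      Block* prev = nullptr;
      Block* next = nullptr;
    };

    struct Block {
      AllocationListEntry entry;  // embedded, so no separate node allocation
      int id;
      explicit Block(int i) : id(i) {}
    };

    // Minimal doubly-linked list keeping head and tail, like AllocationList.
    class AllocationList {
      Block* _head = nullptr;
      Block* _tail = nullptr;
     public:
      Block* head() const { return _head; }
      Block* tail() const { return _tail; }

      void push_back(Block* b) {
        b->entry.prev = _tail;
        b->entry.next = nullptr;
        if (_tail != nullptr) _tail->entry.next = b; else _head = b;
        _tail = b;
      }

      void unlink(Block* b) {
        Block* p = b->entry.prev;
        Block* n = b->entry.next;
        if (p != nullptr) p->entry.next = n; else _head = n;
        if (n != nullptr) n->entry.prev = p; else _tail = p;
        b->entry.prev = b->entry.next = nullptr;
      }
    };

    int main() {
      Block a(1), b(2), c(3);
      AllocationList list;
      list.push_back(&a);
      list.push_back(&b);
      list.push_back(&c);
      list.unlink(&b);  // unlink the middle block
      assert(list.head() == &a && list.tail() == &c);
      assert(a.entry.next == &c && c.entry.prev == &a);
      return 0;
    }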
--- a/src/hotspot/share/gc/shared/oopStorageParState.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/oopStorageParState.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -52,7 +52,7 @@ // interfering with each other. // // Both allocate() and delete_empty_blocks_concurrent() lock the -// _allocate_mutex while performing their respective list and array +// _allocation_mutex while performing their respective list and array // manipulations, preventing them from interfering with each other. // // When allocate() creates a new block, it is added to the end of the
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1033,7 +1033,7 @@ // The last ref must have its discovered field pointing to itself. oop next_discovered = (current_head != NULL) ? current_head : obj; - oop retest = RawAccess<>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL)); + oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL)); if (retest == NULL) { // This thread just won the right to enqueue the object.
--- a/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/referenceProcessorPhaseTimes.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -80,8 +80,7 @@ STATIC_ASSERT((REF_PHANTOM + 1) == ARRAY_SIZE(ReferenceTypeNames)); static const char* phase_enum_2_phase_string(ReferenceProcessor::RefProcPhases phase) { - assert(phase >= ReferenceProcessor::RefPhase1 && phase <= ReferenceProcessor::RefPhaseMax, - "Invalid reference processing phase (%d)", phase); + ASSERT_PHASE(phase); return PhaseNames[phase]; }
--- a/src/hotspot/share/gc/shared/space.inline.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/space.inline.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_SHARED_SPACE_INLINE_HPP #define SHARE_VM_GC_SHARED_SPACE_INLINE_HPP +#include "gc/shared/blockOffsetTable.inline.hpp" #include "gc/shared/collectedHeap.hpp" #include "gc/shared/generation.hpp" #include "gc/shared/space.hpp"
--- a/src/hotspot/share/gc/shared/taskqueue.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/taskqueue.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -61,10 +61,11 @@ public: inline TaskQueueStats() { reset(); } - inline void record_push() { ++_stats[push]; } - inline void record_pop() { ++_stats[pop]; } - inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; } - inline void record_steal(bool success); + inline void record_push() { ++_stats[push]; } + inline void record_pop() { ++_stats[pop]; } + inline void record_pop_slow() { record_pop(); ++_stats[pop_slow]; } + inline void record_steal_attempt() { ++_stats[steal_attempt]; } + inline void record_steal() { ++_stats[steal]; } inline void record_overflow(size_t new_length); TaskQueueStats & operator +=(const TaskQueueStats & addend); @@ -87,11 +88,6 @@ static const char * const _names[last_stat_id]; }; -void TaskQueueStats::record_steal(bool success) { - ++_stats[steal_attempt]; - if (success) ++_stats[steal]; -} - void TaskQueueStats::record_overflow(size_t new_len) { ++_stats[overflow]; if (new_len > _stats[overflow_max_len]) _stats[overflow_max_len] = new_len; @@ -364,18 +360,19 @@ template<class T, MEMFLAGS F> class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> { +public: + typedef typename T::element_type E; + private: uint _n; T** _queues; + bool steal_best_of_2(uint queue_num, int* seed, E& t); + public: - typedef typename T::element_type E; - GenericTaskQueueSet(int n); ~GenericTaskQueueSet(); - bool steal_best_of_2(uint queue_num, int* seed, E& t); - void register_queue(uint i, T* q); T* queue(uint n);
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -252,12 +252,12 @@ template<class T, MEMFLAGS F> bool GenericTaskQueueSet<T, F>::steal(uint queue_num, int* seed, E& t) { for (uint i = 0; i < 2 * _n; i++) { + TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal_attempt()); if (steal_best_of_2(queue_num, seed, t)) { - TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(true)); + TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal()); return true; } } - TASKQUEUE_STATS_ONLY(queue(queue_num)->stats.record_steal(false)); return false; }
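With this change, steal() records one attempt per steal_best_of_2() try inside the 2*N loop and one success on a hit, rather than a single attempt per steal() call as the old record_steal(bool) did. A small standalone sketch of the new counting (hypothetical Stats type, not the HotSpot one):

    #include <cstdio>
    #include <functional>

    struct Stats {
      unsigned attempts = 0;
      unsigned steals = 0;
      void record_steal_attempt() { ++attempts; }
      void record_steal()         { ++steals; }
    };

    // Models GenericTaskQueueSet::steal(): up to 2*n tries, one attempt per try.
    bool steal(Stats& stats, unsigned n, const std::function<bool()>& try_steal) {
      for (unsigned i = 0; i < 2 * n; i++) {
        stats.record_steal_attempt();   // counted per inner try (new behavior)
        if (try_steal()) {
          stats.record_steal();
          return true;
        }
      }
      return false;
    }

    int main() {
      Stats s;
      unsigned tries = 0;
      // Succeeds on the third inner try: 3 attempts recorded, 1 steal.
      steal(s, 4, [&] { return ++tries == 3; });
      std::printf("attempts=%u steals=%u\n", s.attempts, s.steals);
      return 0;
    }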
--- a/src/hotspot/share/gc/z/zDirector.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zDirector.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -81,7 +81,7 @@ // Perform GC if heap usage passes 10/20/30% and no other GC has been // performed yet. This allows us to get some early samples of the GC // duration, which is needed by the other rules. - const size_t max_capacity = ZHeap::heap()->max_capacity(); + const size_t max_capacity = ZHeap::heap()->current_max_capacity(); const size_t used = ZHeap::heap()->used(); const double used_threshold_percent = (ZStatCycle::ncycles() + 1) * 0.1; const size_t used_threshold = max_capacity * used_threshold_percent; @@ -107,7 +107,7 @@ // Calculate amount of free memory available to Java threads. Note that // the heap reserve is not available to Java threads and is therefore not // considered part of the free memory. - const size_t max_capacity = ZHeap::heap()->max_capacity(); + const size_t max_capacity = ZHeap::heap()->current_max_capacity(); const size_t max_reserve = ZHeap::heap()->max_reserve(); const size_t used = ZHeap::heap()->used(); const size_t free_with_reserve = max_capacity - used; @@ -155,7 +155,7 @@ // passed since the previous GC. This helps avoid superfluous GCs when running // applications with very low allocation rate. const size_t used_after_last_gc = ZStatHeap::used_at_relocate_end(); - const size_t used_increase_threshold = ZHeap::heap()->max_capacity() * 0.10; // 10% + const size_t used_increase_threshold = ZHeap::heap()->current_max_capacity() * 0.10; // 10% const size_t used_threshold = used_after_last_gc + used_increase_threshold; const size_t used = ZHeap::heap()->used(); const double time_since_last_gc = ZStatCycle::time_since_last();
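Basing the 10/20/30% warm-up thresholds on current_max_capacity() matters once the max heap size has been forcefully lowered (see the zPhysicalMemory.cpp change below): the rule then fires relative to the capacity that is actually attainable. A worked example with assumed numbers, for illustration only:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t current_max_capacity = 1024 * M;  // assume max was lowered to 1G
      const size_t used = 150 * M;
      for (unsigned ncycles = 0; ncycles < 3; ncycles++) {
        const double used_threshold_percent = (ncycles + 1) * 0.1;  // 10%, 20%, 30%
        const size_t used_threshold = (size_t)(current_max_capacity * used_threshold_percent);
        std::printf("cycle %u: threshold=%zuM, rule fires=%d\n",
                    ncycles, used_threshold / M, used >= used_threshold);
      }
      return 0;
    }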
--- a/src/hotspot/share/gc/z/zHash.inline.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zHash.inline.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -21,6 +21,38 @@ * questions. */ +/* + * This file is available under and governed by the GNU General Public + * License version 2 only, as published by the Free Software Foundation. + * However, the following notice accompanied the original version of this + * file: + * + * (C) 2009 by Remo Dentato (rdentato@gmail.com) + * + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * http://opensource.org/licenses/bsd-license.php + */ + #ifndef SHARE_GC_Z_ZHASH_INLINE_HPP #define SHARE_GC_Z_ZHASH_INLINE_HPP
--- a/src/hotspot/share/gc/z/zHeap.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zHeap.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -107,6 +107,10 @@ return _page_allocator.max_capacity(); } +size_t ZHeap::current_max_capacity() const { + return _page_allocator.current_max_capacity(); +} + size_t ZHeap::capacity() const { return _page_allocator.capacity(); }
--- a/src/hotspot/share/gc/z/zHeap.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zHeap.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -79,6 +79,7 @@ // Heap metrics size_t min_capacity() const; size_t max_capacity() const; + size_t current_max_capacity() const; size_t capacity() const; size_t max_reserve() const; size_t used_high() const;
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zPageAllocator.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -84,11 +84,12 @@ ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1; ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve) : + _lock(), _virtual(), _physical(max_capacity, ZPageSizeMin), _cache(), - _pre_mapped(_virtual, _physical, min_capacity), _max_reserve(max_reserve), + _pre_mapped(_virtual, _physical, try_ensure_unused_for_pre_mapped(min_capacity)), _used_high(0), _used_low(0), _used(0), @@ -107,6 +108,10 @@ return _physical.max_capacity(); } +size_t ZPageAllocator::current_max_capacity() const { + return _physical.current_max_capacity(); +} + size_t ZPageAllocator::capacity() const { return _physical.capacity(); } @@ -169,18 +174,43 @@ } } -size_t ZPageAllocator::available(ZAllocationFlags flags) const { - size_t available = max_capacity() - used(); - assert(_physical.available() + _pre_mapped.available() + _cache.available() == available, "Should be equal"); +size_t ZPageAllocator::max_available(bool no_reserve) const { + size_t available = current_max_capacity() - used(); - if (flags.no_reserve()) { - // The memory reserve should not be considered free + if (no_reserve) { + // The reserve should not be considered available available -= MIN2(available, max_reserve()); } return available; } +size_t ZPageAllocator::try_ensure_unused(size_t size, bool no_reserve) { + // Ensure that we always have space available for the reserve. This + // is needed to avoid losing the reserve because of failure to map + // more memory before reaching max capacity. + _physical.try_ensure_unused_capacity(size + max_reserve()); + + size_t unused = _physical.unused_capacity(); + + if (no_reserve) { + // The reserve should not be considered unused + unused -= MIN2(unused, max_reserve()); + } + + return MIN2(size, unused); +} + +size_t ZPageAllocator::try_ensure_unused_for_pre_mapped(size_t size) { + // This function is called during construction, where the + // physical memory manager might have failed to initialied. + if (!_physical.is_initialized()) { + return 0; + } + + return try_ensure_unused(size, true /* no_reserve */); +} + ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) { // Allocate physical memory const ZPhysicalMemory pmem = _physical.alloc(size); @@ -259,8 +289,8 @@ } ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags) { - const size_t available_total = available(flags); - if (available_total < size) { + const size_t max = max_available(flags.no_reserve()); + if (max < size) { // Not enough free memory return NULL; } @@ -281,11 +311,11 @@ // subsequent allocations can use the physical memory. 
flush_pre_mapped(); - // Check if physical memory is available - const size_t available_physical = _physical.available(); - if (available_physical < size) { + // Try ensure that physical memory is available + const size_t unused = try_ensure_unused(size, flags.no_reserve()); + if (unused < size) { // Flush cache to free up more physical memory - flush_cache(size - available_physical); + flush_cache(size - unused); } // Create new page and allocate physical memory @@ -303,7 +333,7 @@ increase_used(size, flags.relocation()); // Send trace event - ZTracer::tracer()->report_page_alloc(size, used(), available(flags), _cache.available(), flags); + ZTracer::tracer()->report_page_alloc(size, used(), max_available(flags.no_reserve()), _cache.available(), flags); return page; }
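In the new max_available(), the reserve is only subtracted for allocations that are not allowed to dip into it. A standalone sketch of that accounting with assumed numbers (not the HotSpot code):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // available = current_max_capacity - used, minus the reserve unless the
    // allocation is allowed to use it (no_reserve == false).
    size_t max_available(size_t current_max_capacity, size_t used,
                         size_t max_reserve, bool no_reserve) {
      size_t available = current_max_capacity - used;
      if (no_reserve) {
        available -= std::min(available, max_reserve);
      }
      return available;
    }

    int main() {
      const size_t M = 1024 * 1024;
      // 1G current max, 960M used, 32M reserve: 32M without the reserve, 64M with it.
      std::printf("%zuM vs %zuM\n",
                  max_available(1024 * M, 960 * M, 32 * M, true)  / M,
                  max_available(1024 * M, 960 * M, 32 * M, false) / M);
      return 0;
    }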
--- a/src/hotspot/share/gc/z/zPageAllocator.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zPageAllocator.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,8 +43,8 @@ ZVirtualMemoryManager _virtual; ZPhysicalMemoryManager _physical; ZPageCache _cache; + const size_t _max_reserve; ZPreMappedMemory _pre_mapped; - const size_t _max_reserve; size_t _used_high; size_t _used_low; size_t _used; @@ -58,7 +58,9 @@ void increase_used(size_t size, bool relocation); void decrease_used(size_t size, bool reclaimed); - size_t available(ZAllocationFlags flags) const; + size_t max_available(bool no_reserve) const; + size_t try_ensure_unused(size_t size, bool no_reserve); + size_t try_ensure_unused_for_pre_mapped(size_t size); ZPage* create_page(uint8_t type, size_t size); void map_page(ZPage* page); @@ -83,6 +85,7 @@ bool is_initialized() const; size_t max_capacity() const; + size_t current_max_capacity() const; size_t capacity() const; size_t max_reserve() const; size_t used_high() const;
--- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zPhysicalMemory.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,7 @@ #include "memory/allocation.inline.hpp" #include "services/memTracker.hpp" #include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" ZPhysicalMemory::ZPhysicalMemory() : _nsegments(0), @@ -93,6 +94,7 @@ ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity, size_t granule_size) : _backing(max_capacity, granule_size), _max_capacity(max_capacity), + _current_max_capacity(max_capacity), _capacity(0), _used(0) {} @@ -100,31 +102,34 @@ return _backing.is_initialized(); } -bool ZPhysicalMemoryManager::ensure_available(size_t size) { - const size_t unused_capacity = _capacity - _used; - if (unused_capacity >= size) { - // Enough unused capacity available - return true; +void ZPhysicalMemoryManager::try_ensure_unused_capacity(size_t size) { + const size_t unused = unused_capacity(); + if (unused >= size) { + // Don't try to expand, enough unused capacity available + return; + } + + const size_t current_max = current_max_capacity(); + if (_capacity == current_max) { + // Don't try to expand, current max capacity reached + return; } - const size_t expand_with = size - unused_capacity; - const size_t new_capacity = _capacity + expand_with; - if (new_capacity > _max_capacity) { - // Can not expand beyond max capacity - return false; - } + // Try to expand + const size_t old_capacity = capacity(); + const size_t new_capacity = MIN2(old_capacity + size - unused, current_max); + _capacity = _backing.try_expand(old_capacity, new_capacity); - // Expand - if (!_backing.expand(_capacity, new_capacity)) { - log_error(gc)("Failed to expand Java heap with " SIZE_FORMAT "%s", - byte_size_in_proper_unit(expand_with), - proper_unit_for_byte_size(expand_with)); - return false; + if (_capacity != new_capacity) { + // Failed, or partly failed, to expand + log_error(gc, init)("Not enough space available on the backing filesystem to hold the current max"); + log_error(gc, init)("Java heap size (" SIZE_FORMAT "M). Forcefully lowering max Java heap size to " + SIZE_FORMAT "M (%.0lf%%).", current_max / M, _capacity / M, + percent_of(_capacity, current_max)); + + // Adjust current max capacity to avoid further expand attempts + _current_max_capacity = _capacity; } - - _capacity = new_capacity; - - return true; } void ZPhysicalMemoryManager::nmt_commit(ZPhysicalMemory pmem, uintptr_t offset) { @@ -144,7 +149,7 @@ } ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) { - if (!ensure_available(size)) { + if (unused_capacity() < size) { // Not enough memory available return ZPhysicalMemory(); }
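try_ensure_unused_capacity() expands toward the current max and, when the backing filesystem cannot deliver, permanently lowers the current max capacity so later allocations stop retrying the expansion. A simplified standalone model of that clamping (assumed fixed backing limit, not the real ZPhysicalMemoryBacking):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Standalone model of the capacity bookkeeping; not the HotSpot classes.
    class PhysicalMemoryModel {
      size_t _backing_limit;           // what the backing filesystem can actually hold
      size_t _current_max_capacity;
      size_t _capacity = 0;
      size_t _used = 0;
     public:
      PhysicalMemoryModel(size_t max_capacity, size_t backing_limit)
          : _backing_limit(backing_limit), _current_max_capacity(max_capacity) {}

      size_t unused_capacity() const { return _capacity - _used; }
      size_t current_max_capacity() const { return _current_max_capacity; }

      void try_ensure_unused_capacity(size_t size) {
        const size_t unused = unused_capacity();
        if (unused >= size) return;                      // enough unused already
        if (_capacity == _current_max_capacity) return;  // cannot grow further
        const size_t new_capacity =
            std::min(_capacity + size - unused, _current_max_capacity);
        _capacity = std::min(new_capacity, _backing_limit);  // "try_expand" may fall short
        if (_capacity != new_capacity) {
          // Failed, or partly failed, to expand: stop trying to grow past what we got.
          _current_max_capacity = _capacity;
        }
      }
    };

    int main() {
      const size_t M = 1024 * 1024;
      PhysicalMemoryModel pm(2048 * M /* requested max */, 512 * M /* small backing */);
      pm.try_ensure_unused_capacity(1024 * M);
      std::printf("current max lowered to %zuM\n", pm.current_max_capacity() / M);
      return 0;
    }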
--- a/src/hotspot/share/gc/z/zPhysicalMemory.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zPhysicalMemory.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,11 +70,10 @@ private: ZPhysicalMemoryBacking _backing; const size_t _max_capacity; + size_t _current_max_capacity; size_t _capacity; size_t _used; - bool ensure_available(size_t size); - void nmt_commit(ZPhysicalMemory pmem, uintptr_t offset); void nmt_uncommit(ZPhysicalMemory pmem, uintptr_t offset); @@ -84,9 +83,11 @@ bool is_initialized() const; size_t max_capacity() const; + size_t current_max_capacity() const; size_t capacity() const; - size_t used() const; - size_t available() const; + size_t unused_capacity() const; + + void try_ensure_unused_capacity(size_t size); ZPhysicalMemory alloc(size_t size); void free(ZPhysicalMemory pmem);
--- a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,16 +71,16 @@ return _max_capacity; } +inline size_t ZPhysicalMemoryManager::current_max_capacity() const { + return _current_max_capacity; +} + inline size_t ZPhysicalMemoryManager::capacity() const { return _capacity; } -inline size_t ZPhysicalMemoryManager::used() const { - return _used; -} - -inline size_t ZPhysicalMemoryManager::available() const { - return _max_capacity - _used; +inline size_t ZPhysicalMemoryManager::unused_capacity() const { + return _capacity - _used; } #endif // SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP
--- a/src/hotspot/share/gc/z/zPreMappedMemory.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/gc/z/zPreMappedMemory.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,22 +42,26 @@ log_info(gc, init)("Pre-touching: %s", AlwaysPreTouch ? "Enabled" : "Disabled"); log_info(gc, init)("Pre-mapping: " SIZE_FORMAT "M", size / M); - _pmem = pmm.alloc(size); - if (_pmem.is_null()) { - // Out of memory - return; - } + if (size > 0) { + _pmem = pmm.alloc(size); + if (_pmem.is_null()) { + // Out of memory + log_error(gc, init)("Failed to pre-map Java heap (Cannot allocate physical memory)"); + return; + } - _vmem = vmm.alloc(size, true /* alloc_from_front */); - if (_vmem.is_null()) { - // Out of address space - pmm.free(_pmem); - return; + _vmem = vmm.alloc(size, true /* alloc_from_front */); + if (_vmem.is_null()) { + // Out of address space + log_error(gc, init)("Failed to pre-map Java heap (Cannot allocate virtual memory)"); + pmm.free(_pmem); + return; + } + + // Map physical memory + pmm.map(_pmem, _vmem.start()); } - // Map physical memory - pmm.map(_pmem, _vmem.start()); - _initialized = true; }
--- a/src/hotspot/share/include/jvm.h Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/include/jvm.h Thu Jul 12 11:09:23 2018 -0700 @@ -171,6 +171,8 @@ JNIEXPORT jobjectArray JNICALL JVM_GetVmArguments(JNIEnv *env); +JNIEXPORT void JNICALL +JVM_InitializeFromArchive(JNIEnv* env, jclass cls); /* * java.lang.Throwable
--- a/src/hotspot/share/interpreter/linkResolver.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/interpreter/linkResolver.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1221,17 +1221,14 @@ // check if the method is not <init> resolved_method->name() != vmSymbols::object_initializer_name()) { - // check if this is an old-style super call and do a new lookup if so - // a) check if ACC_SUPER flag is set for the current class Klass* current_klass = link_info.current_klass(); - if ((current_klass->is_super() || !AllowNonVirtualCalls) && - // b) check if the class of the resolved_klass is a superclass - // (not supertype in order to exclude interface classes) of the current class. - // This check is not performed for super.invoke for interface methods - // in super interfaces. - current_klass->is_subclass_of(resolved_klass) && - current_klass != resolved_klass - ) { + + // Check if the class of the resolved_klass is a superclass + // (not supertype in order to exclude interface classes) of the current class. + // This check is not performed for super.invoke for interface methods + // in super interfaces. + if (current_klass->is_subclass_of(resolved_klass) && + current_klass != resolved_klass) { // Lookup super method Klass* super_klass = current_klass->super(); sel_method = lookup_instance_method_in_klasses(super_klass,
--- a/src/hotspot/share/jfr/jfr.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/jfr/jfr.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -26,10 +26,10 @@ #include "jfr/jfr.hpp" #include "jfr/leakprofiler/leakProfiler.hpp" #include "jfr/periodic/sampling/jfrThreadSampler.hpp" -#include "jfr/recorder/service/jfrOptionSet.hpp" #include "jfr/recorder/jfrRecorder.hpp" #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp" #include "jfr/recorder/repository/jfrEmergencyDump.hpp" +#include "jfr/recorder/service/jfrOptionSet.hpp" #include "jfr/support/jfrThreadLocal.hpp" #include "runtime/java.hpp" @@ -64,9 +64,7 @@ } void Jfr::on_thread_exit(JavaThread* thread) { - if (JfrRecorder::is_recording()) { - JfrThreadLocal::on_exit(thread); - } + JfrThreadLocal::on_exit(thread); } void Jfr::on_thread_destruct(Thread* thread) {
--- a/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/jfr/leakprofiler/utilities/saveRestore.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -74,7 +74,6 @@ CLDClaimContext::~CLDClaimContext() { if (_cld != NULL) { - assert(!_cld->claimed(), "invariant"); _cld->claim(); assert(_cld->claimed(), "invariant"); }
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -60,22 +60,32 @@ #include "gc/g1/g1YCTypes.hpp" #endif -class JfrCheckpointThreadCountClosure : public ThreadClosure { -private: - u4 _total_threads; -public: - JfrCheckpointThreadCountClosure() : _total_threads(0) {} - u4 total_threads() { return _total_threads; } - void do_thread(Thread *t) { _total_threads++; } -}; - // Requires a ResourceMark for get_thread_name/as_utf8 class JfrCheckpointThreadClosure : public ThreadClosure { private: JfrCheckpointWriter& _writer; - Thread* _curthread; + JfrCheckpointContext _ctx; + const intptr_t _count_position; + Thread* const _curthread; + u4 _count; + public: - JfrCheckpointThreadClosure(JfrCheckpointWriter& writer) : _writer(writer), _curthread(Thread::current()) {} + JfrCheckpointThreadClosure(JfrCheckpointWriter& writer) : _writer(writer), + _ctx(writer.context()), + _count_position(writer.reserve(sizeof(u4))), + _curthread(Thread::current()), + _count(0) { + } + + ~JfrCheckpointThreadClosure() { + if (_count == 0) { + // restore + _writer.set_context(_ctx); + return; + } + _writer.write_count(_count, _count_position); + } + void do_thread(Thread* t); }; @@ -83,10 +93,16 @@ void JfrCheckpointThreadClosure::do_thread(Thread* t) { assert(t != NULL, "invariant"); assert_locked_or_safepoint(Threads_lock); - _writer.write_key(t->jfr_thread_local()->thread_id()); + const JfrThreadLocal* const tl = t->jfr_thread_local(); + assert(tl != NULL, "invariant"); + if (tl->is_dead()) { + return; + } + ++_count; + _writer.write_key(tl->thread_id()); _writer.write(t->name()); const OSThread* const os_thread = t->osthread(); - _writer.write<traceid>(os_thread != NULL ? os_thread->thread_id() : (u8)0); + _writer.write<traceid>(os_thread != NULL ? os_thread->thread_id() : 0); if (t->is_Java_thread()) { JavaThread* const jt = (JavaThread*)t; _writer.write(jt->name()); @@ -97,17 +113,12 @@ return; } _writer.write((const char*)NULL); // java name - _writer.write<traceid>((traceid)0); // java thread id - _writer.write<traceid>((traceid)0); // java thread group + _writer.write((traceid)0); // java thread id + _writer.write((traceid)0); // java thread group } void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) { assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - JfrCheckpointThreadCountClosure tcc; - Threads::threads_do(&tcc); - const u4 total_threads = tcc.total_threads(); - // THREADS - writer.write_count(total_threads); JfrCheckpointThreadClosure tc(writer); Threads::threads_do(&tc); } @@ -334,7 +345,7 @@ writer.write_count(1); writer.write_key(_thread->jfr_thread_local()->thread_id()); writer.write(thread_name); - writer.write((u8)_thread->osthread()->thread_id()); + writer.write((traceid)_thread->osthread()->thread_id()); writer.write(thread_name); writer.write(java_lang_thread_id); writer.write(thread_group_id);
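The rewritten JfrCheckpointThreadClosure reserves room for the thread count up front, skips dead threads, and in its destructor either patches the real count or restores the saved writer context when nothing was written. A standalone sketch of that reserve-then-patch pattern (toy byte-buffer writer, not the JFR one):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    // Toy writer over a byte buffer; models reserve(), write_count() and the
    // context save/restore used by the closure.
    class Writer {
      std::vector<uint8_t> _buf;
     public:
      size_t position() const { return _buf.size(); }
      void set_position(size_t pos) { _buf.resize(pos); }   // rollback, like set_context()
      size_t reserve(size_t bytes) {                        // leave a hole, return its offset
        size_t pos = _buf.size();
        _buf.resize(pos + bytes);
        return pos;
      }
      void write_u4(uint32_t v) { append(&v, sizeof v); }
      void write_count_at(uint32_t v, size_t pos) { std::memcpy(&_buf[pos], &v, sizeof v); }
     private:
      void append(const void* p, size_t n) {
        const uint8_t* b = static_cast<const uint8_t*>(p);
        _buf.insert(_buf.end(), b, b + n);
      }
    };

    int main() {
      Writer w;
      const size_t saved = w.position();       // "context" captured before iterating
      const size_t count_pos = w.reserve(4);   // hole for the thread count
      uint32_t count = 0;
      const bool dead[] = {false, true, false};
      for (bool d : dead) {
        if (d) continue;                       // dead threads are skipped entirely
        ++count;
        w.write_u4(42);                        // stand-in for the per-thread payload
      }
      if (count == 0) {
        w.set_position(saved);                 // nothing written: roll the buffer back
      } else {
        w.write_count_at(count, count_pos);    // patch the real count afterwards
      }
      std::printf("threads written: %u\n", (unsigned)count);
      return 0;
    }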
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -148,9 +148,8 @@ } void JfrTypeManager::write_type_set() { - assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); // can safepoint here because of Module_lock - MutexLockerEx lock(Module_lock); + MutexLockerEx lock(SafepointSynchronize::is_at_safepoint() ? NULL : Module_lock); JfrCheckpointWriter writer(true, true, Thread::current()); TypeSet set; set.serialize(writer);
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/jfr/support/jfrThreadLocal.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -23,8 +23,9 @@ */ #include "precompiled.hpp" +#include "jfr/jni/jfrJavaSupport.hpp" #include "jfr/periodic/jfrThreadCPULoadEvent.hpp" -#include "jfr/jni/jfrJavaSupport.hpp" +#include "jfr/recorder/jfrRecorder.hpp" #include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp" #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp" #include "jfr/recorder/service/jfrOptionSet.hpp" @@ -51,7 +52,8 @@ _wallclock_time(os::javaTimeNanos()), _stack_trace_hash(0), _stackdepth(0), - _entering_suspend_flag(0) {} + _entering_suspend_flag(0), + _dead(false) {} u8 JfrThreadLocal::add_data_lost(u8 value) { _data_lost += value; @@ -71,9 +73,17 @@ return _thread_cp; } +void JfrThreadLocal::set_dead() { + assert(!is_dead(), "invariant"); + _dead = true; +} + void JfrThreadLocal::on_exit(JavaThread* thread) { - JfrCheckpointManager::write_thread_checkpoint(thread); - JfrThreadCPULoadEvent::send_event_for_thread(thread); + if (JfrRecorder::is_recording()) { + JfrCheckpointManager::write_thread_checkpoint(thread); + JfrThreadCPULoadEvent::send_event_for_thread(thread); + } + thread->jfr_thread_local()->set_dead(); } void JfrThreadLocal::on_destruct(Thread* thread) {
--- a/src/hotspot/share/jfr/support/jfrThreadLocal.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/jfr/support/jfrThreadLocal.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -50,11 +50,14 @@ unsigned int _stack_trace_hash; mutable u4 _stackdepth; volatile jint _entering_suspend_flag; + bool _dead; JfrBuffer* install_native_buffer() const; JfrBuffer* install_java_buffer() const; JfrStackFrame* install_stackframes() const; + void set_dead(); + public: JfrThreadLocal(); @@ -202,6 +205,10 @@ _trace_id = id; } + bool is_dead() const { + return _dead; + } + bool has_thread_checkpoint() const; void set_thread_checkpoint(const JfrCheckpointBlobHandle& handle); const JfrCheckpointBlobHandle& thread_checkpoint() const;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/memory/heapShared.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -0,0 +1,506 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "classfile/javaClasses.inline.hpp" +#include "classfile/vmSymbols.hpp" +#include "logging/log.hpp" +#include "logging/logMessage.hpp" +#include "logging/logStream.hpp" +#include "memory/heapShared.hpp" +#include "memory/iterator.inline.hpp" +#include "memory/metadataFactory.hpp" +#include "memory/metaspaceClosure.hpp" +#include "memory/metaspaceShared.hpp" +#include "memory/resourceArea.hpp" +#include "oops/compressedOops.inline.hpp" +#include "oops/oop.inline.hpp" + +#if INCLUDE_CDS_JAVA_HEAP +KlassSubGraphInfo* HeapShared::_subgraph_info_list = NULL; +int HeapShared::_num_archived_subgraph_info_records = 0; +Array<ArchivedKlassSubGraphInfoRecord>* HeapShared::_archived_subgraph_info_records = NULL; + +// Currently there is only one class mirror (ArchivedModuleGraph) with archived +// sub-graphs. +KlassSubGraphInfo* HeapShared::find_subgraph_info(Klass* k) { + KlassSubGraphInfo* info = _subgraph_info_list; + while (info != NULL) { + if (info->klass() == k) { + return info; + } + info = info->next(); + } + return NULL; +} + +// Get the subgraph_info for Klass k. A new subgraph_info is created if +// there is no existing one for k. The subgraph_info records the relocated +// Klass* of the original k. +KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) { + Klass* relocated_k = MetaspaceShared::get_relocated_klass(k); + KlassSubGraphInfo* info = find_subgraph_info(relocated_k); + if (info != NULL) { + return info; + } + + info = new KlassSubGraphInfo(relocated_k, _subgraph_info_list); + _subgraph_info_list = info; + return info; +} + +int HeapShared::num_of_subgraph_infos() { + int num = 0; + KlassSubGraphInfo* info = _subgraph_info_list; + while (info != NULL) { + num ++; + info = info->next(); + } + return num; +} + +// Add an entry field to the current KlassSubGraphInfo. +void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) { + assert(DumpSharedSpaces, "dump time only"); + if (_subgraph_entry_fields == NULL) { + _subgraph_entry_fields = + new(ResourceObj::C_HEAP, mtClass) GrowableArray<juint>(10, true); + } + _subgraph_entry_fields->append((juint)static_field_offset); + _subgraph_entry_fields->append(CompressedOops::encode(v)); +} + +// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs. 
+// Only objects of boot classes can be included in sub-graph. +void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass *relocated_k) { + assert(DumpSharedSpaces, "dump time only"); + assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k), + "must be the relocated Klass in the shared space"); + + if (_subgraph_object_klasses == NULL) { + _subgraph_object_klasses = + new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, true); + } + + assert(relocated_k->is_shared(), "must be a shared class"); + if (relocated_k->is_instance_klass()) { + assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(), + "must be boot class"); + // SystemDictionary::xxx_klass() are not updated, need to check + // the original Klass* + if (orig_k == SystemDictionary::String_klass() || + orig_k == SystemDictionary::Object_klass()) { + // Initialized early during VM initialization. No need to be added + // to the sub-graph object class list. + return; + } + } else if (relocated_k->is_objArray_klass()) { + Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass(); + if (abk->is_instance_klass()) { + assert(InstanceKlass::cast(abk)->is_shared_boot_class(), + "must be boot class"); + } + if (relocated_k == Universe::objectArrayKlassObj()) { + // Initialized early during Universe::genesis. No need to be added + // to the list. + return; + } + } else { + assert(relocated_k->is_typeArray_klass(), "must be"); + // Primitive type arrays are created early during Universe::genesis. + return; + } + + _subgraph_object_klasses->append_if_missing(relocated_k); +} + +// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo. +void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) { + _k = info->klass(); + _next = NULL; + _entry_field_records = NULL; + _subgraph_klasses = NULL; + + // populate the entry fields + GrowableArray<juint>* entry_fields = info->subgraph_entry_fields(); + if (entry_fields != NULL) { + int num_entry_fields = entry_fields->length(); + assert(num_entry_fields % 2 == 0, "sanity"); + _entry_field_records = + MetaspaceShared::new_ro_array<juint>(num_entry_fields); + for (int i = 0 ; i < num_entry_fields; i++) { + _entry_field_records->at_put(i, entry_fields->at(i)); + } + } + + // the Klasses of the objects in the sub-graphs + GrowableArray<Klass*>* subgraph_klasses = info->subgraph_object_klasses(); + if (subgraph_klasses != NULL) { + int num_subgraphs_klasses = subgraph_klasses->length(); + _subgraph_klasses = + MetaspaceShared::new_ro_array<Klass*>(num_subgraphs_klasses); + for (int i = 0; i < num_subgraphs_klasses; i++) { + Klass* subgraph_k = subgraph_klasses->at(i); + if (log_is_enabled(Info, cds, heap)) { + ResourceMark rm; + log_info(cds, heap)( + "Archived object klass (%d): %s in %s sub-graphs", + i, subgraph_k->external_name(), _k->external_name()); + } + _subgraph_klasses->at_put(i, subgraph_k); + } + } +} + +// Build the records of archived subgraph infos, which include: +// - Entry points to all subgraphs from the containing class mirror. The entry +// points are static fields in the mirror. For each entry point, the field +// offset and value are recorded in the sub-graph info. The value are stored +// back to the corresponding field at runtime. +// - A list of klasses that need to be loaded/initialized before archived +// java object sub-graph can be accessed at runtime. +// +// The records are saved in the archive file and reloaded at runtime. 
Currently +// there is only one class mirror (ArchivedModuleGraph) with archived sub-graphs. +// +// Layout of the archived subgraph info records: +// +// records_size | num_records | records* +// ArchivedKlassSubGraphInfoRecord | entry_fields | subgraph_object_klasses +size_t HeapShared::build_archived_subgraph_info_records(int num_records) { + // remember the start address + char* start_p = MetaspaceShared::read_only_space_top(); + + // now populate the archived subgraph infos, which will be saved in the + // archive file + _archived_subgraph_info_records = + MetaspaceShared::new_ro_array<ArchivedKlassSubGraphInfoRecord>(num_records); + KlassSubGraphInfo* info = _subgraph_info_list; + int i = 0; + while (info != NULL) { + assert(i < _archived_subgraph_info_records->length(), "sanity"); + ArchivedKlassSubGraphInfoRecord* record = + _archived_subgraph_info_records->adr_at(i); + record->init(info); + info = info->next(); + i ++; + } + + // _subgraph_info_list is no longer needed + delete _subgraph_info_list; + _subgraph_info_list = NULL; + + char* end_p = MetaspaceShared::read_only_space_top(); + size_t records_size = end_p - start_p; + return records_size; +} + +// Write the subgraph info records in the shared _ro region +void HeapShared::write_archived_subgraph_infos() { + assert(DumpSharedSpaces, "dump time only"); + + Array<intptr_t>* records_header = MetaspaceShared::new_ro_array<intptr_t>(3); + + _num_archived_subgraph_info_records = num_of_subgraph_infos(); + size_t records_size = build_archived_subgraph_info_records( + _num_archived_subgraph_info_records); + + // Now write the header information: + // records_size, num_records, _archived_subgraph_info_records + assert(records_header != NULL, "sanity"); + intptr_t* p = (intptr_t*)(records_header->data()); + *p = (intptr_t)records_size; + p ++; + *p = (intptr_t)_num_archived_subgraph_info_records; + p ++; + *p = (intptr_t)_archived_subgraph_info_records; +} + +char* HeapShared::read_archived_subgraph_infos(char* buffer) { + Array<intptr_t>* records_header = (Array<intptr_t>*)buffer; + intptr_t* p = (intptr_t*)(records_header->data()); + size_t records_size = (size_t)(*p); + p ++; + _num_archived_subgraph_info_records = *p; + p ++; + _archived_subgraph_info_records = + (Array<ArchivedKlassSubGraphInfoRecord>*)(*p); + + buffer = (char*)_archived_subgraph_info_records + records_size; + return buffer; +} + +void HeapShared::initialize_from_archived_subgraph(Klass* k) { + if (!MetaspaceShared::open_archive_heap_region_mapped()) { + return; // nothing to do + } + + if (_num_archived_subgraph_info_records == 0) { + return; // no subgraph info records + } + + // Initialize from archived data. Currently only ArchivedModuleGraph + // has archived object subgraphs, which is used during VM initialization + // time when bootstraping the system modules. No lock is needed. + Thread* THREAD = Thread::current(); + for (int i = 0; i < _archived_subgraph_info_records->length(); i++) { + ArchivedKlassSubGraphInfoRecord* record = _archived_subgraph_info_records->adr_at(i); + if (record->klass() == k) { + int i; + // Found the archived subgraph info record for the requesting klass. + // Load/link/initialize the klasses of the objects in the subgraph. + // NULL class loader is used. 
+ Array<Klass*>* klasses = record->subgraph_klasses(); + if (klasses != NULL) { + for (i = 0; i < klasses->length(); i++) { + Klass* obj_k = klasses->at(i); + Klass* resolved_k = SystemDictionary::resolve_or_null( + (obj_k)->name(), THREAD); + if (resolved_k != obj_k) { + return; + } + if ((obj_k)->is_instance_klass()) { + InstanceKlass* ik = InstanceKlass::cast(obj_k); + ik->initialize(THREAD); + } else if ((obj_k)->is_objArray_klass()) { + ObjArrayKlass* oak = ObjArrayKlass::cast(obj_k); + oak->initialize(THREAD); + } + } + } + + if (HAS_PENDING_EXCEPTION) { + CLEAR_PENDING_EXCEPTION; + // None of the field value will be set if there was an exception. + // The java code will not see any of the archived objects in the + // subgraphs referenced from k in this case. + return; + } + + // Load the subgraph entry fields from the record and store them back to + // the corresponding fields within the mirror. + oop m = k->java_mirror(); + Array<juint>* entry_field_records = record->entry_field_records(); + if (entry_field_records != NULL) { + int efr_len = entry_field_records->length(); + assert(efr_len % 2 == 0, "sanity"); + for (i = 0; i < efr_len;) { + int field_offset = entry_field_records->at(i); + // The object refereced by the field becomes 'known' by GC from this + // point. All objects in the subgraph reachable from the object are + // also 'known' by GC. + oop v = MetaspaceShared::materialize_archived_object( + CompressedOops::decode(entry_field_records->at(i+1))); + m->obj_field_put(field_offset, v); + i += 2; + } + } + + // Done. Java code can see the archived sub-graphs referenced from k's + // mirror after this point. + return; + } + } +} + +class WalkOopAndArchiveClosure: public BasicOopIterateClosure { + int _level; + KlassSubGraphInfo* _subgraph_info; + oop _orig_referencing_obj; + oop _archived_referencing_obj; + public: + WalkOopAndArchiveClosure(int level, KlassSubGraphInfo* subgraph_info, + oop orig, oop archived) : _level(level), + _subgraph_info(subgraph_info), + _orig_referencing_obj(orig), + _archived_referencing_obj(archived) {} + void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } + void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } + + protected: + template <class T> void do_oop_work(T *p) { + oop obj = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(obj)) { + // A java.lang.Class instance can not be included in an archived + // object sub-graph. + if (java_lang_Class::is_instance(obj)) { + tty->print("Unknown java.lang.Class object is in the archived sub-graph\n"); + vm_exit(1); + } + + LogTarget(Debug, cds, heap) log; + LogStream ls(log); + outputStream* out = &ls; + { + ResourceMark rm; + log.print("(%d) %s <--- referenced from: %s", + _level, obj->klass()->external_name(), + CompressedOops::is_null(_orig_referencing_obj) ? 
+ "" : _orig_referencing_obj->klass()->external_name()); + obj->print_on(out); + } + + if (MetaspaceShared::is_archive_object(obj)) { + // The current oop is an archived oop, nothing needs to be done + log.print("--- object is already archived ---"); + return; + } + + size_t field_delta = pointer_delta( + p, _orig_referencing_obj, sizeof(char)); + T* new_p = (T*)(address(_archived_referencing_obj) + field_delta); + oop archived = MetaspaceShared::find_archived_heap_object(obj); + if (archived != NULL) { + // There is an archived copy existing, update reference to point + // to the archived copy + RawAccess<IS_NOT_NULL>::oop_store(new_p, archived); + log.print( + "--- found existing archived copy, store archived " PTR_FORMAT " in " PTR_FORMAT, + p2i(archived), p2i(new_p)); + return; + } + + int l = _level + 1; + Thread* THREAD = Thread::current(); + // Archive the current oop before iterating through its references + archived = MetaspaceShared::archive_heap_object(obj, THREAD); + assert(MetaspaceShared::is_archive_object(archived), "must be archived"); + log.print("=== archiving oop " PTR_FORMAT " ==> " PTR_FORMAT, + p2i(obj), p2i(archived)); + + // Following the references in the current oop and archive any + // encountered objects during the process + WalkOopAndArchiveClosure walker(l, _subgraph_info, obj, archived); + obj->oop_iterate(&walker); + + // Update the reference in the archived copy of the referencing object + RawAccess<IS_NOT_NULL>::oop_store(new_p, archived); + log.print("=== store archived " PTR_FORMAT " in " PTR_FORMAT, + p2i(archived), p2i(new_p)); + + // Add the klass to the list of classes that need to be loaded before + // module system initialization + Klass *orig_k = obj->klass(); + Klass *relocated_k = archived->klass(); + _subgraph_info->add_subgraph_object_klass(orig_k, relocated_k); + } + } +}; + +// +// Start from the given static field in a java mirror and archive the +// complete sub-graph of java heap objects that are reached directly +// or indirectly from the starting object by following references. +// Currently, only ArchivedModuleGraph class instance (mirror) has archived +// object subgraphs. Sub-graph archiving restrictions (current): +// +// - All classes of objects in the archived sub-graph (including the +// entry class) must be boot class only. +// - No java.lang.Class instance (java mirror) can be included inside +// an archived sub-graph. Mirror can only be the sub-graph entry object. +// +// The Java heap object sub-graph archiving process (see +// WalkOopAndArchiveClosure): +// +// 1) Java object sub-graph archiving starts from a given static field +// within a Class instance (java mirror). If the static field is a +// refererence field and points to a non-null java object, proceed to +// the next step. +// +// 2) Archives the referenced java object. If an archived copy of the +// current object already exists, updates the pointer in the archived +// copy of the referencing object to point to the current archived object. +// Otherwise, proceed to the next step. +// +// 3) Follows all references within the current java object and recursively +// archive the sub-graph of objects starting from each reference. +// +// 4) Updates the pointer in the archived copy of referencing object to +// point to the current archived object. +// +// 5) The Klass of the current java object is added to the list of Klasses +// for loading and initialzing before any object in the archived graph can +// be accessed at runtime. 
+// +void HeapShared::archive_reachable_objects_from_static_field(Klass *k, + int field_offset, + BasicType field_type, + TRAPS) { + assert(DumpSharedSpaces, "dump time only"); + assert(k->is_instance_klass(), "sanity"); + assert(InstanceKlass::cast(k)->is_shared_boot_class(), + "must be boot class"); + + oop m = k->java_mirror(); + oop archived_m = MetaspaceShared::find_archived_heap_object(m); + if (CompressedOops::is_null(archived_m)) { + return; + } + + if (field_type == T_OBJECT) { + // obtain k's subGraph Info + KlassSubGraphInfo* subgraph_info = get_subgraph_info(k); + + // get the object referenced by the field + oop f = m->obj_field(field_offset); + if (!CompressedOops::is_null(f)) { + LogTarget(Debug, cds, heap) log; + LogStream ls(log); + outputStream* out = &ls; + log.print("Start from: "); + f->print_on(out); + + // get the archived copy of the field referenced object + oop af = MetaspaceShared::archive_heap_object(f, THREAD); + if (!MetaspaceShared::is_archive_object(f)) { + WalkOopAndArchiveClosure walker(1, subgraph_info, f, af); + f->oop_iterate(&walker); + } + + // The field value is not preserved in the archived mirror. + // Record the field as a new subGraph entry point. The recorded + // information is restored from the archive at runtime. + subgraph_info->add_subgraph_entry_field(field_offset, af); + Klass *relocated_k = af->klass(); + Klass *orig_k = f->klass(); + subgraph_info->add_subgraph_object_klass(orig_k, relocated_k); + } else { + // The field contains null, we still need to record the entry point, + // so it can be restored at runtime. + subgraph_info->add_subgraph_entry_field(field_offset, NULL); + } + } else { + ShouldNotReachHere(); + } +} + +#define do_module_object_graph(archive_object_graph_do) \ + archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedSystemModules_offset(), T_OBJECT, CHECK); \ + archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedModuleFinder_offset(), T_OBJECT, CHECK); \ + archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedMainModule_offset(), T_OBJECT, CHECK) + +void HeapShared::archive_module_graph_objects(Thread* THREAD) { + do_module_object_graph(archive_reachable_objects_from_static_field); +} +#endif // INCLUDE_CDS_JAVA_HEAP
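WalkOopAndArchiveClosure above performs a depth-first copy of every object reachable from a static field, reusing an already-archived copy when one exists and patching the reference in the archived copy of the referencing object. A standalone model of that walk over a toy object graph (plain heap pointers rather than oops, for illustration only):

    #include <cstddef>
    #include <cstdio>
    #include <map>
    #include <vector>

    // Toy object: just a list of references to other objects.
    struct Obj {
      std::vector<Obj*> refs;
    };

    // Maps an original object to its archived copy, like the ArchivedObjectCache.
    std::map<Obj*, Obj*> archived;

    // Depth-first archiving of everything reachable from obj; returns the copy.
    Obj* archive_reachable(Obj* obj) {
      if (obj == nullptr) return nullptr;
      auto it = archived.find(obj);
      if (it != archived.end()) return it->second;  // reuse an existing archived copy
      Obj* copy = new Obj(*obj);                    // "archive" the current object first
      archived[obj] = copy;
      for (size_t i = 0; i < obj->refs.size(); i++) {
        // Follow each reference and point the archived copy at archived targets.
        copy->refs[i] = archive_reachable(obj->refs[i]);
      }
      return copy;
    }

    int main() {
      Obj a, b, c;
      a.refs = {&b, &c};
      b.refs = {&c};   // c is shared, so it must be archived exactly once
      Obj* a_copy = archive_reachable(&a);
      std::printf("archived objects: %zu, shared target reused: %d\n",
                  archived.size(), a_copy->refs[1] == archived[&c]);
      return 0;
    }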
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/memory/heapShared.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_MEMORY_HEAPSHARED_HPP +#define SHARE_VM_MEMORY_HEAPSHARED_HPP + +#include "classfile/systemDictionary.hpp" +#include "memory/universe.hpp" +#include "oops/objArrayKlass.hpp" +#include "oops/oop.hpp" +#include "oops/typeArrayKlass.hpp" +#include "utilities/growableArray.hpp" + +#if INCLUDE_CDS_JAVA_HEAP +// A dump time sub-graph info for Klass _k. It includes the entry points +// (static fields in _k's mirror) of the archived sub-graphs reachable +// from _k's mirror. It also contains a list of Klasses of the objects +// within the sub-graphs. +class KlassSubGraphInfo: public CHeapObj<mtClass> { + private: + KlassSubGraphInfo* _next; + // The class that contains the static field(s) as the entry point(s) + // of archived object sub-graph(s). + Klass* _k; + // A list of classes need to be loaded and initialized before the archived + // object sub-graphs can be accessed at runtime. + GrowableArray<Klass*>* _subgraph_object_klasses; + // A list of _k's static fields as the entry points of archived sub-graphs. + // For each entry field, it is a pair of field_offset and field_value. + GrowableArray<juint>* _subgraph_entry_fields; + + public: + KlassSubGraphInfo(Klass* k, KlassSubGraphInfo* next) : + _next(next), _k(k), _subgraph_object_klasses(NULL), + _subgraph_entry_fields(NULL) {} + ~KlassSubGraphInfo() { + if (_subgraph_object_klasses != NULL) { + delete _subgraph_object_klasses; + } + if (_subgraph_entry_fields != NULL) { + delete _subgraph_entry_fields; + } + }; + + KlassSubGraphInfo* next() { return _next; } + Klass* klass() { return _k; } + GrowableArray<Klass*>* subgraph_object_klasses() { + return _subgraph_object_klasses; + } + GrowableArray<juint>* subgraph_entry_fields() { + return _subgraph_entry_fields; + } + void add_subgraph_entry_field(int static_field_offset, oop v); + void add_subgraph_object_klass(Klass *orig_k, Klass *relocated_k); +}; + +// An archived record of object sub-graphs reachable from static +// fields within _k's mirror. The record is reloaded from the archive +// at runtime. 
+class ArchivedKlassSubGraphInfoRecord { + private: + ArchivedKlassSubGraphInfoRecord* _next; + Klass* _k; + + // contains pairs of field offset and value for each subgraph entry field + Array<juint>* _entry_field_records; + + // klasses of objects in archived sub-graphs referenced from the entry points + // (static fields) in the containing class + Array<Klass*>* _subgraph_klasses; + public: + ArchivedKlassSubGraphInfoRecord() : + _next(NULL), _k(NULL), _entry_field_records(NULL), _subgraph_klasses(NULL) {} + void init(KlassSubGraphInfo* info); + Klass* klass() { return _k; } + ArchivedKlassSubGraphInfoRecord* next() { return _next; } + void set_next(ArchivedKlassSubGraphInfoRecord* next) { _next = next; } + Array<juint>* entry_field_records() { return _entry_field_records; } + Array<Klass*>* subgraph_klasses() { return _subgraph_klasses; } +}; +#endif // INCLUDE_CDS_JAVA_HEAP + +class HeapShared: AllStatic { + private: +#if INCLUDE_CDS_JAVA_HEAP + // This is a list of subgraph infos built at dump time while + // archiving object subgraphs. + static KlassSubGraphInfo* _subgraph_info_list; + + // Contains a list of ArchivedKlassSubGraphInfoRecords that is stored + // in the archive file and reloaded at runtime. + static int _num_archived_subgraph_info_records; + static Array<ArchivedKlassSubGraphInfoRecord>* _archived_subgraph_info_records; + + // Archive object sub-graph starting from the given static field + // in Klass k's mirror. + static void archive_reachable_objects_from_static_field( + Klass* k, int field_ofset, BasicType field_type, TRAPS); + + static KlassSubGraphInfo* find_subgraph_info(Klass *k); + static KlassSubGraphInfo* get_subgraph_info(Klass *k); + static int num_of_subgraph_infos(); + + static size_t build_archived_subgraph_info_records(int num_records); +#endif // INCLUDE_CDS_JAVA_HEAP + public: + static char* read_archived_subgraph_infos(char* buffer) NOT_CDS_JAVA_HEAP_RETURN_(buffer); + static void write_archived_subgraph_infos() NOT_CDS_JAVA_HEAP_RETURN; + static void initialize_from_archived_subgraph(Klass* k) NOT_CDS_JAVA_HEAP_RETURN; + + static void archive_module_graph_objects(Thread* THREAD) NOT_CDS_JAVA_HEAP_RETURN; +}; +#endif // SHARE_VM_MEMORY_HEAPSHARED_HPP
--- a/src/hotspot/share/memory/metaspaceShared.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/memory/metaspaceShared.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -39,6 +39,7 @@ #include "logging/log.hpp" #include "logging/logMessage.hpp" #include "memory/filemap.hpp" +#include "memory/heapShared.hpp" #include "memory/metaspace.hpp" #include "memory/metaspaceClosure.hpp" #include "memory/metaspaceShared.hpp" @@ -207,6 +208,10 @@ return _ro_region.allocate(num_bytes); } +char* MetaspaceShared::read_only_space_top() { + return _ro_region.top(); +} + void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() { assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled"); @@ -456,6 +461,7 @@ java_lang_StackFrameInfo::serialize(soc); java_lang_LiveStackFrameInfo::serialize(soc); java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize(soc); + jdk_internal_module_ArchivedModuleGraph::serialize(soc); } address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) { @@ -1350,6 +1356,11 @@ char* table_top = _ro_region.allocate(table_bytes, sizeof(intptr_t)); SystemDictionary::copy_table(table_top, _ro_region.top()); + // Write the archived object sub-graph infos. For each klass with sub-graphs, + // the info includes the static fields (sub-graph entry points) and Klasses + // of objects included in the sub-graph. + HeapShared::write_archived_subgraph_infos(); + // Write the other data to the output array. WriteClosure wc(&_ro_region); MetaspaceShared::serialize(&wc); @@ -1861,6 +1872,8 @@ MetaspaceShared::archive_klass_objects(THREAD); + HeapShared::archive_module_graph_objects(THREAD); + G1CollectedHeap::heap()->end_archive_alloc_range(open_archive, os::vm_allocation_granularity()); } @@ -1906,14 +1919,16 @@ ArchivedObjectCache* cache = MetaspaceShared::archive_object_cache(); cache->put(obj, archived_oop); } - log_debug(cds)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT, - p2i(obj), p2i(archived_oop)); + log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT, + p2i(obj), p2i(archived_oop)); return archived_oop; } oop MetaspaceShared::materialize_archived_object(oop obj) { - assert(obj != NULL, "sanity"); - return G1CollectedHeap::heap()->materialize_archived_object(obj); + if (obj != NULL) { + return G1CollectedHeap::heap()->materialize_archived_object(obj); + } + return NULL; } void MetaspaceShared::archive_klass_objects(Thread* THREAD) { @@ -2121,6 +2136,9 @@ buffer += sizeof(intptr_t); buffer += len; + // The table of archived java heap object sub-graph infos + buffer = HeapShared::read_archived_subgraph_infos(buffer); + // Verify various attributes of the archive, plus initialize the // shared string/symbol tables intptr_t* array = (intptr_t*)buffer;
--- a/src/hotspot/share/memory/metaspaceShared.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/memory/metaspaceShared.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -232,6 +232,8 @@ static char* misc_code_space_alloc(size_t num_bytes); static char* read_only_space_alloc(size_t num_bytes); + static char* read_only_space_top(); + template <typename T> static Array<T>* new_ro_array(int length) { #if INCLUDE_CDS
--- a/src/hotspot/share/oops/constantPool.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/oops/constantPool.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -807,6 +807,17 @@ } } +constantTag ConstantPool::constant_tag_at(int which) { + constantTag tag = tag_at(which); + if (tag.is_dynamic_constant() || + tag.is_dynamic_constant_in_error()) { + // have to look at the signature for this one + Symbol* constant_type = uncached_signature_ref_at(which); + return constantTag::ofBasicType(FieldType::basic_type(constant_type)); + } + return tag; +} + BasicType ConstantPool::basic_type_for_constant_at(int which) { constantTag tag = tag_at(which); if (tag.is_dynamic_constant() ||
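constant_tag_at() refines the pool tag for CONSTANT_Dynamic entries: the tag alone does not say what kind of value the constant produces, so the field descriptor in its signature is mapped to a basic type and a tag is synthesized from that. A rough standalone sketch of the descriptor-to-kind step only (simplified; the real code goes through FieldType::basic_type() and constantTag::ofBasicType()):

    #include <cstdio>
    #include <string>

    enum class ConstKind { Int, Long, Float, Double, Reference };

    // Simplified mapping from the first character of a field descriptor to a constant kind.
    static ConstKind kind_for_descriptor(const std::string& desc) {
      switch (desc.empty() ? '\0' : desc[0]) {
        case 'J': return ConstKind::Long;
        case 'F': return ConstKind::Float;
        case 'D': return ConstKind::Double;
        case 'L': case '[': return ConstKind::Reference;
        default:  return ConstKind::Int;   // Z, B, C, S, I all surface as an int-like constant here
      }
    }

    int main() {
      std::printf("descriptor \"D\" -> kind %d\n", static_cast<int>(kind_for_descriptor("D")));
      return 0;
    }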
--- a/src/hotspot/share/oops/constantPool.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/oops/constantPool.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -719,6 +719,9 @@ enum { _no_index_sentinel = -1, _possible_index_sentinel = -2 }; public: + // Get the tag for a constant, which may involve a constant dynamic + constantTag constant_tag_at(int which); + // Get the basic type for a constant, which may involve a constant dynamic BasicType basic_type_for_constant_at(int which); // Resolve late bound constants.
--- a/src/hotspot/share/opto/gcm.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/opto/gcm.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -683,7 +683,7 @@ Block* store_block = get_block_for_node(store); assert(store_block != NULL, "unused killing projections skipped above"); - if (store->is_Phi() && store->in(0)->is_Loop()) { + if (store->is_Phi()) { // Loop-phis need to raise load before input. (Other phis are treated // as store below.) //
--- a/src/hotspot/share/opto/ifnode.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/opto/ifnode.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1490,7 +1490,8 @@ // be skipped. For example, range check predicate has two checks // for lower and upper bounds. ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj(); - if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL) { + if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL || + unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL) { prev_dom = idom; }
--- a/src/hotspot/share/opto/loopPredicate.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/opto/loopPredicate.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1056,7 +1056,9 @@ stack.push(in, 1); break; } else if (in->is_IfProj() && - in->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) { + in->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && + (in->in(0)->Opcode() == Op_If || + in->in(0)->Opcode() == Op_RangeCheck)) { if (pf.to(in) * loop_trip_cnt >= 1) { stack.push(in, 1); } @@ -1281,7 +1283,7 @@ Node* n = skip_loop_predicates(entry); // Check if predicates were already added to the profile predicate // block - if (n != entry->in(0)->in(0)) { + if (n != entry->in(0)->in(0) || n->outcnt() != 1) { has_profile_predicates = true; } entry = n;
--- a/src/hotspot/share/opto/loopTransform.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/opto/loopTransform.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -861,7 +861,9 @@ // Check for being too big if (body_size > (uint)_local_loop_unroll_limit) { - if ((UseSubwordForMaxVector || xors_in_loop >= 4) && body_size < (uint)LoopUnrollLimit * 4) return true; + if ((cl->is_subword_loop() || xors_in_loop >= 4) && body_size < (uint)LoopUnrollLimit * 4) { + return true; + } // Normal case: loop too big return false; }
--- a/src/hotspot/share/opto/loopnode.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/opto/loopnode.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -616,6 +616,11 @@ } IfNode* check_iff = limit_check_proj->in(0)->as_If(); + + if (!is_dominator(get_ctrl(limit), check_iff->in(0))) { + return false; + } + Node* cmp_limit; Node* bol; @@ -4224,34 +4229,34 @@ // which can inhibit range check elimination. if (least != early) { Node* ctrl_out = least->unique_ctrl_out(); - if (ctrl_out && ctrl_out->is_CountedLoop() && + if (ctrl_out && ctrl_out->is_Loop() && least == ctrl_out->in(LoopNode::EntryControl)) { + // Move the node above predicates as far up as possible so a + // following pass of loop predication doesn't hoist a predicate + // that depends on it above that node. Node* new_ctrl = least; - // Move the node above predicates so a following pass of loop - // predication doesn't hoist a predicate that depends on it - // above that node. - if (find_predicate_insertion_point(new_ctrl, Deoptimization::Reason_loop_limit_check) != NULL) { - new_ctrl = new_ctrl->in(0)->in(0); - assert(is_dominator(early, new_ctrl), "least != early so we can move up the dominator tree"); - } - if (find_predicate_insertion_point(new_ctrl, Deoptimization::Reason_profile_predicate) != NULL) { + for (;;) { + if (!new_ctrl->is_Proj()) { + break; + } + CallStaticJavaNode* call = new_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); + if (call == NULL) { + break; + } + int req = call->uncommon_trap_request(); + Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); + if (trap_reason != Deoptimization::Reason_loop_limit_check && + trap_reason != Deoptimization::Reason_predicate && + trap_reason != Deoptimization::Reason_profile_predicate) { + break; + } Node* c = new_ctrl->in(0)->in(0); - assert(is_dominator(early, c), "least != early so we can move up the dominator tree"); + if (is_dominator(c, early) && c != early) { + break; + } new_ctrl = c; } - if (find_predicate_insertion_point(new_ctrl, Deoptimization::Reason_predicate) != NULL) { - Node* c = new_ctrl->in(0)->in(0); - assert(is_dominator(early, c), "least != early so we can move up the dominator tree"); - new_ctrl = c; - } - if (new_ctrl != ctrl_out) { - least = new_ctrl; - } else if (ctrl_out->is_CountedLoop() || ctrl_out->is_OuterStripMinedLoop()) { - Node* least_dom = idom(least); - if (get_loop(least_dom)->is_member(get_loop(least))) { - least = least_dom; - } - } + least = new_ctrl; } }
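The rewritten block replaces three special-cased predicate lookups with a single loop that keeps climbing the control-input chain while the current node is a projection guarding a loop-limit-check, predicate, or profile-predicate trap, and stops before it would climb above the early placement. A standalone sketch of that climb with stand-in types (the Proj/CallStaticJava decoding and the dominator check are omitted):

    #include <cstdio>

    enum class Reason { None, LoopLimitCheck, Predicate, ProfilePredicate, Other };

    struct Ctrl {
      Ctrl*  parent;   // stand-in for new_ctrl->in(0)->in(0)
      Reason reason;   // stand-in for the decoded uncommon-trap reason; Other if not a guard
    };

    static Ctrl* climb_above_predicates(Ctrl* n) {
      while (n != nullptr && n->parent != nullptr) {
        if (n->reason != Reason::LoopLimitCheck &&
            n->reason != Reason::Predicate &&
            n->reason != Reason::ProfilePredicate) {
          break;                // not a predicate guard: stop here
        }
        n = n->parent;          // hoist the placement point above this predicate
      }
      return n;
    }

    int main() {
      Ctrl top{nullptr, Reason::Other};
      Ctrl pred{&top, Reason::Predicate};
      Ctrl profile{&pred, Reason::ProfilePredicate};
      std::printf("climbed to node with reason %d\n",
                  static_cast<int>(climb_above_predicates(&profile)->reason));
      return 0;
    }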
--- a/src/hotspot/share/opto/loopnode.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/opto/loopnode.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -75,7 +75,8 @@ HasRangeChecks=8192, IsMultiversioned=16384, StripMined=32768, - ProfileTripFailed=65536}; + SubwordLoop=65536, + ProfileTripFailed=131072}; char _unswitch_count; enum { _unswitch_max=3 }; char _postloop_flags; @@ -99,6 +100,7 @@ bool partial_peel_has_failed() const { return _loop_flags & PartialPeelFailed; } bool is_strip_mined() const { return _loop_flags & StripMined; } bool is_profile_trip_failed() const { return _loop_flags & ProfileTripFailed; } + bool is_subword_loop() const { return _loop_flags & SubwordLoop; } void mark_partial_peel_failed() { _loop_flags |= PartialPeelFailed; } void mark_has_reductions() { _loop_flags |= HasReductions; } @@ -112,6 +114,7 @@ void mark_strip_mined() { _loop_flags |= StripMined; } void clear_strip_mined() { _loop_flags &= ~StripMined; } void mark_profile_trip_failed() { _loop_flags |= ProfileTripFailed; } + void mark_subword_loop() { _loop_flags |= SubwordLoop; } int unswitch_max() { return _unswitch_max; } int unswitch_count() { return _unswitch_count; }
--- a/src/hotspot/share/opto/loopopts.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/opto/loopopts.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -257,6 +257,7 @@ ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj(); if (exclude_loop_predicate && (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL || + unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_profile_predicate) != NULL || unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != NULL)) { // If this is a range check (IfNode::is_range_check), do not // reorder because Compile::allow_range_check_smearing might have
--- a/src/hotspot/share/opto/memnode.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/opto/memnode.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -518,8 +518,7 @@ if (ac->is_clonebasic()) { intptr_t offset; AllocateNode* alloc = AllocateNode::Ideal_allocation(ac->in(ArrayCopyNode::Dest), phase, offset); - assert(alloc != NULL && (!ReduceBulkZeroing || alloc->initialization()->is_complete_with_arraycopy()), "broken allocation"); - if (alloc == ld_alloc) { + if (alloc != NULL && alloc == ld_alloc) { return ac; } }
--- a/src/hotspot/share/opto/superword.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/opto/superword.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -376,6 +376,7 @@ if (same_type) { max_vector = cur_max_vector; flag_small_bt = true; + cl->mark_subword_loop(); } } }
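Together with the loopnode.hpp and loopTransform.cpp hunks, this moves the "small basic type in the loop" fact from the global UseSubwordForMaxVector check onto the loop itself: SuperWord marks the CountedLoop, and the unroll-limit heuristic later asks cl->is_subword_loop(). A toy sketch of the one-bit-per-property flag pattern involved (values are illustrative, not the real LoopNode layout):

    #include <cstdio>

    class ToyLoopNode {
      unsigned _loop_flags = 0;
      enum : unsigned { StripMined = 1u << 0, SubwordLoop = 1u << 1 };
     public:
      void mark_subword_loop()     { _loop_flags |= SubwordLoop; }
      bool is_subword_loop() const { return (_loop_flags & SubwordLoop) != 0; }
    };

    int main() {
      ToyLoopNode cl;
      cl.mark_subword_loop();  // set where a small max vector width is chosen
      std::printf("is_subword_loop = %d\n", cl.is_subword_loop());
      return 0;
    }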
--- a/src/hotspot/share/prims/jniCheck.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/prims/jniCheck.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1995,9 +1995,6 @@ checked_jni_GetModule(JNIEnv *env, jclass clazz)) functionEnter(thr); - IN_VM( - jniCheck::validate_class(thr, clazz, false); - ) jobject result = UNCHECKED()->GetModule(env,clazz); functionExit(thr); return result;
--- a/src/hotspot/share/prims/jvm.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/prims/jvm.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -39,6 +39,7 @@ #include "interpreter/bytecode.hpp" #include "jfr/jfrEvents.hpp" #include "logging/log.hpp" +#include "memory/heapShared.hpp" #include "memory/oopFactory.hpp" #include "memory/referenceType.hpp" #include "memory/resourceArea.hpp" @@ -3598,6 +3599,13 @@ return VM_Version::supports_cx8(); JVM_END +JVM_ENTRY(void, JVM_InitializeFromArchive(JNIEnv* env, jclass cls)) + JVMWrapper("JVM_InitializeFromArchive"); + Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls)); + assert(k->is_klass(), "just checking"); + HeapShared::initialize_from_archived_subgraph(k); +JVM_END + // Returns an array of all live Thread objects (VM internal JavaThreads, // jvmti agent threads, and JNI attaching threads are skipped) // See CR 6404306 regarding JNI attaching threads
--- a/src/hotspot/share/runtime/arguments.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/arguments.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -535,12 +535,7 @@ { "IgnoreUnverifiableClassesDuringDump", JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() }, { "CompilerThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, { "VMThreadHintNoPreempt", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, - { "PrintSafepointStatistics", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, - { "PrintSafepointStatisticsTimeout", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, - { "PrintSafepointStatisticsCount",JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, { "AggressiveOpts", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, - { "AllowNonVirtualCalls", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, - { "UnlinkSymbolsALot", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in: { "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() }, @@ -573,6 +568,11 @@ { "NativeMonitorTimeout", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) }, { "NativeMonitorSpinLimit", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) }, { "NativeMonitorFlags", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) }, + { "UnlinkSymbolsALot", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, + { "AllowNonVirtualCalls", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, + { "PrintSafepointStatistics", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, + { "PrintSafepointStatisticsTimeout",JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, + { "PrintSafepointStatisticsCount", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) }, #ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS { "dep > obs", JDK_Version::jdk(9), JDK_Version::jdk(8), JDK_Version::undefined() },
--- a/src/hotspot/share/runtime/globals.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/globals.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -486,9 +486,6 @@ notproduct(bool, ZombieALot, false, \ "Create zombies (non-entrant) at exit from the runtime system") \ \ - product(bool, UnlinkSymbolsALot, false, \ - "Unlink unreferenced symbols from the symbol table at safepoints")\ - \ notproduct(bool, WalkStackALot, false, \ "Trace stack (no print) at every exit from the runtime system") \ \ @@ -1180,20 +1177,6 @@ "null (+offset) will not raise a SIGSEGV, i.e.," \ "ImplicitNullChecks don't work (PPC64).") \ \ - product(bool, PrintSafepointStatistics, false, \ - "(Deprecated) Print statistics about safepoint synchronization") \ - \ - product(intx, PrintSafepointStatisticsCount, 300, \ - "(Deprecated) Total number of safepoint statistics collected " \ - "before printing them out") \ - range(1, max_intx) \ - \ - product(intx, PrintSafepointStatisticsTimeout, -1, \ - "(Deprecated) Print safepoint statistics only when safepoint takes " \ - "more than PrintSafepointSatisticsTimeout in millis") \ - LP64_ONLY(range(-1, max_intx/MICROUNITS)) \ - NOT_LP64(range(-1, max_intx)) \ - \ diagnostic(bool, EnableThreadSMRExtraValidityChecks, true, \ "Enable Thread SMR extra validity checks") \ \ @@ -2571,9 +2554,6 @@ "Default survivor space alignment in bytes") \ constraint(SurvivorAlignmentInBytesConstraintFunc,AfterErgo) \ \ - product(bool , AllowNonVirtualCalls, false, \ - "Obey the ACC_SUPER flag and allow invokenonvirtual calls") \ - \ product(ccstr, DumpLoadedClassList, NULL, \ "Dump the names all loaded classes, that could be stored into " \ "the CDS archive, in the specified file") \
--- a/src/hotspot/share/runtime/handshake.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/handshake.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -297,12 +297,12 @@ void HandshakeState::set_operation(JavaThread* target, HandshakeOperation* op) { _operation = op; - SafepointMechanism::arm_local_poll(target); + SafepointMechanism::arm_local_poll_release(target); } void HandshakeState::clear_handshake(JavaThread* target) { _operation = NULL; - SafepointMechanism::disarm_local_poll(target); + SafepointMechanism::disarm_local_poll_release(target); } void HandshakeState::process_self_inner(JavaThread* thread) {
--- a/src/hotspot/share/runtime/init.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/init.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -28,6 +28,8 @@ #include "code/icBuffer.hpp" #include "gc/shared/collectedHeap.hpp" #include "interpreter/bytecodes.hpp" +#include "logging/log.hpp" +#include "logging/logTag.hpp" #include "memory/universe.hpp" #include "prims/methodHandles.hpp" #include "runtime/flags/jvmFlag.hpp" @@ -165,7 +167,7 @@ if (!destructorsCalled) { destructorsCalled = true; perfMemory_exit(); - if (PrintSafepointStatistics) { + if (log_is_enabled(Debug, safepoint, stats)) { // Print the collected safepoint statistics. SafepointSynchronize::print_stat_on_exit(); }
--- a/src/hotspot/share/runtime/interfaceSupport.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/interfaceSupport.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -66,9 +66,6 @@ if (ZombieALot) { InterfaceSupport::zombieAll(); } - if (UnlinkSymbolsALot) { - InterfaceSupport::unlinkSymbols(); - } // do verification AFTER potential deoptimization if (VerifyStack) { InterfaceSupport::verify_stack(); @@ -208,11 +205,6 @@ zombieAllCounter++; } -void InterfaceSupport::unlinkSymbols() { - VM_UnlinkSymbols op; - VMThread::execute(&op); -} - void InterfaceSupport::deoptimizeAll() { // This method is called by all threads when a thread make // transition to VM state (for example, runtime calls).
--- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,7 +60,6 @@ static void walk_stack(); static void zombieAll(); - static void unlinkSymbols(); static void deoptimizeAll(); static void stress_derived_pointers(); static void verify_stack();
--- a/src/hotspot/share/runtime/safepoint.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/safepoint.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -150,15 +150,23 @@ static volatile int TryingToBlock = 0 ; // proximate value -- for advisory use only static bool timeout_error_printed = false; + +// Statistic related statics +julong SafepointSynchronize::_coalesced_vmop_count = 0; +static jlong _safepoint_begin_time = 0; +static float _ts_of_current_safepoint = 0.0f; +static volatile int _nof_threads_hit_polling_page = 0; + // Roll all threads forward to a safepoint and suspend them all void SafepointSynchronize::begin() { EventSafepointBegin begin_event; Thread* myThread = Thread::current(); assert(myThread->is_VM_thread(), "Only VM thread may execute a safepoint"); - if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) { + if (log_is_enabled(Debug, safepoint, stats)) { _safepoint_begin_time = os::javaTimeNanos(); _ts_of_current_safepoint = tty->time_stamp().seconds(); + _nof_threads_hit_polling_page = 0; } Universe::heap()->safepoint_synchronize_begin(); @@ -190,14 +198,6 @@ jlong safepoint_limit_time = 0; timeout_error_printed = false; - // PrintSafepointStatisticsTimeout can be specified separately. When - // specified, PrintSafepointStatistics will be set to true in - // deferred_initialize_stat method. The initialization has to be done - // early enough to avoid any races. See bug 6880029 for details. - if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) { - deferred_initialize_stat(); - } - // Begin the process of bringing the system to a safepoint. // Java threads can be in several different states and are // stopped by different mechanisms: @@ -243,9 +243,10 @@ if (SafepointMechanism::uses_thread_local_poll()) { // Arming the per thread poll while having _state != _not_synchronized means safepointing log_trace(safepoint)("Setting thread local yield flag for threads"); + OrderAccess::storestore(); // storestore, global state -> local state for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); ) { // Make sure the threads start polling, it is time to yield. - SafepointMechanism::arm_local_poll(cur); // release store, global state -> local state + SafepointMechanism::arm_local_poll(cur); } } OrderAccess::fence(); // storestore|storeload, global state -> local state @@ -311,7 +312,7 @@ if (iterations == 0) { initial_running = still_running; - if (PrintSafepointStatistics) { + if (log_is_enabled(Debug, safepoint, stats)) { begin_statistics(nof_threads, still_running); } } @@ -402,7 +403,7 @@ } // ThreadsListHandle destroyed here. 
assert(still_running == 0, "sanity check"); - if (PrintSafepointStatistics) { + if (log_is_enabled(Debug, safepoint, stats)) { update_statistics_on_spin_end(); } if (sync_event.should_commit()) { @@ -435,7 +436,7 @@ if (SafepointTimeout) { jlong current_time = os::javaTimeNanos(); if (safepoint_limit_time < current_time) { - tty->print_cr("# SafepointSynchronize: Finished after " + log_warning(safepoint)("# SafepointSynchronize: Finished after " INT64_FORMAT_W(6) " ms", (int64_t)((current_time - safepoint_limit_time) / MICROUNITS + (jlong)SafepointTimeoutDelay)); @@ -469,7 +470,7 @@ log_info(safepoint)("Entering safepoint region: %s", VMThread::vm_safepoint_description()); RuntimeService::record_safepoint_synchronized(); - if (PrintSafepointStatistics) { + if (log_is_enabled(Debug, safepoint, stats)) { update_statistics_on_sync_end(os::javaTimeNanos()); } @@ -482,7 +483,7 @@ } } - if (PrintSafepointStatistics) { + if (log_is_enabled(Debug, safepoint, stats)) { // Record how much time spend on the above cleanup tasks update_statistics_on_cleanup_end(os::javaTimeNanos()); } @@ -505,7 +506,7 @@ DEBUG_ONLY(Thread* myThread = Thread::current();) assert(myThread->is_VM_thread(), "Only VM thread can execute a safepoint"); - if (PrintSafepointStatistics) { + if (log_is_enabled(Debug, safepoint, stats)) { end_statistics(os::javaTimeNanos()); } @@ -546,7 +547,7 @@ for (; JavaThread *current = jtiwh.next(); ) { ThreadSafepointState* cur_state = current->safepoint_state(); cur_state->restart(); // TSS _running - SafepointMechanism::disarm_local_poll(current); // release store, local state -> polling page + SafepointMechanism::disarm_local_poll(current); } log_info(safepoint)("Leaving safepoint region"); } else { @@ -951,8 +952,8 @@ assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization"); } - if (PrintSafepointStatistics) { - inc_page_trap_count(); + if (log_is_enabled(Debug, safepoint, stats)) { + Atomic::inc(&_nof_threads_hit_polling_page); } ThreadSafepointState* state = thread->safepoint_state(); @@ -966,29 +967,34 @@ timeout_error_printed = true; // Print out the thread info which didn't reach the safepoint for debugging // purposes (useful when there are lots of threads in the debugger). 
- tty->cr(); - tty->print_cr("# SafepointSynchronize::begin: Timeout detected:"); - if (reason == _spinning_timeout) { - tty->print_cr("# SafepointSynchronize::begin: Timed out while spinning to reach a safepoint."); - } else if (reason == _blocking_timeout) { - tty->print_cr("# SafepointSynchronize::begin: Timed out while waiting for threads to stop."); - } + LogTarget(Warning, safepoint) lt; + if (lt.is_enabled()) { + ResourceMark rm; + LogStream ls(lt); + + ls.cr(); + ls.print_cr("# SafepointSynchronize::begin: Timeout detected:"); + if (reason == _spinning_timeout) { + ls.print_cr("# SafepointSynchronize::begin: Timed out while spinning to reach a safepoint."); + } else if (reason == _blocking_timeout) { + ls.print_cr("# SafepointSynchronize::begin: Timed out while waiting for threads to stop."); + } - tty->print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:"); - ThreadSafepointState *cur_state; - ResourceMark rm; - for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) { - cur_state = cur_thread->safepoint_state(); + ls.print_cr("# SafepointSynchronize::begin: Threads which did not reach the safepoint:"); + ThreadSafepointState *cur_state; + for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) { + cur_state = cur_thread->safepoint_state(); - if (cur_thread->thread_state() != _thread_blocked && - ((reason == _spinning_timeout && cur_state->is_running()) || - (reason == _blocking_timeout && !cur_state->has_called_back()))) { - tty->print("# "); - cur_thread->print(); - tty->cr(); + if (cur_thread->thread_state() != _thread_blocked && + ((reason == _spinning_timeout && cur_state->is_running()) || + (reason == _blocking_timeout && !cur_state->has_called_back()))) { + ls.print("# "); + cur_thread->print_on(&ls); + ls.cr(); + } } + ls.print_cr("# SafepointSynchronize::begin: (End of list)"); } - tty->print_cr("# SafepointSynchronize::begin: (End of list)"); } // To debug the long safepoint, specify both DieOnSafepointTimeout & @@ -1238,67 +1244,42 @@ // // Statistics & Instrumentations // -SafepointSynchronize::SafepointStats* SafepointSynchronize::_safepoint_stats = NULL; -jlong SafepointSynchronize::_safepoint_begin_time = 0; -int SafepointSynchronize::_cur_stat_index = 0; -julong SafepointSynchronize::_safepoint_reasons[VM_Operation::VMOp_Terminating]; -julong SafepointSynchronize::_coalesced_vmop_count = 0; -jlong SafepointSynchronize::_max_sync_time = 0; -jlong SafepointSynchronize::_max_vmop_time = 0; -float SafepointSynchronize::_ts_of_current_safepoint = 0.0f; +struct SafepointStats { + float _time_stamp; // record when the current safepoint occurs in seconds + int _vmop_type; // tyep of VM operation triggers the safepoint + int _nof_total_threads; // total number of Java threads + int _nof_initial_running_threads; // total number of initially seen running threads + int _nof_threads_wait_to_block; // total number of threads waiting for to block + bool _page_armed; // true if polling page is armed, false otherwise + int _nof_threads_hit_page_trap; // total number of threads hitting the page trap + jlong _time_to_spin; // total time in millis spent in spinning + jlong _time_to_wait_to_block; // total time in millis spent in waiting for to block + jlong _time_to_do_cleanups; // total time in millis spent in performing cleanups + jlong _time_to_sync; // total time in millis spent in getting to _synchronized + jlong _time_to_exec_vmop; // total time in millis spent in vm operation itself +}; + +static 
const int _statistics_header_count = 30; +static int _cur_stat_index = 0; +static SafepointStats safepoint_stats = {0}; // zero initialize +static SafepointStats* spstat = &safepoint_stats; + +static julong _safepoint_reasons[VM_Operation::VMOp_Terminating]; +static jlong _max_sync_time = 0; +static jlong _max_vmop_time = 0; static jlong cleanup_end_time = 0; -static bool init_done = false; - -// Helper method to print the header. -static void print_header() { - // The number of spaces is significant here, and should match the format - // specifiers in print_statistics(). - - tty->print(" vmop " - "[ threads: total initially_running wait_to_block ]" - "[ time: spin block sync cleanup vmop ] "); - - tty->print_cr("page_trap_count"); -} - -void SafepointSynchronize::deferred_initialize_stat() { - if (init_done) return; - - // If PrintSafepointStatisticsTimeout is specified, the statistics data will - // be printed right away, in which case, _safepoint_stats will regress to - // a single element array. Otherwise, it is a circular ring buffer with default - // size of PrintSafepointStatisticsCount. - int stats_array_size; - if (PrintSafepointStatisticsTimeout > 0) { - stats_array_size = 1; - PrintSafepointStatistics = true; - } else { - stats_array_size = PrintSafepointStatisticsCount; - } - _safepoint_stats = (SafepointStats*)os::malloc(stats_array_size - * sizeof(SafepointStats), mtInternal); - guarantee(_safepoint_stats != NULL, - "not enough memory for safepoint instrumentation data"); - - init_done = true; -} void SafepointSynchronize::begin_statistics(int nof_threads, int nof_running) { - assert(init_done, "safepoint statistics array hasn't been initialized"); - SafepointStats *spstat = &_safepoint_stats[_cur_stat_index]; spstat->_time_stamp = _ts_of_current_safepoint; VM_Operation *op = VMThread::vm_operation(); - spstat->_vmop_type = (op != NULL ? op->type() : -1); - if (op != NULL) { - _safepoint_reasons[spstat->_vmop_type]++; - } + spstat->_vmop_type = op != NULL ? op->type() : VM_Operation::VMOp_None; + _safepoint_reasons[spstat->_vmop_type]++; spstat->_nof_total_threads = nof_threads; spstat->_nof_initial_running_threads = nof_running; - spstat->_nof_threads_hit_page_trap = 0; // Records the start time of spinning. The real time spent on spinning // will be adjusted when spin is done. Same trick is applied for time @@ -1311,8 +1292,6 @@ } void SafepointSynchronize::update_statistics_on_spin_end() { - SafepointStats *spstat = &_safepoint_stats[_cur_stat_index]; - jlong cur_time = os::javaTimeNanos(); spstat->_nof_threads_wait_to_block = _waiting_to_block; @@ -1329,7 +1308,6 @@ } void SafepointSynchronize::update_statistics_on_sync_end(jlong end_time) { - SafepointStats *spstat = &_safepoint_stats[_cur_stat_index]; if (spstat->_nof_threads_wait_to_block != 0) { spstat->_time_to_wait_to_block = end_time - @@ -1348,106 +1326,90 @@ } void SafepointSynchronize::update_statistics_on_cleanup_end(jlong end_time) { - SafepointStats *spstat = &_safepoint_stats[_cur_stat_index]; // Record how long spent in cleanup tasks. spstat->_time_to_do_cleanups = end_time - spstat->_time_to_do_cleanups; - cleanup_end_time = end_time; } void SafepointSynchronize::end_statistics(jlong vmop_end_time) { - SafepointStats *spstat = &_safepoint_stats[_cur_stat_index]; // Update the vm operation time. 
spstat->_time_to_exec_vmop = vmop_end_time - cleanup_end_time; if (spstat->_time_to_exec_vmop > _max_vmop_time) { _max_vmop_time = spstat->_time_to_exec_vmop; } - // Only the sync time longer than the specified - // PrintSafepointStatisticsTimeout will be printed out right away. - // By default, it is -1 meaning all samples will be put into the list. - if ( PrintSafepointStatisticsTimeout > 0) { - if (spstat->_time_to_sync > (jlong)PrintSafepointStatisticsTimeout * MICROUNITS) { - print_statistics(); - } - } else { - // The safepoint statistics will be printed out when the _safepoin_stats - // array fills up. - if (_cur_stat_index == PrintSafepointStatisticsCount - 1) { - print_statistics(); - _cur_stat_index = 0; - } else { - _cur_stat_index++; - } - } + + spstat->_nof_threads_hit_page_trap = _nof_threads_hit_polling_page; + + print_statistics(); +} + +// Helper method to print the header. +static void print_header(outputStream* st) { + // The number of spaces is significant here, and should match the format + // specifiers in print_statistics(). + + st->print(" vmop " + "[ threads: total initially_running wait_to_block ]" + "[ time: spin block sync cleanup vmop ] "); + + st->print_cr("page_trap_count"); } +// This prints a nice table. To get the statistics to not shift due to the logging uptime +// decorator, use the option as: -Xlog:safepoint+stats=debug:[outputfile]:none void SafepointSynchronize::print_statistics() { - for (int index = 0; index <= _cur_stat_index; index++) { - if (index % 30 == 0) { - print_header(); - } - SafepointStats* sstats = &_safepoint_stats[index]; - tty->print("%8.3f: ", sstats->_time_stamp); - tty->print("%-30s [ " - INT32_FORMAT_W(8) " " INT32_FORMAT_W(17) " " INT32_FORMAT_W(13) " " - "]", - (sstats->_vmop_type == -1 ? "no vm operation" : VM_Operation::name(sstats->_vmop_type)), - sstats->_nof_total_threads, - sstats->_nof_initial_running_threads, - sstats->_nof_threads_wait_to_block); - // "/ MICROUNITS " is to convert the unit from nanos to millis. - tty->print("[ " - INT64_FORMAT_W(7) " " INT64_FORMAT_W(7) " " - INT64_FORMAT_W(7) " " INT64_FORMAT_W(7) " " - INT64_FORMAT_W(7) " ] ", - (int64_t)(sstats->_time_to_spin / MICROUNITS), - (int64_t)(sstats->_time_to_wait_to_block / MICROUNITS), - (int64_t)(sstats->_time_to_sync / MICROUNITS), - (int64_t)(sstats->_time_to_do_cleanups / MICROUNITS), - (int64_t)(sstats->_time_to_exec_vmop / MICROUNITS)); + LogTarget(Debug, safepoint, stats) lt; + assert (lt.is_enabled(), "should only be called when printing statistics is enabled"); + LogStream ls(lt); + + // Print header every 30 entries + if ((_cur_stat_index % _statistics_header_count) == 0) { + print_header(&ls); + _cur_stat_index = 1; // wrap + } else { + _cur_stat_index++; + } - tty->print_cr(INT32_FORMAT_W(15) " ", sstats->_nof_threads_hit_page_trap); - } + ls.print("%8.3f: ", spstat->_time_stamp); + ls.print("%-28s [ " + INT32_FORMAT_W(8) " " INT32_FORMAT_W(17) " " INT32_FORMAT_W(13) " " + "]", + VM_Operation::name(spstat->_vmop_type), + spstat->_nof_total_threads, + spstat->_nof_initial_running_threads, + spstat->_nof_threads_wait_to_block); + // "/ MICROUNITS " is to convert the unit from nanos to millis. 
+ ls.print("[ " + INT64_FORMAT_W(7) " " INT64_FORMAT_W(7) " " + INT64_FORMAT_W(7) " " INT64_FORMAT_W(7) " " + INT64_FORMAT_W(7) " ] ", + (int64_t)(spstat->_time_to_spin / MICROUNITS), + (int64_t)(spstat->_time_to_wait_to_block / MICROUNITS), + (int64_t)(spstat->_time_to_sync / MICROUNITS), + (int64_t)(spstat->_time_to_do_cleanups / MICROUNITS), + (int64_t)(spstat->_time_to_exec_vmop / MICROUNITS)); + + ls.print_cr(INT32_FORMAT_W(15) " ", spstat->_nof_threads_hit_page_trap); } -// This method will be called when VM exits. It will first call -// print_statistics to print out the rest of the sampling. Then -// it tries to summarize the sampling. +// This method will be called when VM exits. This tries to summarize the sampling. +// Current thread may already be deleted, so don't use ResourceMark. void SafepointSynchronize::print_stat_on_exit() { - if (_safepoint_stats == NULL) return; - - SafepointStats *spstat = &_safepoint_stats[_cur_stat_index]; - - // During VM exit, end_statistics may not get called and in that - // case, if the sync time is less than PrintSafepointStatisticsTimeout, - // don't print it out. - // Approximate the vm op time. - _safepoint_stats[_cur_stat_index]._time_to_exec_vmop = - os::javaTimeNanos() - cleanup_end_time; - - if ( PrintSafepointStatisticsTimeout < 0 || - spstat->_time_to_sync > (jlong)PrintSafepointStatisticsTimeout * MICROUNITS) { - print_statistics(); - } - tty->cr(); - - // Print out polling page sampling status. - tty->print_cr("Polling page always armed"); for (int index = 0; index < VM_Operation::VMOp_Terminating; index++) { if (_safepoint_reasons[index] != 0) { - tty->print_cr("%-26s" UINT64_FORMAT_W(10), VM_Operation::name(index), - _safepoint_reasons[index]); + log_debug(safepoint, stats)("%-28s" UINT64_FORMAT_W(10), VM_Operation::name(index), + _safepoint_reasons[index]); } } - tty->print_cr(UINT64_FORMAT_W(5) " VM operations coalesced during safepoint", - _coalesced_vmop_count); - tty->print_cr("Maximum sync time " INT64_FORMAT_W(5) " ms", - (int64_t)(_max_sync_time / MICROUNITS)); - tty->print_cr("Maximum vm operation time (except for Exit VM operation) " - INT64_FORMAT_W(5) " ms", - (int64_t)(_max_vmop_time / MICROUNITS)); + log_debug(safepoint, stats)("VM operations coalesced during safepoint " INT64_FORMAT, + _coalesced_vmop_count); + log_debug(safepoint, stats)("Maximum sync time " INT64_FORMAT" ms", + (int64_t)(_max_sync_time / MICROUNITS)); + log_debug(safepoint, stats)("Maximum vm operation time (except for Exit VM operation) " + INT64_FORMAT " ms", + (int64_t)(_max_vmop_time / MICROUNITS)); }
--- a/src/hotspot/share/runtime/safepoint.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/safepoint.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -89,20 +89,6 @@ SAFEPOINT_CLEANUP_NUM_TASKS }; - typedef struct { - float _time_stamp; // record when the current safepoint occurs in seconds - int _vmop_type; // type of VM operation triggers the safepoint - int _nof_total_threads; // total number of Java threads - int _nof_initial_running_threads; // total number of initially seen running threads - int _nof_threads_wait_to_block; // total number of threads waiting for to block - int _nof_threads_hit_page_trap; // total number of threads hitting the page trap - jlong _time_to_spin; // total time in millis spent in spinning - jlong _time_to_wait_to_block; // total time in millis spent in waiting for to block - jlong _time_to_do_cleanups; // total time in millis spent in performing cleanups - jlong _time_to_sync; // total time in millis spent in getting to _synchronized - jlong _time_to_exec_vmop; // total time in millis spent in vm operation itself - } SafepointStats; - private: static volatile SynchronizeState _state; // Threads might read this flag directly, without acquiring the Threads_lock static volatile int _waiting_to_block; // number of threads we are waiting for to block @@ -118,27 +104,16 @@ public: static volatile int _safepoint_counter; private: - static long _end_of_last_safepoint; // Time of last safepoint in milliseconds + static long _end_of_last_safepoint; // Time of last safepoint in milliseconds + static julong _coalesced_vmop_count; // coalesced vmop count // Statistics - static jlong _safepoint_begin_time; // time when safepoint begins - static SafepointStats* _safepoint_stats; // array of SafepointStats struct - static int _cur_stat_index; // current index to the above array - static julong _safepoint_reasons[]; // safepoint count for each VM op - static julong _coalesced_vmop_count; // coalesced vmop count - static jlong _max_sync_time; // maximum sync time in nanos - static jlong _max_vmop_time; // maximum vm operation time in nanos - static float _ts_of_current_safepoint; // time stamp of current safepoint in seconds - static void begin_statistics(int nof_threads, int nof_running); static void update_statistics_on_spin_end(); static void update_statistics_on_sync_end(jlong end_time); static void update_statistics_on_cleanup_end(jlong end_time); static void end_statistics(jlong end_time); static void print_statistics(); - inline static void inc_page_trap_count() { - Atomic::inc(&_safepoint_stats[_cur_stat_index]._nof_threads_hit_page_trap); - } // For debug long safepoint static void print_safepoint_timeout(SafepointTimeoutReason timeout_reason); @@ -192,7 +167,6 @@ static bool is_cleanup_needed(); static void do_cleanup_tasks(); - static void deferred_initialize_stat(); static void print_stat_on_exit(); inline static void inc_vmop_coalesced_count() { _coalesced_vmop_count++; }
--- a/src/hotspot/share/runtime/safepointMechanism.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/safepointMechanism.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -79,9 +79,13 @@ // Blocks a thread until safepoint is completed static inline void block_if_requested(JavaThread* thread); + // Caller is responsible for using a memory barrier if needed. static inline void arm_local_poll(JavaThread* thread); static inline void disarm_local_poll(JavaThread* thread); + static inline void arm_local_poll_release(JavaThread* thread); + static inline void disarm_local_poll_release(JavaThread* thread); + // Setup the selected safepoint mechanism static void initialize(); static void initialize_header(JavaThread* thread);
--- a/src/hotspot/share/runtime/safepointMechanism.inline.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/safepointMechanism.inline.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -87,4 +87,12 @@ thread->set_polling_page(poll_disarmed_value()); } +void SafepointMechanism::arm_local_poll_release(JavaThread* thread) { + thread->set_polling_page_release(poll_armed_value()); +} + +void SafepointMechanism::disarm_local_poll_release(JavaThread* thread) { + thread->set_polling_page_release(poll_disarmed_value()); +} + #endif // SHARE_VM_RUNTIME_SAFEPOINTMECHANISM_INLINE_HPP
--- a/src/hotspot/share/runtime/thread.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/thread.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -963,7 +963,7 @@ // The flag: potential_vm_operation notifies if this particular safepoint state could potentially // invoke the vm-thread (e.g., an oop allocation). In that case, we also have to make sure that -// no threads which allow_vm_block's are held +// no locks which allow_vm_block's are held void Thread::check_for_valid_safepoint_state(bool potential_vm_operation) { // Check if current thread is allowed to block at a safepoint if (!(_allow_safepoint_count == 0)) { @@ -3298,6 +3298,11 @@ } CompilerThread::~CompilerThread() { + // Free buffer blob, if allocated + if (get_buffer_blob() != NULL) { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + CodeCache::free(get_buffer_blob()); + } // Delete objects which were allocated on heap. delete _counters; }
--- a/src/hotspot/share/runtime/thread.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/thread.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1207,6 +1207,7 @@ bool do_not_unlock_if_synchronized() { return _do_not_unlock_if_synchronized; } void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; } + inline void set_polling_page_release(void* poll_value); inline void set_polling_page(void* poll_value); inline volatile void* get_polling_page();
--- a/src/hotspot/share/runtime/thread.inline.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/thread.inline.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -170,8 +170,13 @@ // The release make sure this store is done after storing the handshake // operation or global state +inline void JavaThread::set_polling_page_release(void* poll_value) { + OrderAccess::release_store(polling_page_addr(), poll_value); +} + +// Caller is responsible for using a memory barrier if needed. inline void JavaThread::set_polling_page(void* poll_value) { - OrderAccess::release_store(polling_page_addr(), poll_value); + *polling_page_addr() = poll_value; } // The aqcquire make sure reading of polling page is done before
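The split into set_polling_page() and set_polling_page_release() makes the ordering cost explicit at each call site: handshakes publish their operation and then arm the poll with a release store, while the safepoint paths that arm or disarm every thread in a loop already order the update with an explicit barrier (see the OrderAccess::storestore() added in safepoint.cpp) and can use the plain store. A standalone sketch of the same distinction, using std::atomic as a stand-in for OrderAccess:

    #include <atomic>

    struct ToyThread {
      std::atomic<void*> polling_page{nullptr};
    };

    static char armed_page;   // stand-in for poll_armed_value()

    // Handshake path: the operation must be visible before the poll flips, so use release.
    void arm_local_poll_release(ToyThread* t) {
      t->polling_page.store(&armed_page, std::memory_order_release);
    }

    // Safepoint path: the caller supplies its own barrier, so a relaxed store is sufficient.
    void arm_local_poll(ToyThread* t) {
      t->polling_page.store(&armed_page, std::memory_order_relaxed);
    }

    int main() {
      ToyThread t;
      arm_local_poll_release(&t);
      return t.polling_page.load(std::memory_order_acquire) == &armed_page ? 0 : 1;
    }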
--- a/src/hotspot/share/runtime/vmThread.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/vmThread.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,8 +47,8 @@ #include "utilities/xmlstream.hpp" // Dummy VM operation to act as first element in our circular double-linked list -class VM_Dummy: public VM_Operation { - VMOp_Type type() const { return VMOp_Dummy; } +class VM_None: public VM_Operation { + VMOp_Type type() const { return VMOp_None; } void doit() {}; }; @@ -58,7 +58,7 @@ for(int i = 0; i < nof_priorities; i++) { _queue_length[i] = 0; _queue_counter = 0; - _queue[i] = new VM_Dummy(); + _queue[i] = new VM_None(); _queue[i]->set_next(_queue[i]); _queue[i]->set_prev(_queue[i]); } @@ -511,7 +511,7 @@ _vm_queue->set_drain_list(next); evaluate_operation(_cur_vm_operation); _cur_vm_operation = next; - if (PrintSafepointStatistics) { + if (log_is_enabled(Debug, safepoint, stats)) { SafepointSynchronize::inc_vmop_coalesced_count(); } } while (_cur_vm_operation != NULL);
--- a/src/hotspot/share/runtime/vm_operations.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/vm_operations.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -192,12 +192,6 @@ #endif // !PRODUCT -void VM_UnlinkSymbols::doit() { - JavaThread *thread = (JavaThread *)calling_thread(); - assert(thread->is_Java_thread(), "must be a Java thread"); - SymbolTable::unlink(); -} - void VM_Verify::doit() { Universe::heap()->prepare_for_verify(); Universe::verify();
--- a/src/hotspot/share/runtime/vm_operations.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/runtime/vm_operations.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -40,7 +40,7 @@ // Note: When new VM_XXX comes up, add 'XXX' to the template table. #define VM_OPS_DO(template) \ - template(Dummy) \ + template(None) \ template(ThreadStop) \ template(ThreadDump) \ template(PrintThreads) \ @@ -52,7 +52,6 @@ template(DeoptimizeFrame) \ template(DeoptimizeAll) \ template(ZombieAll) \ - template(UnlinkSymbols) \ template(Verify) \ template(PrintJNI) \ template(HeapDumper) \ @@ -353,14 +352,6 @@ }; #endif // PRODUCT -class VM_UnlinkSymbols: public VM_Operation { - public: - VM_UnlinkSymbols() {} - VMOp_Type type() const { return VMOp_UnlinkSymbols; } - void doit(); - bool allow_nested_vm_operations() const { return true; } -}; - class VM_Verify: public VM_Operation { public: VMOp_Type type() const { return VMOp_Verify; }
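VM_Dummy becomes VM_None so that the queue's sentinel element and the "no VM operation" case in the safepoint statistics share one well-named entry in the VM_OPS_DO table. That table is an X-macro: a single list expands into both the VMOp_* enum and the name array behind VM_Operation::name(). A self-contained sketch of the pattern (toy names, not the real table):

    #include <cstdio>

    #define TOY_OPS_DO(f) f(None) f(ThreadStop) f(Verify)

    enum ToyOpType {
    #define DECLARE_ENUM(name) ToyOp_##name,
      TOY_OPS_DO(DECLARE_ENUM)
    #undef DECLARE_ENUM
      ToyOp_Terminating
    };

    static const char* toy_op_name(int type) {
      static const char* names[] = {
    #define DECLARE_NAME(name) #name,
        TOY_OPS_DO(DECLARE_NAME)
    #undef DECLARE_NAME
      };
      return (type >= 0 && type < ToyOp_Terminating) ? names[type] : "<unknown>";
    }

    int main() {
      std::printf("%s\n", toy_op_name(ToyOp_None));  // prints "None"
      return 0;
    }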
--- a/src/hotspot/share/services/mallocSiteTable.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/services/mallocSiteTable.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -28,34 +28,10 @@ #include "runtime/atomic.hpp" #include "services/mallocSiteTable.hpp" -/* - * Early os::malloc() calls come from initializations of static variables, long before entering any - * VM code. Upon the arrival of the first os::malloc() call, malloc site hashtable has to be - * initialized, along with the allocation site for the hashtable entries. - * To ensure that malloc site hashtable can be initialized without triggering any additional os::malloc() - * call, the hashtable bucket array and hashtable entry allocation site have to be static. - * It is not a problem for hashtable bucket, since it is an array of pointer type, C runtime just - * allocates a block memory and zero the memory for it. - * But for hashtable entry allocation site object, things get tricky. C runtime not only allocates - * memory for it, but also calls its constructor at some later time. If we initialize the allocation site - * at the first os::malloc() call, the object will be reinitialized when its constructor is called - * by C runtime. - * To workaround above issue, we declare a static size_t array with the size of the CallsiteHashtableEntry, - * the memory is used to instantiate CallsiteHashtableEntry for the hashtable entry allocation site. - * Given it is a primitive type array, C runtime will do nothing other than assign the memory block for the variable, - * which is exactly what we want. - * The same trick is also applied to create NativeCallStack object for CallsiteHashtableEntry memory allocation. - * - * Note: C++ object usually aligns to particular alignment, depends on compiler implementation, we declare - * the memory as size_t arrays, to ensure the memory is aligned to native machine word alignment. - */ - -// Reserve enough memory for NativeCallStack and MallocSiteHashtableEntry objects -size_t MallocSiteTable::_hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)]; -size_t MallocSiteTable::_hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)]; - // Malloc site hashtable buckets MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size]; +const NativeCallStack* MallocSiteTable::_hash_entry_allocation_stack = NULL; +const MallocSiteHashtableEntry* MallocSiteTable::_hash_entry_allocation_site = NULL; // concurrent access counter volatile int MallocSiteTable::_access_count = 0; @@ -73,9 +49,6 @@ * time, it is in single-threaded mode from JVM perspective. */ bool MallocSiteTable::initialize() { - assert(sizeof(_hash_entry_allocation_stack) >= sizeof(NativeCallStack), "Sanity Check"); - assert(sizeof(_hash_entry_allocation_site) >= sizeof(MallocSiteHashtableEntry), - "Sanity Check"); assert((size_t)table_size <= MAX_MALLOCSITE_TABLE_SIZE, "Hashtable overflow"); // Fake the call stack for hashtable entry allocation @@ -84,24 +57,32 @@ // Create pseudo call stack for hashtable entry allocation address pc[3]; if (NMT_TrackingStackDepth >= 3) { - pc[2] = (address)MallocSiteTable::allocation_at; + uintx *fp = (uintx*)MallocSiteTable::allocation_at; + // On ppc64, 'fp' is a pointer to a function descriptor which is a struct of + // three native pointers where the first pointer is the real function address. 
+ // See: http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.html#FUNC-DES + pc[2] = (address)(fp PPC64_ONLY(BIG_ENDIAN_ONLY([0]))); } if (NMT_TrackingStackDepth >= 2) { - pc[1] = (address)MallocSiteTable::lookup_or_add; + uintx *fp = (uintx*)MallocSiteTable::lookup_or_add; + pc[1] = (address)(fp PPC64_ONLY(BIG_ENDIAN_ONLY([0]))); } - pc[0] = (address)MallocSiteTable::new_entry; + uintx *fp = (uintx*)MallocSiteTable::new_entry; + pc[0] = (address)(fp PPC64_ONLY(BIG_ENDIAN_ONLY([0]))); - // Instantiate NativeCallStack object, have to use placement new operator. (see comments above) - NativeCallStack* stack = ::new ((void*)_hash_entry_allocation_stack) - NativeCallStack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth))); + static const NativeCallStack stack(pc, MIN2(((int)(sizeof(pc) / sizeof(address))), ((int)NMT_TrackingStackDepth))); + static const MallocSiteHashtableEntry entry(stack, mtNMT); - // Instantiate hash entry for hashtable entry allocation callsite - MallocSiteHashtableEntry* entry = ::new ((void*)_hash_entry_allocation_site) - MallocSiteHashtableEntry(*stack, mtNMT); + assert(_hash_entry_allocation_stack == NULL && + _hash_entry_allocation_site == NULL, + "Already initailized"); + + _hash_entry_allocation_stack = &stack; + _hash_entry_allocation_site = &entry; // Add the allocation site to hashtable. - int index = hash_to_index(stack->hash()); - _table[index] = entry; + int index = hash_to_index(stack.hash()); + _table[index] = const_cast<MallocSiteHashtableEntry*>(&entry); return true; } @@ -204,6 +185,9 @@ _table[index] = NULL; delete_linked_list(head); } + + _hash_entry_allocation_stack = NULL; + _hash_entry_allocation_site = NULL; } void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) { @@ -211,7 +195,7 @@ while (head != NULL) { p = head; head = (MallocSiteHashtableEntry*)head->next(); - if (p != (MallocSiteHashtableEntry*)_hash_entry_allocation_site) { + if (p != hash_entry_allocation_site()) { delete p; } }
--- a/src/hotspot/share/services/mallocSiteTable.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/services/mallocSiteTable.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -42,7 +42,7 @@ public: MallocSite() : - AllocationSite<MemoryCounter>(NativeCallStack::EMPTY_STACK), _flags(mtNone) {} + AllocationSite<MemoryCounter>(NativeCallStack::empty_stack()), _flags(mtNone) {} MallocSite(const NativeCallStack& stack, MEMFLAGS flags) : AllocationSite<MemoryCounter>(stack), _flags(flags) {} @@ -247,7 +247,13 @@ } static inline const NativeCallStack* hash_entry_allocation_stack() { - return (NativeCallStack*)_hash_entry_allocation_stack; + assert(_hash_entry_allocation_stack != NULL, "Must be set"); + return _hash_entry_allocation_stack; + } + + static inline const MallocSiteHashtableEntry* hash_entry_allocation_site() { + assert(_hash_entry_allocation_site != NULL, "Must be set"); + return _hash_entry_allocation_site; } private: @@ -256,15 +262,11 @@ // The callsite hashtable. It has to be a static table, // since malloc call can come from C runtime linker. - static MallocSiteHashtableEntry* _table[table_size]; + static MallocSiteHashtableEntry* _table[table_size]; + static const NativeCallStack* _hash_entry_allocation_stack; + static const MallocSiteHashtableEntry* _hash_entry_allocation_site; - // Reserve enough memory for placing the objects - - // The memory for hashtable entry allocation stack object - static size_t _hash_entry_allocation_stack[CALC_OBJ_SIZE_IN_TYPE(NativeCallStack, size_t)]; - // The memory for hashtable entry allocation callsite object - static size_t _hash_entry_allocation_site[CALC_OBJ_SIZE_IN_TYPE(MallocSiteHashtableEntry, size_t)]; NOT_PRODUCT(static int _peak_count;) };
--- a/src/hotspot/share/services/memTracker.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/services/memTracker.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -68,10 +68,6 @@ os::unsetenv(buf); } - // Construct NativeCallStack::EMPTY_STACK. It may get constructed twice, - // but it is benign, the results are the same. - ::new ((void*)&NativeCallStack::EMPTY_STACK) NativeCallStack(0, false); - if (!MallocTracker::initialize(level) || !VirtualMemoryTracker::initialize(level)) { level = NMT_off;
--- a/src/hotspot/share/services/memTracker.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/services/memTracker.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -31,8 +31,8 @@ #if !INCLUDE_NMT -#define CURRENT_PC NativeCallStack::EMPTY_STACK -#define CALLER_PC NativeCallStack::EMPTY_STACK +#define CURRENT_PC NativeCallStack::empty_stack() +#define CALLER_PC NativeCallStack::empty_stack() class Tracker : public StackObj { public: @@ -86,9 +86,9 @@ extern volatile bool NMT_stack_walkable; #define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \ - NativeCallStack(0, true) : NativeCallStack::EMPTY_STACK) + NativeCallStack(0, true) : NativeCallStack::empty_stack()) #define CALLER_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \ - NativeCallStack(1, true) : NativeCallStack::EMPTY_STACK) + NativeCallStack(1, true) : NativeCallStack::empty_stack()) class MemBaseline; class Mutex;
--- a/src/hotspot/share/services/virtualMemoryTracker.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/services/virtualMemoryTracker.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -302,7 +302,7 @@ ReservedMemoryRegion(address base, size_t size) : - VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone) { } + VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _flag(mtNone) { } // Copy constructor ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
--- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -263,8 +263,11 @@ Prefetch::read(*pref->value(), 0); pref = pref->next(); } - if (next->next() != NULL) { - Prefetch::read(*next->next()->value(), 0); + // Read next() Node* once. May be racing with a thread moving the next + // pointers. + Node* next_pref = next->next(); + if (next_pref != NULL) { + Prefetch::read(*next_pref->value(), 0); } if (eval_f(next->value())) { return true; @@ -546,8 +549,9 @@ lookup_f.equals(rem_n->value(), &is_dead); if (is_dead) { ndel[dels++] = rem_n; - bucket->release_assign_node_ptr(rem_n_prev, rem_n->next()); - rem_n = rem_n->next(); + Node* next_node = rem_n->next(); + bucket->release_assign_node_ptr(rem_n_prev, next_node); + rem_n = next_node; if (dels == BULK_DELETE_LIMIT) { break; } @@ -654,32 +658,33 @@ while (aux != NULL) { bool dead_hash = false; size_t aux_hash = CONFIG::get_hash(*aux->value(), &dead_hash); + Node* aux_next = aux->next(); if (dead_hash) { delete_me = aux; // This item is dead, move both list to next new_table->get_bucket(odd_index)->release_assign_node_ptr(odd, - aux->next()); + aux_next); new_table->get_bucket(even_index)->release_assign_node_ptr(even, - aux->next()); + aux_next); } else { size_t aux_index = bucket_idx_hash(new_table, aux_hash); if (aux_index == even_index) { // This is a even, so move odd to aux/even next new_table->get_bucket(odd_index)->release_assign_node_ptr(odd, - aux->next()); + aux_next); // Keep in even list even = aux->next_ptr(); } else if (aux_index == odd_index) { // This is a odd, so move odd to aux/odd next new_table->get_bucket(even_index)->release_assign_node_ptr(even, - aux->next()); + aux_next); // Keep in odd list odd = aux->next_ptr(); } else { fatal("aux_index does not match even or odd indices"); } } - aux = aux->next(); + aux = aux_next; // We can only move 1 pointer otherwise a reader might be moved to the wrong // chain. E.g. looking for even hash value but got moved to the odd bucket @@ -976,8 +981,9 @@ while (rem_n != NULL) { if (eval_f(rem_n->value())) { ndel[dels++] = rem_n; - bucket->release_assign_node_ptr(rem_n_prev, rem_n->next()); - rem_n = rem_n->next(); + Node* next_node = rem_n->next(); + bucket->release_assign_node_ptr(rem_n_prev, next_node); + rem_n = next_node; if (dels == num_del) { break; }
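Several scan paths now read a node's next() exactly once into a local before using it for both the bucket update and the loop advance; re-loading the link between those uses could observe a concurrently updated pointer and walk the wrong chain. A standalone sketch of the load-once discipline, with std::atomic links standing in for the table's release/acquire node pointers:

    #include <atomic>

    struct Node {
      int value;
      std::atomic<Node*> next;
      explicit Node(int v) : value(v), next(nullptr) {}
    };

    int sum_values(Node* head) {
      int sum = 0;
      for (Node* cur = head; cur != nullptr; ) {
        Node* next_node = cur->next.load(std::memory_order_acquire);  // read the link once
        sum += cur->value;
        cur = next_node;   // reuse the same loaded pointer; never re-load between uses
      }
      return sum;
    }

    int main() {
      Node a(1), b(2);
      a.next = &b;
      return sum_values(&a) == 3 ? 0 : 1;
    }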
--- a/src/hotspot/share/utilities/macros.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/utilities/macros.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -569,6 +569,14 @@ #define NOT_AARCH64(code) code #endif +#ifdef VM_LITTLE_ENDIAN +#define LITTLE_ENDIAN_ONLY(code) code +#define BIG_ENDIAN_ONLY(code) +#else +#define LITTLE_ENDIAN_ONLY(code) +#define BIG_ENDIAN_ONLY(code) code +#endif + #define define_pd_global(type, name, value) const type pd_##name = value; // Helper macros for constructing file names for includes.
--- a/src/hotspot/share/utilities/nativeCallStack.cpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/utilities/nativeCallStack.cpp Thu Jul 12 11:09:23 2018 -0700 @@ -28,8 +28,6 @@ #include "utilities/globalDefinitions.hpp" #include "utilities/nativeCallStack.hpp" -const NativeCallStack NativeCallStack::EMPTY_STACK(0, false); - NativeCallStack::NativeCallStack(int toSkip, bool fillStack) : _hash_value(0) { @@ -126,4 +124,3 @@ } } } -
--- a/src/hotspot/share/utilities/nativeCallStack.hpp Tue Jul 03 16:09:25 2018 +0530 +++ b/src/hotspot/share/utilities/nativeCallStack.hpp Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,18 +51,21 @@ * 2. The class is strict stack object, no heap or virtual memory can be allocated * from it. */ +class MemTracker; + class NativeCallStack : public StackObj { - public: - static const NativeCallStack EMPTY_STACK; - - private: +private: address _stack[NMT_TrackingStackDepth]; unsigned int _hash_value; - public: +public: NativeCallStack(int toSkip = 0, bool fillStack = false); NativeCallStack(address* pc, int frameCount); + static inline const NativeCallStack& empty_stack() { + static const NativeCallStack EMPTY_STACK(0, false); + return EMPTY_STACK; + } // if it is an empty stack inline bool is_empty() const {
--- a/src/java.base/linux/classes/jdk/internal/platform/cgroupv1/SubSystem.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/linux/classes/jdk/internal/platform/cgroupv1/SubSystem.java Thu Jul 12 11:09:23 2018 -0700 @@ -27,6 +27,7 @@ import java.io.BufferedReader; import java.io.IOException; +import java.math.BigInteger; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -100,11 +101,20 @@ public static long getLongValue(SubSystem subsystem, String parm) { String strval = getStringValue(subsystem, parm); + long retval = 0; if (strval == null) return 0L; - long retval = Long.parseLong(strval); - + try { + retval = Long.parseLong(strval); + } catch (NumberFormatException e) { + // For some properties (e.g. memory.limit_in_bytes) we may overflow the range of signed long. + // In this case, return Long.max + BigInteger b = new BigInteger(strval); + if (b.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0) { + return Long.MAX_VALUE; + } + } return retval; }
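For readers skimming the hunk above, here is a minimal standalone sketch of the clamping strategy it introduces; the class and method names are hypothetical, and only the parse-then-BigInteger fallback mirrors the change.

```java
import java.math.BigInteger;

public class ClampedLongParseDemo {
    // Hypothetical helper mirroring the hunk above: cgroup values such as
    // memory.limit_in_bytes can exceed the signed long range, in which case
    // the parse falls back to BigInteger and reports Long.MAX_VALUE.
    static long parseClamped(String strval) {
        if (strval == null) {
            return 0L;
        }
        try {
            return Long.parseLong(strval);
        } catch (NumberFormatException e) {
            BigInteger b = new BigInteger(strval);
            if (b.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
                return Long.MAX_VALUE;
            }
            return 0L; // same fallback as the patched code's initial retval
        }
    }

    public static void main(String[] args) {
        System.out.println(parseClamped("9223372036854771712"));  // fits in a long
        System.out.println(parseClamped("18446744073709551615")); // clamped to Long.MAX_VALUE
    }
}
```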
--- a/src/java.base/share/classes/java/io/FileSystem.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/java/io/FileSystem.java Thu Jul 12 11:09:23 2018 -0700 @@ -231,8 +231,8 @@ // Flags for enabling/disabling performance optimizations for file // name canonicalization - static boolean useCanonCaches = true; - static boolean useCanonPrefixCache = true; + static boolean useCanonCaches; + static boolean useCanonPrefixCache; private static boolean getBooleanProperty(String prop, boolean defaultVal) { return Boolean.parseBoolean(System.getProperty(prop,
--- a/src/java.base/share/classes/java/lang/invoke/MethodHandles.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/java/lang/invoke/MethodHandles.java Thu Jul 12 11:09:23 2018 -0700 @@ -71,6 +71,9 @@ * <li>Combinator methods, which combine or transform pre-existing method handles into new ones. * <li>Other factory methods to create method handles that emulate other common JVM operations or control flow patterns. * </ul> + * A lookup, combinator, or factory method will fail and throw an + * {@code IllegalArgumentException} if the created method handle's type + * would have <a href="MethodHandle.html#maxarity">too many parameters</a>. * * @author John Rose, JSR 292 EG * @since 1.7 @@ -386,8 +389,9 @@ * constant is not subject to security manager checks. * <li>If the looked-up method has a * <a href="MethodHandle.html#maxarity">very large arity</a>, - * the method handle creation may fail, due to the method handle - * type having too many parameters. + * the method handle creation may fail with an + * {@code IllegalArgumentException}, due to the method handle type having + * <a href="MethodHandle.html#maxarity">too many parameters.</a> * </ul> * * <h1><a id="access"></a>Access checking</h1>
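As a hedged illustration of the arity limit the added wording refers to (not part of the patch): MethodType already rejects a type with more than 255 parameters, which is why lookups and combinators that would produce such a type fail with an IllegalArgumentException.

```java
import java.lang.invoke.MethodType;

public class MaxArityDemo {
    public static void main(String[] args) {
        try {
            // Requesting a generic method type with 300 Object parameters
            // exceeds the 255-slot limit described in MethodHandle#maxarity.
            MethodType.genericMethodType(300);
        } catch (IllegalArgumentException expected) {
            System.out.println("rejected: " + expected);
        }
    }
}
```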
--- a/src/java.base/share/classes/java/util/ArrayDeque.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/java/util/ArrayDeque.java Thu Jul 12 11:09:23 2018 -0700 @@ -180,7 +180,7 @@ * sufficient to hold 16 elements. */ public ArrayDeque() { - elements = new Object[16]; + elements = new Object[16 + 1]; } /**
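Background for the `16 + 1` (an implementation assumption, not stated in the patch itself): the deque's circular backing array keeps one slot unused so that head == tail can unambiguously mean "empty"; allocating one extra slot therefore lets the documented 16 elements fit before the first resize. A trivial public-API check:

```java
import java.util.ArrayDeque;

public class DefaultCapacityDemo {
    public static void main(String[] args) {
        ArrayDeque<Integer> d = new ArrayDeque<>();
        for (int i = 0; i < 16; i++) {
            d.offer(i); // offer the 16 elements the default allocation is documented to hold
        }
        System.out.println(d.size()); // 16
    }
}
```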
--- a/src/java.base/share/classes/java/util/HashMap.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/java/util/HashMap.java Thu Jul 12 11:09:23 2018 -0700 @@ -1263,9 +1263,7 @@ @Override public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { - if (value == null) - throw new NullPointerException(); - if (remappingFunction == null) + if (value == null || remappingFunction == null) throw new NullPointerException(); int hash = hash(key); Node<K,V>[] tab; Node<K,V> first; int n, i; @@ -1308,8 +1306,7 @@ else removeNode(hash, key, null, false, true); return v; - } - if (value != null) { + } else { if (t != null) t.putTreeVal(this, tab, hash, key, value); else { @@ -1320,8 +1317,8 @@ ++modCount; ++size; afterNodeInsertion(true); + return value; } - return value; } @Override
--- a/src/java.base/share/classes/java/util/zip/ZipOutputStream.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/java/util/zip/ZipOutputStream.java Thu Jul 12 11:09:23 2018 -0700 @@ -582,7 +582,9 @@ uctime > UPPER_UNIXTIME_BOUND) { elen += 36; // NTFS time total 36 bytes } else { - elen += 9; // headid(2) + sz(2) + flag(1) + mtime (4) + elen += 5; // headid(2) + sz(2) + flag(1) + if (e.mtime != null) + elen += 4; // + mtime (4) } } writeShort(elen);
--- a/src/java.base/share/classes/jdk/internal/misc/VM.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/jdk/internal/misc/VM.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -413,4 +413,15 @@ initialize(); } private static native void initialize(); + + /** + * Initialize archived static fields in the given Class using archived + * values from CDS dump time. Also initialize the classes of objects in + * the archived graph referenced by those fields. + * + * Those static fields remain as uninitialized if there is no mapped CDS + * java heap data or there is any error during initialization of the + * object class in the archived graph. + */ + public static native void initializeFromArchive(Class<?> c); }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.base/share/classes/jdk/internal/module/ArchivedModuleGraph.java Thu Jul 12 11:09:23 2018 -0700 @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package jdk.internal.module; + +import java.lang.module.ModuleFinder; +import java.util.Objects; +import jdk.internal.misc.VM; + +/** + * Used by ModuleBootstrap to obtain the archived system modules and finder. + */ +final class ArchivedModuleGraph { + private static String archivedMainModule; + private static SystemModules archivedSystemModules; + private static ModuleFinder archivedModuleFinder; + + private final SystemModules systemModules; + private final ModuleFinder finder; + + private ArchivedModuleGraph(SystemModules modules, ModuleFinder finder) { + this.systemModules = modules; + this.finder = finder; + } + + SystemModules systemModules() { + return systemModules; + } + + ModuleFinder finder() { + return finder; + } + + // A factory method that ModuleBootstrap can use to obtain the + // ArchivedModuleGraph. + static ArchivedModuleGraph get(String mainModule) { + if (Objects.equals(mainModule, archivedMainModule) + && archivedSystemModules != null + && archivedModuleFinder != null) { + return new ArchivedModuleGraph(archivedSystemModules, + archivedModuleFinder); + } else { + return null; + } + } + + // Used at CDS dump time + static void archive(String mainModule, SystemModules systemModules, + ModuleFinder finder) { + if (archivedMainModule != null) + throw new UnsupportedOperationException(); + archivedMainModule = mainModule; + archivedSystemModules = systemModules; + archivedModuleFinder = finder; + } + + static { + VM.initializeFromArchive(ArchivedModuleGraph.class); + } +}
--- a/src/java.base/share/classes/jdk/internal/module/ModuleBootstrap.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/jdk/internal/module/ModuleBootstrap.java Thu Jul 12 11:09:23 2018 -0700 @@ -54,6 +54,7 @@ import jdk.internal.misc.JavaLangAccess; import jdk.internal.misc.JavaLangModuleAccess; import jdk.internal.misc.SharedSecrets; +import jdk.internal.misc.VM; import jdk.internal.perf.PerfCounter; /** @@ -172,23 +173,45 @@ boolean haveModulePath = (appModulePath != null || upgradeModulePath != null); boolean needResolution = true; - if (!haveModulePath && addModules.isEmpty() && limitModules.isEmpty()) { - systemModules = SystemModuleFinders.systemModules(mainModule); - if (systemModules != null && !isPatched && (traceOutput == null)) { - needResolution = false; + // If the java heap was archived at CDS dump time and the environment + // at dump time matches the current environment then use the archived + // system modules and finder. + ArchivedModuleGraph archivedModuleGraph = ArchivedModuleGraph.get(mainModule); + if (archivedModuleGraph != null + && !haveModulePath + && addModules.isEmpty() + && limitModules.isEmpty() + && !isPatched) { + systemModules = archivedModuleGraph.systemModules(); + systemModuleFinder = archivedModuleGraph.finder(); + needResolution = (traceOutput != null); + } else { + boolean canArchive = false; + if (!haveModulePath && addModules.isEmpty() && limitModules.isEmpty()) { + systemModules = SystemModuleFinders.systemModules(mainModule); + if (systemModules != null && !isPatched) { + needResolution = (traceOutput != null); + canArchive = true; + } } - } - if (systemModules == null) { - // all system modules are observable - systemModules = SystemModuleFinders.allSystemModules(); - } - if (systemModules != null) { - // images build - systemModuleFinder = SystemModuleFinders.of(systemModules); - } else { - // exploded build or testing - systemModules = new ExplodedSystemModules(); - systemModuleFinder = SystemModuleFinders.ofSystem(); + if (systemModules == null) { + // all system modules are observable + systemModules = SystemModuleFinders.allSystemModules(); + } + if (systemModules != null) { + // images build + systemModuleFinder = SystemModuleFinders.of(systemModules); + } else { + // exploded build or testing + systemModules = new ExplodedSystemModules(); + systemModuleFinder = SystemModuleFinders.ofSystem(); + } + + // Module graph can be archived at CDS dump time. Only allow the + // unnamed module case for now. + if (canArchive && (mainModule == null)) { + ArchivedModuleGraph.archive(mainModule, systemModules, systemModuleFinder); + } } Counters.add("jdk.module.boot.1.systemModulesTime", t1);
--- a/src/java.base/share/classes/sun/net/NetworkServer.java Tue Jul 03 16:09:25 2018 +0530 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,151 +0,0 @@ -/* - * Copyright (c) 1995, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ -package sun.net; - -import java.io.*; -import java.net.Socket; -import java.net.ServerSocket; - -/** - * This is the base class for network servers. To define a new type - * of server define a new subclass of NetworkServer with a serviceRequest - * method that services one request. Start the server by executing: - * <pre> - * new MyServerClass().startServer(port); - * </pre> - */ -public class NetworkServer implements Runnable, Cloneable { - /** Socket for communicating with client. */ - public Socket clientSocket = null; - private Thread serverInstance; - private ServerSocket serverSocket; - - /** Stream for printing to the client. */ - public PrintStream clientOutput; - - /** Buffered stream for reading replies from client. */ - public InputStream clientInput; - - /** Close an open connection to the client. 
*/ - public void close() throws IOException { - clientSocket.close(); - clientSocket = null; - clientInput = null; - clientOutput = null; - } - - /** Return client connection status */ - public boolean clientIsOpen() { - return clientSocket != null; - } - - public final void run() { - if (serverSocket != null) { - Thread.currentThread().setPriority(Thread.MAX_PRIORITY); - // System.out.print("Server starts " + serverSocket + "\n"); - while (true) { - try { - Socket ns = serverSocket.accept(); -// System.out.print("New connection " + ns + "\n"); - NetworkServer n = (NetworkServer)clone(); - n.serverSocket = null; - n.clientSocket = ns; - new Thread(null, n, "NetworkServer", 0, false).start(); - } catch(Exception e) { - System.out.print("Server failure\n"); - e.printStackTrace(); - try { - serverSocket.close(); - } catch(IOException e2) {} - System.out.print("cs="+serverSocket+"\n"); - break; - } - } -// close(); - } else { - try { - clientOutput = new PrintStream( - new BufferedOutputStream(clientSocket.getOutputStream()), - false, "ISO8859_1"); - clientInput = new BufferedInputStream(clientSocket.getInputStream()); - serviceRequest(); - // System.out.print("Service handler exits - // "+clientSocket+"\n"); - } catch(Exception e) { - // System.out.print("Service handler failure\n"); - // e.printStackTrace(); - } - try { - close(); - } catch(IOException e2) {} - } - } - - /** Start a server on port <i>port</i>. It will call serviceRequest() - for each new connection. */ - public final void startServer(int port) throws IOException { - serverSocket = new ServerSocket(port, 50); - serverInstance = new Thread(null, this, "NetworkServer", 0, false); - serverInstance.start(); - } - - /** Service one request. It is invoked with the clientInput and - clientOutput streams initialized. This method handles one client - connection. When it is done, it can simply exit. The default - server just echoes it's input. It is invoked in it's own private - thread. */ - public void serviceRequest() throws IOException { - byte buf[] = new byte[300]; - int n; - clientOutput.print("Echo server " + getClass().getName() + "\n"); - clientOutput.flush(); - while ((n = clientInput.read(buf, 0, buf.length)) >= 0) { - clientOutput.write(buf, 0, n); - } - } - - public static void main(String argv[]) { - try { - new NetworkServer ().startServer(8888); - } catch (IOException e) { - System.out.print("Server failed: "+e+"\n"); - } - } - - /** - * Clone this object; - */ - public Object clone() { - try { - return super.clone(); - } catch (CloneNotSupportedException e) { - // this shouldn't happen, since we are Cloneable - throw new InternalError(e); - } - } - - public NetworkServer () { - } -}
--- a/src/java.base/share/classes/sun/net/URLCanonicalizer.java Tue Jul 03 16:09:25 2018 +0530 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,129 +0,0 @@ -/* - * Copyright (c) 1996, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package sun.net; - -/** - * Helper class to map URL "abbreviations" to real URLs. - * The default implementation supports the following mappings: - * <pre>{@code - * ftp.mumble.bar/... => ftp://ftp.mumble.bar/... - * gopher.mumble.bar/... => gopher://gopher.mumble.bar/... - * other.name.dom/... => http://other.name.dom/... - * /foo/... => file:/foo/... - * }</pre> - * - * Full URLs (those including a protocol name) are passed through unchanged. - * - * Subclassers can override or extend this behavior to support different - * or additional canonicalization policies. - * - * @author Steve Byrne - */ - -public class URLCanonicalizer { - /** - * Creates the default canonicalizer instance. - */ - public URLCanonicalizer() { } - - /** - * Given a possibly abbreviated URL (missing a protocol name, typically), - * this method's job is to transform that URL into a canonical form, - * by including a protocol name and additional syntax, if necessary. - * - * For a correctly formed URL, this method should just return its argument. - */ - public String canonicalize(String simpleURL) { - String resultURL = simpleURL; - if (simpleURL.startsWith("ftp.")) { - resultURL = "ftp://" + simpleURL; - } else if (simpleURL.startsWith("gopher.")) { - resultURL = "gopher://" + simpleURL; - } else if (simpleURL.startsWith("/")) { - resultURL = "file:" + simpleURL; - } else if (!hasProtocolName(simpleURL)) { - if (isSimpleHostName(simpleURL)) { - simpleURL = "www." 
+ simpleURL + ".com"; - } - resultURL = "http://" + simpleURL; - } - - return resultURL; - } - - /** - * Given a possibly abbreviated URL, this predicate function returns - * true if it appears that the URL contains a protocol name - */ - public boolean hasProtocolName(String url) { - int index = url.indexOf(':'); - if (index <= 0) { // treat ":foo" as not having a protocol spec - return false; - } - - for (int i = 0; i < index; i++) { - char c = url.charAt(i); - - // REMIND: this is a guess at legal characters in a protocol -- - // need to be verified - if ((c >= 'A' && c <= 'Z') - || (c >= 'a' && c <= 'z') - || (c == '-')) { - continue; - } - - // found an illegal character - return false; - } - - return true; - } - - /** - * Returns true if the URL is just a single name, no periods or - * slashes, false otherwise - **/ - protected boolean isSimpleHostName(String url) { - - for (int i = 0; i < url.length(); i++) { - char c = url.charAt(i); - - // REMIND: this is a guess at legal characters in a protocol -- - // need to be verified - if ((c >= 'A' && c <= 'Z') - || (c >= 'a' && c <= 'z') - || (c >= '0' && c <= '9') - || (c == '-')) { - continue; - } - - // found an illegal character - return false; - } - - return true; - } -}
--- a/src/java.base/share/classes/sun/security/rsa/RSAKeyFactory.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/sun/security/rsa/RSAKeyFactory.java Thu Jul 12 11:09:23 2018 -0700 @@ -100,7 +100,7 @@ private static void checkKeyAlgo(Key key, String expectedAlg) throws InvalidKeyException { String keyAlg = key.getAlgorithm(); - if (!(keyAlg.equalsIgnoreCase(expectedAlg))) { + if (keyAlg == null || !(keyAlg.equalsIgnoreCase(expectedAlg))) { throw new InvalidKeyException("Expected a " + expectedAlg + " key, but got " + keyAlg); } @@ -123,8 +123,7 @@ return (RSAKey)key; } else { try { - String keyAlgo = key.getAlgorithm(); - KeyType type = KeyType.lookup(keyAlgo); + KeyType type = KeyType.lookup(key.getAlgorithm()); RSAKeyFactory kf = RSAKeyFactory.getInstance(type); return (RSAKey) kf.engineTranslateKey(key); } catch (ProviderException e) { @@ -268,8 +267,7 @@ throw new InvalidKeyException("Invalid key", e); } } else if ("X.509".equals(key.getFormat())) { - byte[] encoded = key.getEncoded(); - RSAPublicKey translated = new RSAPublicKeyImpl(encoded); + RSAPublicKey translated = new RSAPublicKeyImpl(key.getEncoded()); // ensure the key algorithm matches the current KeyFactory instance checkKeyAlgo(translated, type.keyAlgo()); return translated; @@ -313,8 +311,8 @@ throw new InvalidKeyException("Invalid key", e); } } else if ("PKCS#8".equals(key.getFormat())) { - byte[] encoded = key.getEncoded(); - RSAPrivateKey translated = RSAPrivateCrtKeyImpl.newKey(encoded); + RSAPrivateKey translated = + RSAPrivateCrtKeyImpl.newKey(key.getEncoded()); // ensure the key algorithm matches the current KeyFactory instance checkKeyAlgo(translated, type.keyAlgo()); return translated;
--- a/src/java.base/share/classes/sun/security/rsa/RSAPrivateCrtKeyImpl.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/sun/security/rsa/RSAPrivateCrtKeyImpl.java Thu Jul 12 11:09:23 2018 -0700 @@ -123,6 +123,10 @@ * Construct a key from its encoding. Called from newKey above. */ RSAPrivateCrtKeyImpl(byte[] encoded) throws InvalidKeyException { + if (encoded == null || encoded.length == 0) { + throw new InvalidKeyException("Missing key encoding"); + } + decode(encoded); RSAKeyFactory.checkRSAProviderKeyLengths(n.bitLength(), e); try {
--- a/src/java.base/share/classes/sun/security/rsa/RSAPublicKeyImpl.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/sun/security/rsa/RSAPublicKeyImpl.java Thu Jul 12 11:09:23 2018 -0700 @@ -116,6 +116,9 @@ * Construct a key from its encoding. Used by RSAKeyFactory. */ RSAPublicKeyImpl(byte[] encoded) throws InvalidKeyException { + if (encoded == null || encoded.length == 0) { + throw new InvalidKeyException("Missing key encoding"); + } decode(encoded); // this sets n and e value RSAKeyFactory.checkRSAProviderKeyLengths(n.bitLength(), e); checkExponentRange(n, e);
--- a/src/java.base/share/classes/sun/security/rsa/RSAUtil.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/sun/security/rsa/RSAUtil.java Thu Jul 12 11:09:23 2018 -0700 @@ -52,7 +52,11 @@ public String keyAlgo() { return algo; } - public static KeyType lookup(String name) { + public static KeyType lookup(String name) + throws InvalidKeyException, ProviderException { + if (name == null) { + throw new InvalidKeyException("Null key algorithm"); + } for (KeyType kt : KeyType.values()) { if (kt.keyAlgo().equalsIgnoreCase(name)) { return kt; @@ -133,21 +137,24 @@ throws ProviderException { if (params == null) return null; - String algName = params.getAlgorithm(); - KeyType type = KeyType.lookup(algName); - Class<? extends AlgorithmParameterSpec> specCls; - switch (type) { - case RSA: - throw new ProviderException("No params accepted for " + - type.keyAlgo()); - case PSS: - specCls = PSSParameterSpec.class; - break; - default: - throw new ProviderException("Unsupported RSA algorithm: " + algName); - } try { + String algName = params.getAlgorithm(); + KeyType type = KeyType.lookup(algName); + Class<? extends AlgorithmParameterSpec> specCls; + switch (type) { + case RSA: + throw new ProviderException("No params accepted for " + + type.keyAlgo()); + case PSS: + specCls = PSSParameterSpec.class; + break; + default: + throw new ProviderException("Unsupported RSA algorithm: " + algName); + } return params.getParameterSpec(specCls); + } catch (ProviderException pe) { + // pass it up + throw pe; } catch (Exception e) { throw new ProviderException(e); }
--- a/src/java.base/share/classes/sun/security/ssl/SSLSessionImpl.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/sun/security/ssl/SSLSessionImpl.java Thu Jul 12 11:09:23 2018 -0700 @@ -658,7 +658,7 @@ */ @Override public Principal getLocalPrincipal() { - return ((localCerts == null && localCerts.length != 0) ? null : + return ((localCerts == null || localCerts.length == 0) ? null : localCerts[0].getSubjectX500Principal()); }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.base/share/classes/sun/util/cldr/CLDRCalendarNameProviderImpl.java Thu Jul 12 11:09:23 2018 -0700 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package sun.util.cldr; + +import static sun.util.locale.provider.LocaleProviderAdapter.Type; + +import java.util.Locale; +import java.util.Set; +import sun.util.locale.provider.AvailableLanguageTags; +import sun.util.locale.provider.CalendarNameProviderImpl; +import sun.util.locale.provider.LocaleProviderAdapter; + + +public class CLDRCalendarNameProviderImpl extends CalendarNameProviderImpl implements AvailableLanguageTags{ + + public CLDRCalendarNameProviderImpl(Type type, Set<String> langtags) { + super(type, langtags); + } + + @Override + public boolean isSupportedLocale(Locale locale) { + if (Locale.ROOT.equals(locale)) { + return true; + } + String calendarType = null; + if (locale.hasExtensions()) { + calendarType = locale.getUnicodeLocaleType("ca"); + locale = locale.stripExtensions(); + } + if (calendarType != null) { + switch (calendarType) { + case "buddhist": + case "japanese": + case "gregory": + case "islamic": + case "roc": + break; + default: + // Unknown calendar type + return false; + } + } + return LocaleProviderAdapter.forType(Type.CLDR).isSupportedProviderLocale(locale, langtags); + } +}
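The isSupportedLocale override above keys off the Unicode "ca" locale extension; a small standalone example of reading and stripping that extension with the public java.util.Locale API (nothing here is specific to the new provider class):

```java
import java.util.Locale;

public class CalendarExtensionDemo {
    public static void main(String[] args) {
        Locale loc = Locale.forLanguageTag("ja-JP-u-ca-japanese");
        // The "ca" Unicode extension names the calendar type, e.g. "japanese".
        System.out.println(loc.getUnicodeLocaleType("ca"));
        // Support is then checked against the locale with extensions stripped.
        System.out.println(loc.stripExtensions().toLanguageTag()); // ja-JP
    }
}
```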
--- a/src/java.base/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java Thu Jul 12 11:09:23 2018 -0700 @@ -45,6 +45,7 @@ import java.util.StringTokenizer; import java.util.concurrent.ConcurrentHashMap; import java.util.spi.CalendarDataProvider; +import java.util.spi.CalendarNameProvider; import java.util.spi.TimeZoneNameProvider; import sun.util.locale.provider.JRELocaleProviderAdapter; import sun.util.locale.provider.LocaleDataMetaInfo; @@ -133,6 +134,24 @@ } @Override + public CalendarNameProvider getCalendarNameProvider() { + if (calendarNameProvider == null) { + CalendarNameProvider provider = AccessController.doPrivileged( + (PrivilegedAction<CalendarNameProvider>) () + -> new CLDRCalendarNameProviderImpl( + getAdapterType(), + getLanguageTagSet("FormatData"))); + + synchronized (this) { + if (calendarNameProvider == null) { + calendarNameProvider = provider; + } + } + } + return calendarNameProvider; + } + + @Override public CollatorProvider getCollatorProvider() { return null; } @@ -166,7 +185,7 @@ return locs; } - private Locale applyAliases(Locale loc) { + private static Locale applyAliases(Locale loc) { if (langAliasesMap.isEmpty()) { langAliasesMap = baseMetaInfo.getLanguageAliasMap(); } @@ -264,19 +283,18 @@ } /** - * This method returns equivalent CLDR supported locale for zh-HK, - * no, no-NO locales so that COMPAT locales do not precede - * those locales during ResourceBundle search path. + * This method returns equivalent CLDR supported locale + * for no, no-NO locales so that COMPAT locales do not precede + * those locales during ResourceBundle search path, also if an alias exists for a locale, + * it returns equivalent locale, e.g for zh_HK it returns zh_Hant-HK. */ private static Locale getEquivalentLoc(Locale locale) { switch (locale.toString()) { - case "zh_HK": - return Locale.forLanguageTag("zh-Hant-HK"); case "no": case "no_NO": return Locale.forLanguageTag("nb"); } - return locale; + return applyAliases(locale); } @Override
--- a/src/java.base/share/classes/sun/util/locale/provider/CalendarNameProviderImpl.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/sun/util/locale/provider/CalendarNameProviderImpl.java Thu Jul 12 11:09:23 2018 -0700 @@ -42,8 +42,8 @@ * @author Naoto Sato */ public class CalendarNameProviderImpl extends CalendarNameProvider implements AvailableLanguageTags { - private final LocaleProviderAdapter.Type type; - private final Set<String> langtags; + protected final LocaleProviderAdapter.Type type; + protected final Set<String> langtags; public CalendarNameProviderImpl(LocaleProviderAdapter.Type type, Set<String> langtags) { this.type = type; @@ -248,11 +248,8 @@ if (langtags.contains(locale.toLanguageTag())) { return true; } - if (type == LocaleProviderAdapter.Type.JRE) { - String oldname = locale.toString().replace('_', '-'); - return langtags.contains(oldname); - } - return false; + String oldname = locale.toString().replace('_', '-'); + return langtags.contains(oldname); } @Override
--- a/src/java.base/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java Thu Jul 12 11:09:23 2018 -0700 @@ -131,7 +131,7 @@ private volatile LocaleNameProvider localeNameProvider; protected volatile TimeZoneNameProvider timeZoneNameProvider; protected volatile CalendarDataProvider calendarDataProvider; - private volatile CalendarNameProvider calendarNameProvider; + protected volatile CalendarNameProvider calendarNameProvider; private volatile CalendarProvider calendarProvider; private volatile JavaTimeDateTimePatternProvider javaTimeDateTimePatternProvider;
--- a/src/java.base/share/legal/public_suffix.md Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/legal/public_suffix.md Thu Jul 12 11:09:23 2018 -0700 @@ -1,7 +1,7 @@ ## Mozilla Public Suffix List ### Public Suffix Notice -<pre> +``` You are receiving a copy of the Mozilla Public Suffix List in the following file: <java-home>/lib/security/public_suffix_list.dat. The terms of the Oracle license do NOT apply to this file; it is licensed under the @@ -11,17 +11,17 @@ The Source Code of this file is available under the Mozilla Public License, v. 2.0 and is located at -https://github.com/publicsuffix/list/blob/03089bfe4aa5b7a2e291f33e07a28d20f875cb83/public_suffix_list.dat +https://raw.githubusercontent.com/publicsuffix/list/2225db8d9f4a2a27ec697c883360632fa0c16261/public_suffix_list.dat. If a copy of the MPL was not distributed with this file, you can obtain one -at http://mozilla.org/MPL/2.0/. +at https://mozilla.org/MPL/2.0/. Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. -</pre> +``` ### MPL v2.0 -<pre> +``` Mozilla Public License Version 2.0 ================================== @@ -381,7 +381,7 @@ This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. + file, You can obtain one at https://mozilla.org/MPL/2.0/. If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE @@ -396,4 +396,4 @@ This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. -</pre> +```
--- a/src/java.base/share/native/libjava/VM.c Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/share/native/libjava/VM.c Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -60,3 +60,9 @@ Java_jdk_internal_misc_VM_getRuntimeArguments(JNIEnv *env, jclass cls) { return JVM_GetVmArguments(env); } + +JNIEXPORT void JNICALL +Java_jdk_internal_misc_VM_initializeFromArchive(JNIEnv *env, jclass ignore, + jclass c) { + JVM_InitializeFromArchive(env, c); +}
--- a/src/java.base/unix/classes/sun/nio/ch/UnixAsynchronousSocketChannelImpl.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/unix/classes/sun/nio/ch/UnixAsynchronousSocketChannelImpl.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -273,6 +273,7 @@ // invoke handler and set result CompletionHandler<Void,Object> handler = connectHandler; + connectHandler = null; Object att = connectAttachment; PendingFuture<Void,Object> future = connectFuture; if (handler == null) { @@ -405,6 +406,7 @@ this.readBuffer = null; this.readBuffers = null; this.readAttachment = null; + this.readHandler = null; // allow another read to be initiated enableReading(); @@ -600,6 +602,7 @@ this.writeBuffer = null; this.writeBuffers = null; this.writeAttachment = null; + this.writeHandler = null; // allow another write to be initiated enableWriting();
--- a/src/java.base/unix/native/libjava/java_props_md.c Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/unix/native/libjava/java_props_md.c Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -297,7 +297,23 @@ if (strcmp(p, "EUC-JP") == 0) { *std_encoding = "EUC-JP-LINUX"; } -#else +#endif + +#ifdef _AIX + if (strcmp(p, "big5") == 0) { + /* On AIX Traditional Chinese Big5 codeset is mapped to IBM-950 */ + *std_encoding = "IBM-950"; + } else if (strcmp(p, "IBM-943") == 0) { + /* + * On AIX, IBM-943 is mapped to IBM-943C in which symbol 'yen' and + * 'overline' are replaced with 'backslash' and 'tilde' from ASCII + * making first 96 code points same as ASCII. + */ + *std_encoding = "IBM-943C"; + } +#endif + +#ifdef __solaris__ if (strcmp(p,"eucJP") == 0) { /* For Solaris use customized vendor defined character * customized EUC-JP converter
--- a/src/java.base/windows/classes/sun/nio/fs/WindowsSecurity.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/windows/classes/sun/nio/fs/WindowsSecurity.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -102,9 +102,8 @@ final boolean stopImpersontating = impersontating; final boolean needToRevert = elevated; - return new Privilege() { - @Override - public void drop() { + return () -> { + try { if (token != 0L) { try { if (stopImpersontating) @@ -118,6 +117,8 @@ CloseHandle(token); } } + } finally { + LocalFree(pLuid); } }; }
--- a/src/java.base/windows/native/libjava/io_util_md.c Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/windows/native/libjava/io_util_md.c Thu Jul 12 11:09:23 2018 -0700 @@ -458,19 +458,20 @@ return 0; } - -int +jint handleSetLength(FD fd, jlong length) { HANDLE h = (HANDLE)fd; - long high = (long)(length >> 32); - DWORD ret; + FILE_END_OF_FILE_INFO eofInfo; - if (h == (HANDLE)(-1)) return -1; - ret = SetFilePointer(h, (long)(length), &high, FILE_BEGIN); - if (ret == 0xFFFFFFFF && GetLastError() != NO_ERROR) { + eofInfo.EndOfFile.QuadPart = length; + + if (h == INVALID_HANDLE_VALUE) { return -1; } - if (SetEndOfFile(h) == FALSE) return -1; + if (!SetFileInformationByHandle(h, FileEndOfFileInfo, &eofInfo, + sizeof(FILE_END_OF_FILE_INFO))) { + return -1; + } return 0; }
--- a/src/java.base/windows/native/libjava/io_util_md.h Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/windows/native/libjava/io_util_md.h Thu Jul 12 11:09:23 2018 -0700 @@ -43,7 +43,7 @@ int currentDirLength(const WCHAR* path, int pathlen); int handleAvailable(FD fd, jlong *pbytes); int handleSync(FD fd); -int handleSetLength(FD fd, jlong length); +jint handleSetLength(FD fd, jlong length); jlong handleGetLength(FD fd); JNIEXPORT jint handleRead(FD fd, void *buf, jint len); jint handleWrite(FD fd, const void *buf, jint len);
--- a/src/java.base/windows/native/libnio/fs/WindowsNativeDispatcher.c Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.base/windows/native/libnio/fs/WindowsNativeDispatcher.c Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1043,8 +1043,11 @@ if (pLuid == NULL) { JNU_ThrowInternalError(env, "Unable to allocate LUID structure"); } else { - if (LookupPrivilegeValueW(NULL, lpName, pLuid) == 0) + if (LookupPrivilegeValueW(NULL, lpName, pLuid) == 0) { + LocalFree(pLuid); throwWindowsException(env, GetLastError()); + return (jlong)0; + } } return ptr_to_jlong(pLuid); }
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor7.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor7.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -64,9 +64,12 @@ /** * Constructor for concrete subclasses to call. + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected AbstractAnnotationValueVisitor7() { - super(); + super(); // Superclass constructor deprecated too } }
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor8.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractAnnotationValueVisitor8.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,6 +65,7 @@ /** * Constructor for concrete subclasses to call. */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected AbstractAnnotationValueVisitor8() { super(); }
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitor7.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitor7.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,9 +67,12 @@ public abstract class AbstractElementVisitor7<R, P> extends AbstractElementVisitor6<R, P> { /** * Constructor for concrete subclasses to call. + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected AbstractElementVisitor7(){ - super(); + super(); // Superclass constructor deprecated too } }
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitor8.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractElementVisitor8.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,6 +68,7 @@ /** * Constructor for concrete subclasses to call. */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected AbstractElementVisitor8(){ super(); }
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitor7.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitor7.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,10 +67,13 @@ public abstract class AbstractTypeVisitor7<R, P> extends AbstractTypeVisitor6<R, P> { /** * Constructor for concrete subclasses to call. + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected AbstractTypeVisitor7() { - super(); + super(); // Superclass constructor deprecated too } /**
--- a/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitor8.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/AbstractTypeVisitor8.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,6 +68,7 @@ /** * Constructor for concrete subclasses to call. */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected AbstractTypeVisitor8() { super(); }
--- a/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitor7.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitor7.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,10 +81,13 @@ /** * Constructor for concrete subclasses; uses {@code null} for the * default value. + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected ElementKindVisitor7() { - super(null); + super(null); // Superclass constructor deprecated too } /** @@ -92,10 +95,13 @@ * default value. * * @param defaultValue the value to assign to {@link #DEFAULT_VALUE} + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected ElementKindVisitor7(R defaultValue) { - super(defaultValue); + super(defaultValue); // Superclass constructor deprecated too } /**
--- a/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitor8.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/ElementKindVisitor8.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -82,6 +82,7 @@ * Constructor for concrete subclasses; uses {@code null} for the * default value. */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected ElementKindVisitor8() { super(null); } @@ -92,6 +93,7 @@ * * @param defaultValue the value to assign to {@link #DEFAULT_VALUE} */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected ElementKindVisitor8(R defaultValue) { super(defaultValue); }
--- a/src/java.compiler/share/classes/javax/lang/model/util/ElementScanner7.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/ElementScanner7.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -94,10 +94,13 @@ /** * Constructor for concrete subclasses; uses {@code null} for the * default value. + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected ElementScanner7(){ - super(null); + super(null); // Superclass constructor deprecated too } /** @@ -105,10 +108,13 @@ * default value. * * @param defaultValue the default value + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected ElementScanner7(R defaultValue){ - super(defaultValue); + super(defaultValue); // Superclass constructor deprecated too } /**
--- a/src/java.compiler/share/classes/javax/lang/model/util/ElementScanner8.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/ElementScanner8.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -95,6 +95,7 @@ * Constructor for concrete subclasses; uses {@code null} for the * default value. */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected ElementScanner8(){ super(null); } @@ -105,6 +106,7 @@ * * @param defaultValue the default value */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected ElementScanner8(R defaultValue){ super(defaultValue); }
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor7.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor7.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,10 +71,13 @@ /** * Constructor for concrete subclasses; uses {@code null} for the * default value. + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected SimpleAnnotationValueVisitor7() { - super(null); + super(null); // Superclass constructor deprecated too } /** @@ -82,9 +85,12 @@ * default value. * * @param defaultValue the value to assign to {@link #DEFAULT_VALUE} + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected SimpleAnnotationValueVisitor7(R defaultValue) { - super(defaultValue); + super(defaultValue); // Superclass constructor deprecated too } }
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor8.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleAnnotationValueVisitor8.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,6 +72,7 @@ * Constructor for concrete subclasses; uses {@code null} for the * default value. */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected SimpleAnnotationValueVisitor8() { super(null); } @@ -82,6 +83,7 @@ * * @param defaultValue the value to assign to {@link #DEFAULT_VALUE} */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected SimpleAnnotationValueVisitor8(R defaultValue) { super(defaultValue); }
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitor7.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitor7.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,10 +77,13 @@ /** * Constructor for concrete subclasses; uses {@code null} for the * default value. + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected SimpleElementVisitor7(){ - super(null); + super(null); // Superclass constructor deprecated too } /** @@ -88,10 +91,13 @@ * default value. * * @param defaultValue the value to assign to {@link #DEFAULT_VALUE} + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected SimpleElementVisitor7(R defaultValue){ - super(defaultValue); + super(defaultValue); // Superclass constructor deprecated too } /**
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitor8.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleElementVisitor8.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,6 +77,7 @@ * Constructor for concrete subclasses; uses {@code null} for the * default value. */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected SimpleElementVisitor8(){ super(null); } @@ -87,6 +88,7 @@ * * @param defaultValue the value to assign to {@link #DEFAULT_VALUE} */ + @SuppressWarnings("deprecation") // Superclass constructor deprecated protected SimpleElementVisitor8(R defaultValue){ super(defaultValue); }
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitor7.java Tue Jul 03 16:09:25 2018 +0530 +++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitor7.java Thu Jul 12 11:09:23 2018 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,10 +77,13 @@ /** * Constructor for concrete subclasses; uses {@code null} for the * default value. + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected SimpleTypeVisitor7(){ - super(null); + super(null); // Superclass constructor deprecated too } /** @@ -88,10 +91,13 @@ * default value. * * @param defaultValue the value to assign to {@link #DEFAULT_VALUE} + * + * @deprecated Release 7 is obsolete; update to a visitor for a newer + * release level. */ - @SuppressWarnings("deprecation") // Superclass constructor deprecated + @Deprecated protected SimpleTypeVisitor7(R defaultValue){ - super(defaultValue); + super(defaultValue); // Superclass constructor deprecated too } /**
--- a/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitor8.java Tue Jul 03 16:09:25 2018 +0530
+++ b/src/java.compiler/share/classes/javax/lang/model/util/SimpleTypeVisitor8.java Thu Jul 12 11:09:23 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,6 +78,7 @@
      * Constructor for concrete subclasses; uses {@code null} for the
      * default value.
      */
+    @SuppressWarnings("deprecation") // Superclass constructor deprecated
     protected SimpleTypeVisitor8(){
         super(null);
     }
@@ -88,6 +89,7 @@
      *
      * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
      */
+    @SuppressWarnings("deprecation") // Superclass constructor deprecated
     protected SimpleTypeVisitor8(R defaultValue){
         super(defaultValue);
     }
--- a/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitor7.java Tue Jul 03 16:09:25 2018 +0530
+++ b/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitor7.java Thu Jul 12 11:09:23 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,10 +78,13 @@
     /**
      * Constructor for concrete subclasses to call; uses {@code null}
      * for the default value.
+     *
+     * @deprecated Release 7 is obsolete; update to a visitor for a newer
+     * release level.
      */
-    @SuppressWarnings("deprecation") // Superclass constructor deprecated
+    @Deprecated
     protected TypeKindVisitor7() {
-        super(null);
+        super(null); // Superclass constructor deprecated too
     }
 
     /**
@@ -89,10 +92,13 @@
      * for the default value.
      *
      * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
+     *
+     * @deprecated Release 7 is obsolete; update to a visitor for a newer
+     * release level.
      */
-    @SuppressWarnings("deprecation") // Superclass constructor deprecated
+    @Deprecated
     protected TypeKindVisitor7(R defaultValue) {
-        super(defaultValue);
+        super(defaultValue); // Superclass constructor deprecated too
     }
 
     /**
--- a/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitor8.java Tue Jul 03 16:09:25 2018 +0530
+++ b/src/java.compiler/share/classes/javax/lang/model/util/TypeKindVisitor8.java Thu Jul 12 11:09:23 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -79,6 +79,7 @@
      * Constructor for concrete subclasses to call; uses {@code null}
      * for the default value.
      */
+    @SuppressWarnings("deprecation") // Superclass constructor deprecated
     protected TypeKindVisitor8() {
         super(null);
     }
@@ -89,6 +90,7 @@
      *
      * @param defaultValue the value to assign to {@link #DEFAULT_VALUE}
      */
+    @SuppressWarnings("deprecation") // Superclass constructor deprecated
     protected TypeKindVisitor8(R defaultValue) {
         super(defaultValue);
     }
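
The six javax.lang.model.util visitor diffs above follow one pattern: the release-7 visitors (SimpleElementVisitor7, SimpleTypeVisitor7, TypeKindVisitor7) become deprecated themselves, while the release-8 visitors only suppress the warning that now comes from calling the deprecated superclass constructors. As a hedged sketch of the migration the new @deprecated text asks for, a subclass that used to extend SimpleElementVisitor7 can extend a visitor for a newer release level such as SimpleElementVisitor9; the subclass name and behaviour below are invented for illustration and are not part of this changeset.

    import javax.lang.model.element.VariableElement;
    import javax.lang.model.util.SimpleElementVisitor9;

    // Illustrative only: the same kind of subclass, moved off the obsolete
    // release-7 visitor onto a newer release level.
    class FieldNamer extends SimpleElementVisitor9<String, Void> {
        FieldNamer() {
            super("<not a field>"); // no deprecation warning at this release level
        }

        @Override
        public String visitVariable(VariableElement e, Void p) {
            return e.getSimpleName().toString();
        }
    }
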
--- a/src/java.desktop/macosx/classes/com/apple/laf/AquaTabbedPaneCopyFromBasicUI.java Tue Jul 03 16:09:25 2018 +0530
+++ b/src/java.desktop/macosx/classes/com/apple/laf/AquaTabbedPaneCopyFromBasicUI.java Thu Jul 12 11:09:23 2018 -0700
@@ -3320,7 +3320,7 @@
             } else if (name == "indexForNullComponent") {
                 isRunsDirty = true;
                 updateHtmlViews((Integer) e.getNewValue(), true);
-            } else if (name == "font") {
+            } else if (name == "font" || SwingUtilities2.isScaleChanged(e)) {
                 calculatedBaseline = false;
             }
         }
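
The Aqua tabbed-pane change resets the cached baseline when the display scale changes, not only when the font changes; SwingUtilities2.isScaleChanged is an internal JDK helper. A hedged, public-API approximation of the same check is sketched below, assuming the source component fires a "graphicsConfiguration" property change when it moves between displays; the class and field names are illustrative.

    import java.awt.GraphicsConfiguration;
    import java.beans.PropertyChangeEvent;
    import java.beans.PropertyChangeListener;

    // Sketch: invalidate a cached layout value when either the font or the
    // per-display scale of the source component changes.
    class ScaleAwareCacheInvalidator implements PropertyChangeListener {
        private boolean baselineValid;

        @Override
        public void propertyChange(PropertyChangeEvent e) {
            if ("font".equals(e.getPropertyName()) || scaleChanged(e)) {
                baselineValid = false; // recompute on the next layout pass
            }
        }

        private static boolean scaleChanged(PropertyChangeEvent e) {
            if (!"graphicsConfiguration".equals(e.getPropertyName())) {
                return false;
            }
            GraphicsConfiguration oldGC = (GraphicsConfiguration) e.getOldValue();
            GraphicsConfiguration newGC = (GraphicsConfiguration) e.getNewValue();
            double oldScale = oldGC == null ? 1 : oldGC.getDefaultTransform().getScaleX();
            double newScale = newGC == null ? 1 : newGC.getDefaultTransform().getScaleX();
            return oldScale != newScale;
        }
    }
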
--- a/src/java.desktop/share/classes/com/sun/awt/SecurityWarning.java Tue Jul 03 16:09:25 2018 +0530
+++ b/src/java.desktop/share/classes/com/sun/awt/SecurityWarning.java Thu Jul 12 11:09:23 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,10 @@
  * for limited use outside of the core platform. This API may change
  * drastically between update release, and it may even be
  * removed or be moved to some other packages or classes.
+ *
+ * @deprecated This class is deprecated, no replacement.
  */
+@Deprecated(since = "11", forRemoval = true)
 public final class SecurityWarning {
 
     /**
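
SecurityWarning is now terminally deprecated. With @Deprecated(since = "11", forRemoval = true) the compiler issues the stronger "removal" warning at every use site, on by default rather than only under -Xlint:deprecation, and call sites can acknowledge it locally with @SuppressWarnings("removal"). Below is a minimal sketch of the same annotation applied to a hypothetical class of our own, not part of this changeset.

    /**
     * Hypothetical example of a terminally deprecated API element, mirroring
     * the annotation added to com.sun.awt.SecurityWarning in this changeset.
     *
     * @deprecated No replacement; scheduled for removal.
     */
    @Deprecated(since = "11", forRemoval = true)
    public final class LegacyWarningHelper {
        private LegacyWarningHelper() { }
    }
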
--- a/src/java.desktop/share/classes/com/sun/media/sound/EventDispatcher.java Tue Jul 03 16:09:25 2018 +0530
+++ b/src/java.desktop/share/classes/com/sun/media/sound/EventDispatcher.java Thu Jul 12 11:09:23 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -308,7 +308,12 @@
      * called from auto-closing clips when their closed() method is called.
      */
     void autoClosingClipClosed(AutoClosingClip clip) {
-        // nothing to do -- is removed from arraylist above
+        synchronized(autoClosingClips) {
+            int index = getAutoClosingClipIndex(clip);
+            if (index != -1) {
+                autoClosingClips.remove(index);
+            }
+        }
     }
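
The EventDispatcher change makes autoClosingClipClosed actually remove the closed clip from the shared autoClosingClips list instead of doing nothing, and it does so under the same lock that guards additions, using the existing getAutoClosingClipIndex helper for the lookup. A stand-alone sketch of that remove-under-lock shape, with invented names, follows.

    import java.util.ArrayList;
    import java.util.List;

    // Sketch: entries are added, looked up, and removed under one lock so a
    // close notification cannot race a concurrent registration.
    class ClipRegistry<T> {
        private final List<T> clips = new ArrayList<>();

        void register(T clip) {
            synchronized (clips) {
                clips.add(clip);
            }
        }

        void closed(T clip) {
            synchronized (clips) {
                int index = clips.indexOf(clip); // the JDK uses its own index helper
                if (index != -1) {
                    clips.remove(index);
                }
            }
        }
    }
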
--- a/src/java.desktop/share/classes/com/sun/media/sound/JavaSoundAudioClip.java Tue Jul 03 16:09:25 2018 +0530
+++ b/src/java.desktop/share/classes/com/sun/media/sound/JavaSoundAudioClip.java Thu Jul 12 11:09:23 2018 -0700
@@ -173,29 +173,31 @@
         if (DEBUG || Printer.debug) Printer.debug("JavaSoundAudioClip.startImpl(loop="+loop+")");
         try {
             if (clip != null) {
-                if (!clip.isOpen()) {
-                    if (DEBUG || Printer.trace)Printer.trace("JavaSoundAudioClip: clip.open()");
-                    clip.open(loadedAudioFormat, loadedAudio, 0, loadedAudioByteLength);
-                } else {
-                    if (DEBUG || Printer.trace)Printer.trace("JavaSoundAudioClip: clip.flush()");
-                    clip.flush();
-                    if (loop != clipLooping) {
-                        // need to stop in case the looped status changed
-                        if (DEBUG || Printer.trace)Printer.trace("JavaSoundAudioClip: clip.stop()");
-                        clip.stop();
+                // We need to disable autoclosing mechanism otherwise the clip
+                // can be closed after "!clip.isOpen()" check, because of
+                // previous inactivity.
+                clip.setAutoClosing(false);
+                try {
+                    if (!clip.isOpen()) {
+                        clip.open(loadedAudioFormat, loadedAudio, 0,
+