OpenJDK / jdk / hs
changeset 24991:e10a57d6e9d0
Merge
author | duke |
date | Wed, 05 Jul 2017 19:46:17 +0200 |
parents | c9ca62c6a329 7eefcdab7678 |
children | 0de5415b1d2a |
files | jdk/make/mapfiles/libnpt/mapfile-vers jdk/src/share/classes/sun/management/manifest jdk/src/share/classes/sun/misc/Timeable.java jdk/src/share/classes/sun/misc/Timer.java jdk/src/share/npt/README.txt jdk/src/share/npt/npt.c jdk/src/share/npt/npt.h jdk/src/share/npt/utf.c jdk/src/share/npt/utf.h jdk/src/solaris/npt/npt_md.h jdk/src/solaris/npt/utf_md.c jdk/src/solaris/npt/utf_md.h jdk/src/windows/npt/npt_md.h jdk/src/windows/npt/utf_md.c jdk/src/windows/npt/utf_md.h jdk/test/sun/security/pkcs11/nss/src/MD5SUMS jdk/test/sun/security/pkcs11/nss/src/SHA1SUMS jdk/test/sun/security/pkcs11/nss/src/nss-3.13.1.tar.gz jdk/test/sun/tools/jhat/HatRun.java |
diffstat | 634 files changed, 8853 insertions(+), 7060 deletions(-) |
--- a/.hgtags-top-repo Thu Jun 19 11:22:18 2014 -0700
+++ b/.hgtags-top-repo Wed Jul 05 19:46:17 2017 +0200
@@ -261,3 +261,4 @@
 cf22a728521f91a4692b433d39d730a0a1b23155 jdk9-b16
 24152ee0ee1abef54a8bab04c099261dba7bcca5 jdk9-b17
 65abab59f783fcf02ff8e133431c252f9e5f07d5 jdk9-b18
+75a08df650eb3126bab0c4d15241f5886162393c jdk9-b19
--- a/common/autoconf/flags.m4 Thu Jun 19 11:22:18 2014 -0700 +++ b/common/autoconf/flags.m4 Wed Jul 05 19:46:17 2017 +0200 @@ -116,15 +116,15 @@ AC_SUBST(RC_FLAGS) if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then - # FIXME: likely bug, should be CCXXFLAGS_JDK? or one for C or CXX. - CCXXFLAGS="$CCXXFLAGS -nologo" + # silence copyright notice and other headers. + COMMON_CCXXFLAGS="$COMMON_CCXXFLAGS -nologo" fi if test "x$SYSROOT" != "x"; then if test "x$TOOLCHAIN_TYPE" = xsolstudio; then if test "x$OPENJDK_TARGET_OS" = xsolaris; then # Solaris Studio does not have a concept of sysroot. Instead we must - # make sure the default include and lib dirs are appended to each + # make sure the default include and lib dirs are appended to each # compile and link command line. SYSROOT_CFLAGS="-I$SYSROOT/usr/include" SYSROOT_LDFLAGS="-L$SYSROOT/usr/lib$OPENJDK_TARGET_CPU_ISADIR \ @@ -302,6 +302,7 @@ # Debug symbols if test "x$TOOLCHAIN_TYPE" = xgcc; then if test "x$OPENJDK_TARGET_CPU_BITS" = "x64" && test "x$DEBUG_LEVEL" = "xfastdebug"; then + # reduce from default "-g2" option to save space CFLAGS_DEBUG_SYMBOLS="-g1" CXXFLAGS_DEBUG_SYMBOLS="-g1" else @@ -313,6 +314,7 @@ CXXFLAGS_DEBUG_SYMBOLS="-g" elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then CFLAGS_DEBUG_SYMBOLS="-g -xs" + # FIXME: likely a bug, this disables debug symbols rather than enables them CXXFLAGS_DEBUG_SYMBOLS="-g0 -xs" elif test "x$TOOLCHAIN_TYPE" = xxlc; then CFLAGS_DEBUG_SYMBOLS="-g" @@ -321,6 +323,31 @@ AC_SUBST(CFLAGS_DEBUG_SYMBOLS) AC_SUBST(CXXFLAGS_DEBUG_SYMBOLS) + # bounds, memory and behavior checking options + if test "x$TOOLCHAIN_TYPE" = xgcc; then + case $DEBUG_LEVEL in + release ) + # no adjustment + ;; + fastdebug ) + # Add compile time bounds checks. + CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" + CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" + ;; + slowdebug ) + # Add runtime bounds checks and symbol info. 
+ CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1" + CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1" + if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then + CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" + CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" + fi + ;; + esac + fi + AC_SUBST(CFLAGS_DEBUG_OPTIONS) + AC_SUBST(CXXFLAGS_DEBUG_OPTIONS) + # Optimization levels if test "x$TOOLCHAIN_TYPE" = xsolstudio; then CC_HIGHEST="$CC_HIGHEST -fns -fsimple -fsingle -xbuiltin=%all -xdepend -xrestrict -xlibmil" @@ -330,10 +357,12 @@ C_O_FLAG_HIGHEST="-xO4 -Wu,-O4~yz $CC_HIGHEST -xalias_level=basic -xregs=no%frameptr" C_O_FLAG_HI="-xO4 -Wu,-O4~yz -xregs=no%frameptr" C_O_FLAG_NORM="-xO2 -Wu,-O2~yz -xregs=no%frameptr" + C_O_FLAG_DEBUG="-xregs=no%frameptr" C_O_FLAG_NONE="-xregs=no%frameptr" CXX_O_FLAG_HIGHEST="-xO4 -Qoption ube -O4~yz $CC_HIGHEST -xregs=no%frameptr" CXX_O_FLAG_HI="-xO4 -Qoption ube -O4~yz -xregs=no%frameptr" CXX_O_FLAG_NORM="-xO2 -Qoption ube -O2~yz -xregs=no%frameptr" + CXX_O_FLAG_DEBUG="-xregs=no%frameptr" CXX_O_FLAG_NONE="-xregs=no%frameptr" if test "x$OPENJDK_TARGET_CPU_BITS" = "x32"; then C_O_FLAG_HIGHEST="$C_O_FLAG_HIGHEST -xchip=pentium" @@ -343,10 +372,12 @@ C_O_FLAG_HIGHEST="-xO4 -Wc,-Qrm-s -Wc,-Qiselect-T0 $CC_HIGHEST -xalias_level=basic -xprefetch=auto,explicit -xchip=ultra" C_O_FLAG_HI="-xO4 -Wc,-Qrm-s -Wc,-Qiselect-T0" C_O_FLAG_NORM="-xO2 -Wc,-Qrm-s -Wc,-Qiselect-T0" + C_O_FLAG_DEBUG="" C_O_FLAG_NONE="" CXX_O_FLAG_HIGHEST="-xO4 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0 $CC_HIGHEST -xprefetch=auto,explicit -xchip=ultra" CXX_O_FLAG_HI="-xO4 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0" CXX_O_FLAG_NORM="-xO2 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0" + C_O_FLAG_DEBUG="" CXX_O_FLAG_NONE="" fi else @@ -359,13 +390,17 @@ C_O_FLAG_HIGHEST="-Os" C_O_FLAG_HI="-Os" C_O_FLAG_NORM="-Os" - C_O_FLAG_NONE="" else C_O_FLAG_HIGHEST="-O3" C_O_FLAG_HI="-O3" C_O_FLAG_NORM="-O2" - C_O_FLAG_NONE="-O0" fi + if test "x$HAS_CFLAG_OPTIMIZE_DEBUG" = "xtrue"; then + C_O_FLAG_DEBUG="$CFLAG_OPTIMIZE_DEBUG_FLAG" + else + C_O_FLAG_DEBUG="-O0" + fi + C_O_FLAG_NONE="-O0" elif test "x$TOOLCHAIN_TYPE" = xclang; then if test "x$OPENJDK_TARGET_OS" = xmacosx; then # On MacOSX we optimize for size, something @@ -373,37 +408,63 @@ C_O_FLAG_HIGHEST="-Os" C_O_FLAG_HI="-Os" C_O_FLAG_NORM="-Os" - C_O_FLAG_NONE="" else C_O_FLAG_HIGHEST="-O3" C_O_FLAG_HI="-O3" C_O_FLAG_NORM="-O2" - C_O_FLAG_NONE="-O0" fi + C_O_FLAG_DEBUG="-O0" + C_O_FLAG_NONE="-O0" elif test "x$TOOLCHAIN_TYPE" = xxlc; then C_O_FLAG_HIGHEST="-O3" C_O_FLAG_HI="-O3 -qstrict" C_O_FLAG_NORM="-O2" - C_O_FLAG_NONE="" + C_O_FLAG_DEBUG="-qnoopt" + C_O_FLAG_NONE="-qnoop" elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then C_O_FLAG_HIGHEST="-O2" C_O_FLAG_HI="-O1" C_O_FLAG_NORM="-O1" + C_O_FLAG_DEBUG="-Od" C_O_FLAG_NONE="-Od" fi CXX_O_FLAG_HIGHEST="$C_O_FLAG_HIGHEST" CXX_O_FLAG_HI="$C_O_FLAG_HI" CXX_O_FLAG_NORM="$C_O_FLAG_NORM" + CXX_O_FLAG_DEBUG="$C_O_FLAG_DEBUG" CXX_O_FLAG_NONE="$C_O_FLAG_NONE" fi + # Adjust optimization flags according to debug level. 
+ case $DEBUG_LEVEL in + release ) + # no adjustment + ;; + fastdebug ) + # Not quite so much optimization + C_O_FLAG_HI="$C_O_FLAG_NORM" + CXX_O_FLAG_HI="$CXX_O_FLAG_NORM" + ;; + slowdebug ) + # Disable optimization + C_O_FLAG_HIGHEST="$C_O_FLAG_DEBUG" + C_O_FLAG_HI="$C_O_FLAG_DEBUG" + C_O_FLAG_NORM="$C_O_FLAG_DEBUG" + CXX_O_FLAG_HIGHEST="$CXX_O_FLAG_DEBUG" + CXX_O_FLAG_HI="$CXX_O_FLAG_DEBUG" + CXX_O_FLAG_NORM="$CXX_O_FLAG_DEBUG" + ;; + esac + AC_SUBST(C_O_FLAG_HIGHEST) AC_SUBST(C_O_FLAG_HI) AC_SUBST(C_O_FLAG_NORM) + AC_SUBST(C_O_FLAG_DEBUG) AC_SUBST(C_O_FLAG_NONE) AC_SUBST(CXX_O_FLAG_HIGHEST) AC_SUBST(CXX_O_FLAG_HI) AC_SUBST(CXX_O_FLAG_NORM) + AC_SUBST(CXX_O_FLAG_DEBUG) AC_SUBST(CXX_O_FLAG_NONE) ]) @@ -461,11 +522,12 @@ # Later we will also have CFLAGS and LDFLAGS for the hotspot subrepo build. # - # Setup compiler/platform specific flags to CFLAGS_JDK, - # CXXFLAGS_JDK and CCXXFLAGS_JDK (common to C and CXX?) + # Setup compiler/platform specific flags into + # CFLAGS_JDK - C Compiler flags + # CXXFLAGS_JDK - C++ Compiler flags + # COMMON_CCXXFLAGS_JDK - common to C and C++ if test "x$TOOLCHAIN_TYPE" = xgcc; then - # these options are used for both C and C++ compiles - CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -Wall -Wno-parentheses -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2 \ + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -Wall -Wno-parentheses -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2 \ -pipe -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE" case $OPENJDK_TARGET_CPU_ARCH in arm ) @@ -477,31 +539,31 @@ CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing" ;; * ) - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -fno-omit-frame-pointer" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer" CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing" ;; esac elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then - CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -DTRACING -DMACRO_MEMSYS_OPS -DBREAKPTS" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -DTRACING -DMACRO_MEMSYS_OPS -DBREAKPTS" if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DcpuIntel -Di586 -D$OPENJDK_TARGET_CPU_LEGACY_LIB" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DcpuIntel -Di586 -D$OPENJDK_TARGET_CPU_LEGACY_LIB" CFLAGS_JDK="$CFLAGS_JDK -erroff=E_BAD_PRAGMA_PACK_VALUE" fi - + CFLAGS_JDK="$CFLAGS_JDK -xc99=%none -xCC -errshort=tags -Xa -v -mt -W0,-noglobal" CXXFLAGS_JDK="$CXXFLAGS_JDK -errtags=yes +w -mt -features=no%except -DCC_NOEX -norunpath -xnolib" elif test "x$TOOLCHAIN_TYPE" = xxlc; then CFLAGS_JDK="$CFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC" CXXFLAGS_JDK="$CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC" elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then - CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -Zi -MD -Zc:wchar_t- -W3 -wd4800 \ + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -Zi -MD -Zc:wchar_t- -W3 -wd4800 \ -D_STATIC_CPPLIB -D_DISABLE_DEPRECATE_STATIC_CPPLIB -DWIN32_LEAN_AND_MEAN \ -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE \ -DWIN32 -DIAL" if test "x$OPENJDK_TARGET_CPU" = xx86_64; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_AMD64_ -Damd64" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_AMD64_ -Damd64" else - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_X86_ -Dx86" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_X86_ -Dx86" fi fi @@ -509,28 +571,20 @@ # Adjust flags according to debug level. 
case $DEBUG_LEVEL in - fastdebug ) - CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS" - CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS" - C_O_FLAG_HI="$C_O_FLAG_NORM" - C_O_FLAG_NORM="$C_O_FLAG_NORM" - CXX_O_FLAG_HI="$CXX_O_FLAG_NORM" - CXX_O_FLAG_NORM="$CXX_O_FLAG_NORM" + fastdebug | slowdebug ) + CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS $CFLAGS_DEBUG_OPTIONS" + CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS $CXXFLAGS_DEBUG_OPTIONS" JAVAC_FLAGS="$JAVAC_FLAGS -g" ;; - slowdebug ) - CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS" - CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS" - C_O_FLAG_HI="$C_O_FLAG_NONE" - C_O_FLAG_NORM="$C_O_FLAG_NONE" - CXX_O_FLAG_HI="$CXX_O_FLAG_NONE" - CXX_O_FLAG_NORM="$CXX_O_FLAG_NONE" - JAVAC_FLAGS="$JAVAC_FLAGS -g" + release ) + ;; + * ) + AC_MSG_ERROR([Unrecognized \$DEBUG_LEVEL: $DEBUG_LEVEL]) ;; esac # Setup LP64 - CCXXFLAGS_JDK="$CCXXFLAGS_JDK $ADD_LP64" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK $ADD_LP64" # Set some common defines. These works for all compilers, but assume # -D is universally accepted. @@ -543,49 +597,49 @@ # Note: -Dmacro is the same as #define macro 1 # -Dmacro= is the same as #define macro if test "x$OPENJDK_TARGET_OS" = xsolaris; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_LITTLE_ENDIAN=" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_LITTLE_ENDIAN=" else - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_LITTLE_ENDIAN" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_LITTLE_ENDIAN" fi else # Same goes for _BIG_ENDIAN. Do we really need to set *ENDIAN on Solaris if they # are defined in the system? if test "x$OPENJDK_TARGET_OS" = xsolaris; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN=" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_BIG_ENDIAN=" else - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_BIG_ENDIAN" fi fi - + # Setup target OS define. Use OS target name but in upper case. OPENJDK_TARGET_OS_UPPERCASE=`$ECHO $OPENJDK_TARGET_OS | $TR 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'` - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D$OPENJDK_TARGET_OS_UPPERCASE" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D$OPENJDK_TARGET_OS_UPPERCASE" # Setup target CPU - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DARCH='\"$OPENJDK_TARGET_CPU_LEGACY\"' -D$OPENJDK_TARGET_CPU_LEGACY" - + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DARCH='\"$OPENJDK_TARGET_CPU_LEGACY\"' -D$OPENJDK_TARGET_CPU_LEGACY" + # Setup debug/release defines if test "x$DEBUG_LEVEL" = xrelease; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DNDEBUG" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DNDEBUG" if test "x$OPENJDK_TARGET_OS" = xsolaris; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DTRIMMED" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DTRIMMED" fi else - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DDEBUG" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DDEBUG" fi # Setup release name - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DRELEASE='\"\$(RELEASE)\"'" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DRELEASE='\"\$(RELEASE)\"'" # Set some additional per-OS defines. if test "x$OPENJDK_TARGET_OS" = xmacosx; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT" elif test "x$OPENJDK_TARGET_OS" = xaix; then # FIXME: PPC64 should not be here. 
- CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DPPC64" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DPPC64" elif test "x$OPENJDK_TARGET_OS" = xbsd; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_ALLBSD_SOURCE" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE" fi # Additional macosx handling @@ -595,22 +649,22 @@ # FIXME: clean this up, and/or move it elsewhere. # Setting these parameters makes it an error to link to macosx APIs that are - # newer than the given OS version and makes the linked binaries compatible + # newer than the given OS version and makes the linked binaries compatible # even if built on a newer version of the OS. # The expected format is X.Y.Z MACOSX_VERSION_MIN=10.7.0 AC_SUBST(MACOSX_VERSION_MIN) - + # The macro takes the version with no dots, ex: 1070 # Let the flags variables get resolved in make for easier override on make # command line. - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)" LDFLAGS_JDK="$LDFLAGS_JDK -mmacosx-version-min=\$(MACOSX_VERSION_MIN)" fi fi # Setup some hard coded includes - CCXXFLAGS_JDK="$CCXXFLAGS_JDK \ + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK \ -I${JDK_OUTPUTDIR}/include \ -I${JDK_OUTPUTDIR}/include/$OPENJDK_TARGET_OS \ -I${JDK_TOPDIR}/src/share/javavm/export \ @@ -619,12 +673,12 @@ -I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_API_DIR/native/common" # The shared libraries are compiled using the picflag. - CFLAGS_JDKLIB="$CCXXFLAGS_JDK $CFLAGS_JDK $PICFLAG $CFLAGS_JDKLIB_EXTRA" - CXXFLAGS_JDKLIB="$CCXXFLAGS_JDK $CXXFLAGS_JDK $PICFLAG $CXXFLAGS_JDKLIB_EXTRA " + CFLAGS_JDKLIB="$COMMON_CCXXFLAGS_JDK $CFLAGS_JDK $PICFLAG $CFLAGS_JDKLIB_EXTRA" + CXXFLAGS_JDKLIB="$COMMON_CCXXFLAGS_JDK $CXXFLAGS_JDK $PICFLAG $CXXFLAGS_JDKLIB_EXTRA " # Executable flags - CFLAGS_JDKEXE="$CCXXFLAGS_JDK $CFLAGS_JDK" - CXXFLAGS_JDKEXE="$CCXXFLAGS_JDK $CXXFLAGS_JDK" + CFLAGS_JDKEXE="$COMMON_CCXXFLAGS_JDK $CFLAGS_JDK" + CXXFLAGS_JDKEXE="$COMMON_CCXXFLAGS_JDK $CXXFLAGS_JDK" AC_SUBST(CFLAGS_JDKLIB) AC_SUBST(CFLAGS_JDKEXE) @@ -633,6 +687,7 @@ # Setup LDFLAGS et al. # + # Now this is odd. The JDK native libraries have to link against libjvm.so # On 32-bit machines there is normally two distinct libjvm.so:s, client and server. # Which should we link to? Are we lucky enough that the binary api to the libjvm.so library @@ -648,39 +703,93 @@ fi # TODO: make -debug optional "--disable-full-debug-symbols" LDFLAGS_JDK="$LDFLAGS_JDK -debug" - LDFLAGS_JDKLIB="${LDFLAGS_JDK} -dll -libpath:${JDK_OUTPUTDIR}/lib" - LDFLAGS_JDKLIB_SUFFIX="" + elif test "x$TOOLCHAIN_TYPE" = xgcc; then + # If this is a --hash-style=gnu system, use --hash-style=both, why? + # We have previously set HAS_GNU_HASH if this is the case + if test -n "$HAS_GNU_HASH"; then + LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker --hash-style=both" + fi + if test "x$OPENJDK_TARGET_OS" = xlinux; then + # And since we now know that the linker is gnu, then add -z defs, to forbid + # undefined symbols in object files. + LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -z -Xlinker defs" + case $DEBUG_LEVEL in + release ) + # tell linker to optimize libraries. + # Should this be supplied to the OSS linker as well? 
+ LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -O1" + ;; + slowdebug ) + if test "x$HAS_LINKER_NOW" = "xtrue"; then + # do relocations at load + LDFLAGS_JDK="$LDFLAGS_JDK $LINKER_NOW_FLAG" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK $LINKER_NOW_FLAG" + fi + if test "x$HAS_LINKER_RELRO" = "xtrue"; then + # mark relocations read only + LDFLAGS_JDK="$LDFLAGS_JDK $LINKER_RELRO_FLAG" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK $LINKER_RELRO_FLAG" + fi + ;; + fastdebug ) + if test "x$HAS_LINKER_RELRO" = "xtrue"; then + # mark relocations read only + LDFLAGS_JDK="$LDFLAGS_JDK $LINKER_RELRO_FLAG" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK $LINKER_RELRO_FLAG" + fi + ;; + * ) + AC_MSG_ERROR([Unrecognized \$DEBUG_LEVEL: $DEBUG_LEVEL]) + ;; + esac + fi + elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then + LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib" + fi + + if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then + # If undefined behaviour detection is enabled then we need to tell linker. + case $DEBUG_LEVEL in + release | fastdebug ) + ;; + slowdebug ) + AC_MSG_WARN([$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR]) + if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then + # enable undefined behaviour checking + LDFLAGS_JDK="$LDFLAGS_JDK `$ECHO -n $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG | sed -e "s/[ ]*\([^ ]\+\)/ -Xlinker \1/g"`" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK `$ECHO -n $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG | sed -e "s/[ ]*\([^ ]\+\)/ -Xlinker \1/g"`" + fi + ;; + * ) + AC_MSG_ERROR([Unrecognized \$DEBUG_LEVEL: $DEBUG_LEVEL]) + ;; + esac + fi + + # Customize LDFLAGS for executables + + LDFLAGS_JDKEXE="${LDFLAGS_JDK}" + + if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then if test "x$OPENJDK_TARGET_CPU_BITS" = "x64"; then LDFLAGS_STACK_SIZE=1048576 else LDFLAGS_STACK_SIZE=327680 fi - LDFLAGS_JDKEXE="${LDFLAGS_JDK} /STACK:$LDFLAGS_STACK_SIZE" + LDFLAGS_JDKEXE="${LDFLAGS_JDKEXE} /STACK:$LDFLAGS_STACK_SIZE" + elif test "x$OPENJDK_TARGET_OS" = xlinux; then + LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE -Xlinker --allow-shlib-undefined" + fi + + # Customize LDFLAGS for libs + LDFLAGS_JDKLIB="${LDFLAGS_JDK}" + + if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then + LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} -dll -libpath:${JDK_OUTPUTDIR}/lib" + LDFLAGS_JDKLIB_SUFFIX="" else - if test "x$TOOLCHAIN_TYPE" = xgcc; then - # If this is a --hash-style=gnu system, use --hash-style=both, why? - # We have previously set HAS_GNU_HASH if this is the case - if test -n "$HAS_GNU_HASH"; then - LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker --hash-style=both " - fi - if test "x$OPENJDK_TARGET_OS" = xlinux; then - # And since we now know that the linker is gnu, then add -z defs, to forbid - # undefined symbols in object files. - LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -z -Xlinker defs" - if test "x$DEBUG_LEVEL" = "xrelease"; then - # When building release libraries, tell the linker optimize them. - # Should this be supplied to the OSS linker as well? - LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -O1" - fi - fi - fi - - if test "x$TOOLCHAIN_TYPE" = xsolstudio; then - LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext" - LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib" - fi - - LDFLAGS_JDKLIB="${LDFLAGS_JDK} $SHARED_LIBRARY_FLAGS \ + LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} ${SHARED_LIBRARY_FLAGS} \ -L${JDK_OUTPUTDIR}/lib${OPENJDK_TARGET_CPU_LIBDIR}" # On some platforms (mac) the linker warns about non existing -L dirs. 
@@ -701,12 +810,8 @@ if test "x$TOOLCHAIN_TYPE" = xsolstudio; then LDFLAGS_JDKLIB_SUFFIX="$LDFLAGS_JDKLIB_SUFFIX -lc" fi + fi - LDFLAGS_JDKEXE="${LDFLAGS_JDK}" - if test "x$OPENJDK_TARGET_OS" = xlinux; then - LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE -Xlinker --allow-shlib-undefined" - fi - fi AC_SUBST(LDFLAGS_JDKLIB) AC_SUBST(LDFLAGS_JDKEXE) AC_SUBST(LDFLAGS_JDKLIB_SUFFIX) @@ -714,7 +819,6 @@ AC_SUBST(LDFLAGS_CXX_JDK) ]) - # FLAGS_COMPILER_CHECK_ARGUMENTS([ARGUMENT], [RUN-IF-TRUE], # [RUN-IF-FALSE]) # ------------------------------------------------------------ @@ -727,7 +831,7 @@ saved_cflags="$CFLAGS" CFLAGS="$CFLAGS $1" AC_LANG_PUSH([C]) - AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [], + AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [], [supports=no]) AC_LANG_POP([C]) CFLAGS="$saved_cflags" @@ -735,7 +839,7 @@ saved_cxxflags="$CXXFLAGS" CXXFLAGS="$CXXFLAG $1" AC_LANG_PUSH([C++]) - AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [], + AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [], [supports=no]) AC_LANG_POP([C++]) CXXFLAGS="$saved_cxxflags" @@ -748,6 +852,31 @@ fi ]) +# FLAGS_LINKER_CHECK_ARGUMENTS([ARGUMENT], [RUN-IF-TRUE], +# [RUN-IF-FALSE]) +# ------------------------------------------------------------ +# Check that the linker support an argument +AC_DEFUN([FLAGS_LINKER_CHECK_ARGUMENTS], +[ + AC_MSG_CHECKING([if linker supports "$1"]) + supports=yes + + saved_ldflags="$LDFLAGS" + LDFLAGS="$LDFLAGS $1" + AC_LANG_PUSH([C]) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])], + [], [supports=no]) + AC_LANG_POP([C]) + LDFLAGS="$saved_ldflags" + + AC_MSG_RESULT([$supports]) + if test "x$supports" = "xyes" ; then + m4_ifval([$2], [$2], [:]) + else + m4_ifval([$3], [$3], [:]) + fi +]) + AC_DEFUN_ONCE([FLAGS_SETUP_COMPILER_FLAGS_MISC], [ # Some Zero and Shark settings.
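Note on the flags.m4 hunks above: for fastdebug/slowdebug they add _FORTIFY_SOURCE and -fstack-protector-all to the compile lines, and (when the new linker probes succeed) "-z relro" / "-z now" to the link lines. As a rough stand-alone illustration only — not part of the changeset; demo.c and the output name are made up, -O1 is added so _FORTIFY_SOURCE has something to work on, and -Wl,... is shorthand for the -Xlinker spelling used in the patch — the effect on a binary can be inspected on a Linux/gcc toolchain like this:

cat > demo.c <<'EOF'
#include <stdio.h>
int main(void) { printf("hello\n"); return 0; }
EOF
# build with slowdebug-style hardening options
gcc -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all \
    --param ssp-buffer-size=1 -O1 \
    -Wl,-z,relro -Wl,-z,now -o demo demo.c
readelf -l demo | grep -i relro    # GNU_RELRO segment => "-z relro" took effect
readelf -d demo | grep -i now      # BIND_NOW/NOW flags => "-z now" took effect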
--- a/common/autoconf/generated-configure.sh Thu Jun 19 11:22:18 2014 -0700 +++ b/common/autoconf/generated-configure.sh Wed Jul 05 19:46:17 2017 +0200 @@ -692,13 +692,17 @@ LEGACY_EXTRA_CXXFLAGS LEGACY_EXTRA_CFLAGS CXX_O_FLAG_NONE +CXX_O_FLAG_DEBUG CXX_O_FLAG_NORM CXX_O_FLAG_HI CXX_O_FLAG_HIGHEST C_O_FLAG_NONE +C_O_FLAG_DEBUG C_O_FLAG_NORM C_O_FLAG_HI C_O_FLAG_HIGHEST +CXXFLAGS_DEBUG_OPTIONS +CFLAGS_DEBUG_OPTIONS CXXFLAGS_DEBUG_SYMBOLS CFLAGS_DEBUG_SYMBOLS CXX_FLAG_DEPS @@ -2346,6 +2350,52 @@ } # ac_fn_objc_try_compile +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + # ac_fn_cxx_check_header_mongrel LINENO HEADER VAR INCLUDES # --------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using @@ -3761,13 +3811,18 @@ - # FLAGS_COMPILER_CHECK_ARGUMENTS([ARGUMENT], [RUN-IF-TRUE], # [RUN-IF-FALSE]) # ------------------------------------------------------------ # Check that the c and c++ compilers support an argument +# FLAGS_LINKER_CHECK_ARGUMENTS([ARGUMENT], [RUN-IF-TRUE], +# [RUN-IF-FALSE]) +# ------------------------------------------------------------ +# Check that the linker support an argument + + # @@ -4253,7 +4308,7 @@ #CUSTOM_AUTOCONF_INCLUDE # Do not change or remove the following line, it is needed for consistency checks: -DATE_WHEN_GENERATED=1399969244 +DATE_WHEN_GENERATED=1402614845 ############################################################################### # @@ -40264,6 +40319,8 @@ + + # The package path is used only on macosx? # FIXME: clean this up, and/or move it elsewhere. PACKAGE_PATH=/opt/local @@ -40289,6 +40346,242 @@ # If this is a --hash-style=gnu system, use --hash-style=both, why? HAS_GNU_HASH=`$CC -dumpspecs 2>/dev/null | $GREP 'hash-style=gnu'` # This is later checked when setting flags. 
+ + # "-Og" suppported for GCC 4.8 and later + CFLAG_OPTIMIZE_DEBUG_FLAG="-Og" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler supports \"$CFLAG_OPTIMIZE_DEBUG_FLAG\"" >&5 +$as_echo_n "checking if compiler supports \"$CFLAG_OPTIMIZE_DEBUG_FLAG\"... " >&6; } + supports=yes + + saved_cflags="$CFLAGS" + CFLAGS="$CFLAGS $CFLAG_OPTIMIZE_DEBUG_FLAG" + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +int i; +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + supports=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + CFLAGS="$saved_cflags" + + saved_cxxflags="$CXXFLAGS" + CXXFLAGS="$CXXFLAG $CFLAG_OPTIMIZE_DEBUG_FLAG" + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +int i; +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + supports=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + CXXFLAGS="$saved_cxxflags" + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $supports" >&5 +$as_echo "$supports" >&6; } + if test "x$supports" = "xyes" ; then + HAS_CFLAG_OPTIMIZE_DEBUG=true + else + HAS_CFLAG_OPTIMIZE_DEBUG=false + fi + + + # "-fsanitize=undefined" supported for GCC 4.9 and later + CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG="-fsanitize=undefined -fsanitize-recover" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler supports \"$CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG\"" >&5 +$as_echo_n "checking if compiler supports \"$CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG\"... " >&6; } + supports=yes + + saved_cflags="$CFLAGS" + CFLAGS="$CFLAGS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int i; +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + supports=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + CFLAGS="$saved_cflags" + + saved_cxxflags="$CXXFLAGS" + CXXFLAGS="$CXXFLAG $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +int i; +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + supports=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + CXXFLAGS="$saved_cxxflags" + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $supports" >&5 +$as_echo "$supports" >&6; } + if test "x$supports" = "xyes" ; then + HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR=true + else + HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR=false + fi + + + # "-z relro" supported in GNU binutils 2.17 and later + LINKER_RELRO_FLAG="-Xlinker -z -Xlinker relro" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if linker supports \"$LINKER_RELRO_FLAG\"" >&5 +$as_echo_n "checking if linker supports \"$LINKER_RELRO_FLAG\"... " >&6; } + supports=yes + + saved_ldflags="$LDFLAGS" + LDFLAGS="$LDFLAGS $LINKER_RELRO_FLAG" + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + +else + supports=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + LDFLAGS="$saved_ldflags" + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $supports" >&5 +$as_echo "$supports" >&6; } + if test "x$supports" = "xyes" ; then + HAS_LINKER_RELRO=true + else + HAS_LINKER_RELRO=false + fi + + + # "-z now" supported in GNU binutils 2.11 and later + LINKER_NOW_FLAG="-Xlinker -z -Xlinker now" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if linker supports \"$LINKER_NOW_FLAG\"" >&5 +$as_echo_n "checking if linker supports \"$LINKER_NOW_FLAG\"... " >&6; } + supports=yes + + saved_ldflags="$LDFLAGS" + LDFLAGS="$LDFLAGS $LINKER_NOW_FLAG" + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + +else + supports=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + LDFLAGS="$saved_ldflags" + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $supports" >&5 +$as_echo "$supports" >&6; } + if test "x$supports" = "xyes" ; then + HAS_LINKER_NOW=true + else + HAS_LINKER_NOW=false + fi + fi # Check for broken SuSE 'ld' for which 'Only anonymous version tag is allowed @@ -40297,8 +40590,8 @@ if test "x$OPENJDK_TARGET_OS" = xlinux && test "x$TOOLCHAIN_TYPE" = xgcc; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for broken SuSE 'ld' which only understands anonymous version tags in executables" >&5 $as_echo_n "checking for broken SuSE 'ld' which only understands anonymous version tags in executables... " >&6; } - echo "SUNWprivate_1.1 { local: *; };" > version-script.map - echo "int main() { }" > main.c + $ECHO "SUNWprivate_1.1 { local: *; };" > version-script.map + $ECHO "int main() { }" > main.c if $CXX -Xlinker -version-script=version-script.map main.c 2>&5 >&5; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } @@ -40768,8 +41061,8 @@ if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then - # FIXME: likely bug, should be CCXXFLAGS_JDK? or one for C or CXX. - CCXXFLAGS="$CCXXFLAGS -nologo" + # silence copyright notice and other headers. + COMMON_CCXXFLAGS="$COMMON_CCXXFLAGS -nologo" fi if test "x$SYSROOT" != "x"; then @@ -40803,7 +41096,6 @@ # Now we can test some aspects on the target using configure macros. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : @@ -41488,6 +41780,7 @@ # Debug symbols if test "x$TOOLCHAIN_TYPE" = xgcc; then if test "x$OPENJDK_TARGET_CPU_BITS" = "x64" && test "x$DEBUG_LEVEL" = "xfastdebug"; then + # reduce from default "-g2" option to save space CFLAGS_DEBUG_SYMBOLS="-g1" CXXFLAGS_DEBUG_SYMBOLS="-g1" else @@ -41499,6 +41792,7 @@ CXXFLAGS_DEBUG_SYMBOLS="-g" elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then CFLAGS_DEBUG_SYMBOLS="-g -xs" + # FIXME: likely a bug, this disables debug symbols rather than enables them CXXFLAGS_DEBUG_SYMBOLS="-g0 -xs" elif test "x$TOOLCHAIN_TYPE" = xxlc; then CFLAGS_DEBUG_SYMBOLS="-g" @@ -41507,6 +41801,31 @@ + # bounds, memory and behavior checking options + if test "x$TOOLCHAIN_TYPE" = xgcc; then + case $DEBUG_LEVEL in + release ) + # no adjustment + ;; + fastdebug ) + # Add compile time bounds checks. + CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" + CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1" + ;; + slowdebug ) + # Add runtime bounds checks and symbol info. 
+ CFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1" + CXXFLAGS_DEBUG_OPTIONS="-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1" + if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then + CFLAGS_DEBUG_OPTIONS="$CFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" + CXXFLAGS_DEBUG_OPTIONS="$CXXFLAGS_DEBUG_OPTIONS $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG" + fi + ;; + esac + fi + + + # Optimization levels if test "x$TOOLCHAIN_TYPE" = xsolstudio; then CC_HIGHEST="$CC_HIGHEST -fns -fsimple -fsingle -xbuiltin=%all -xdepend -xrestrict -xlibmil" @@ -41516,10 +41835,12 @@ C_O_FLAG_HIGHEST="-xO4 -Wu,-O4~yz $CC_HIGHEST -xalias_level=basic -xregs=no%frameptr" C_O_FLAG_HI="-xO4 -Wu,-O4~yz -xregs=no%frameptr" C_O_FLAG_NORM="-xO2 -Wu,-O2~yz -xregs=no%frameptr" + C_O_FLAG_DEBUG="-xregs=no%frameptr" C_O_FLAG_NONE="-xregs=no%frameptr" CXX_O_FLAG_HIGHEST="-xO4 -Qoption ube -O4~yz $CC_HIGHEST -xregs=no%frameptr" CXX_O_FLAG_HI="-xO4 -Qoption ube -O4~yz -xregs=no%frameptr" CXX_O_FLAG_NORM="-xO2 -Qoption ube -O2~yz -xregs=no%frameptr" + CXX_O_FLAG_DEBUG="-xregs=no%frameptr" CXX_O_FLAG_NONE="-xregs=no%frameptr" if test "x$OPENJDK_TARGET_CPU_BITS" = "x32"; then C_O_FLAG_HIGHEST="$C_O_FLAG_HIGHEST -xchip=pentium" @@ -41529,10 +41850,12 @@ C_O_FLAG_HIGHEST="-xO4 -Wc,-Qrm-s -Wc,-Qiselect-T0 $CC_HIGHEST -xalias_level=basic -xprefetch=auto,explicit -xchip=ultra" C_O_FLAG_HI="-xO4 -Wc,-Qrm-s -Wc,-Qiselect-T0" C_O_FLAG_NORM="-xO2 -Wc,-Qrm-s -Wc,-Qiselect-T0" + C_O_FLAG_DEBUG="" C_O_FLAG_NONE="" CXX_O_FLAG_HIGHEST="-xO4 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0 $CC_HIGHEST -xprefetch=auto,explicit -xchip=ultra" CXX_O_FLAG_HI="-xO4 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0" CXX_O_FLAG_NORM="-xO2 -Qoption cg -Qrm-s -Qoption cg -Qiselect-T0" + C_O_FLAG_DEBUG="" CXX_O_FLAG_NONE="" fi else @@ -41545,13 +41868,17 @@ C_O_FLAG_HIGHEST="-Os" C_O_FLAG_HI="-Os" C_O_FLAG_NORM="-Os" - C_O_FLAG_NONE="" else C_O_FLAG_HIGHEST="-O3" C_O_FLAG_HI="-O3" C_O_FLAG_NORM="-O2" - C_O_FLAG_NONE="-O0" - fi + fi + if test "x$HAS_CFLAG_OPTIMIZE_DEBUG" = "xtrue"; then + C_O_FLAG_DEBUG="$CFLAG_OPTIMIZE_DEBUG_FLAG" + else + C_O_FLAG_DEBUG="-O0" + fi + C_O_FLAG_NONE="-O0" elif test "x$TOOLCHAIN_TYPE" = xclang; then if test "x$OPENJDK_TARGET_OS" = xmacosx; then # On MacOSX we optimize for size, something @@ -41559,30 +41886,56 @@ C_O_FLAG_HIGHEST="-Os" C_O_FLAG_HI="-Os" C_O_FLAG_NORM="-Os" - C_O_FLAG_NONE="" else C_O_FLAG_HIGHEST="-O3" C_O_FLAG_HI="-O3" C_O_FLAG_NORM="-O2" - C_O_FLAG_NONE="-O0" - fi + fi + C_O_FLAG_DEBUG="-O0" + C_O_FLAG_NONE="-O0" elif test "x$TOOLCHAIN_TYPE" = xxlc; then C_O_FLAG_HIGHEST="-O3" C_O_FLAG_HI="-O3 -qstrict" C_O_FLAG_NORM="-O2" - C_O_FLAG_NONE="" + C_O_FLAG_DEBUG="-qnoopt" + C_O_FLAG_NONE="-qnoop" elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then C_O_FLAG_HIGHEST="-O2" C_O_FLAG_HI="-O1" C_O_FLAG_NORM="-O1" + C_O_FLAG_DEBUG="-Od" C_O_FLAG_NONE="-Od" fi CXX_O_FLAG_HIGHEST="$C_O_FLAG_HIGHEST" CXX_O_FLAG_HI="$C_O_FLAG_HI" CXX_O_FLAG_NORM="$C_O_FLAG_NORM" + CXX_O_FLAG_DEBUG="$C_O_FLAG_DEBUG" CXX_O_FLAG_NONE="$C_O_FLAG_NONE" fi + # Adjust optimization flags according to debug level. 
+ case $DEBUG_LEVEL in + release ) + # no adjustment + ;; + fastdebug ) + # Not quite so much optimization + C_O_FLAG_HI="$C_O_FLAG_NORM" + CXX_O_FLAG_HI="$CXX_O_FLAG_NORM" + ;; + slowdebug ) + # Disable optimization + C_O_FLAG_HIGHEST="$C_O_FLAG_DEBUG" + C_O_FLAG_HI="$C_O_FLAG_DEBUG" + C_O_FLAG_NORM="$C_O_FLAG_DEBUG" + CXX_O_FLAG_HIGHEST="$CXX_O_FLAG_DEBUG" + CXX_O_FLAG_HI="$CXX_O_FLAG_DEBUG" + CXX_O_FLAG_NORM="$CXX_O_FLAG_DEBUG" + ;; + esac + + + @@ -41660,11 +42013,12 @@ # Later we will also have CFLAGS and LDFLAGS for the hotspot subrepo build. # - # Setup compiler/platform specific flags to CFLAGS_JDK, - # CXXFLAGS_JDK and CCXXFLAGS_JDK (common to C and CXX?) + # Setup compiler/platform specific flags into + # CFLAGS_JDK - C Compiler flags + # CXXFLAGS_JDK - C++ Compiler flags + # COMMON_CCXXFLAGS_JDK - common to C and C++ if test "x$TOOLCHAIN_TYPE" = xgcc; then - # these options are used for both C and C++ compiles - CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -Wall -Wno-parentheses -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2 \ + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -Wall -Wno-parentheses -Wextra -Wno-unused -Wno-unused-parameter -Wformat=2 \ -pipe -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE" case $OPENJDK_TARGET_CPU_ARCH in arm ) @@ -41676,14 +42030,14 @@ CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing" ;; * ) - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -fno-omit-frame-pointer" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -fno-omit-frame-pointer" CFLAGS_JDK="${CFLAGS_JDK} -fno-strict-aliasing" ;; esac elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then - CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -DTRACING -DMACRO_MEMSYS_OPS -DBREAKPTS" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -DTRACING -DMACRO_MEMSYS_OPS -DBREAKPTS" if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DcpuIntel -Di586 -D$OPENJDK_TARGET_CPU_LEGACY_LIB" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DcpuIntel -Di586 -D$OPENJDK_TARGET_CPU_LEGACY_LIB" CFLAGS_JDK="$CFLAGS_JDK -erroff=E_BAD_PRAGMA_PACK_VALUE" fi @@ -41693,14 +42047,14 @@ CFLAGS_JDK="$CFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC" CXXFLAGS_JDK="$CXXFLAGS_JDK -D_GNU_SOURCE -D_REENTRANT -D_LARGEFILE64_SOURCE -DSTDC" elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then - CCXXFLAGS_JDK="$CCXXFLAGS $CCXXFLAGS_JDK -Zi -MD -Zc:wchar_t- -W3 -wd4800 \ + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS $COMMON_CCXXFLAGS_JDK -Zi -MD -Zc:wchar_t- -W3 -wd4800 \ -D_STATIC_CPPLIB -D_DISABLE_DEPRECATE_STATIC_CPPLIB -DWIN32_LEAN_AND_MEAN \ -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE \ -DWIN32 -DIAL" if test "x$OPENJDK_TARGET_CPU" = xx86_64; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_AMD64_ -Damd64" - else - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_X86_ -Dx86" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_AMD64_ -Damd64" + else + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_X86_ -Dx86" fi fi @@ -41708,28 +42062,20 @@ # Adjust flags according to debug level. 
case $DEBUG_LEVEL in - fastdebug ) - CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS" - CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS" - C_O_FLAG_HI="$C_O_FLAG_NORM" - C_O_FLAG_NORM="$C_O_FLAG_NORM" - CXX_O_FLAG_HI="$CXX_O_FLAG_NORM" - CXX_O_FLAG_NORM="$CXX_O_FLAG_NORM" + fastdebug | slowdebug ) + CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS $CFLAGS_DEBUG_OPTIONS" + CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS $CXXFLAGS_DEBUG_OPTIONS" JAVAC_FLAGS="$JAVAC_FLAGS -g" ;; - slowdebug ) - CFLAGS_JDK="$CFLAGS_JDK $CFLAGS_DEBUG_SYMBOLS" - CXXFLAGS_JDK="$CXXFLAGS_JDK $CXXFLAGS_DEBUG_SYMBOLS" - C_O_FLAG_HI="$C_O_FLAG_NONE" - C_O_FLAG_NORM="$C_O_FLAG_NONE" - CXX_O_FLAG_HI="$CXX_O_FLAG_NONE" - CXX_O_FLAG_NORM="$CXX_O_FLAG_NONE" - JAVAC_FLAGS="$JAVAC_FLAGS -g" + release ) + ;; + * ) + as_fn_error $? "Unrecognized \$DEBUG_LEVEL: $DEBUG_LEVEL" "$LINENO" 5 ;; esac # Setup LP64 - CCXXFLAGS_JDK="$CCXXFLAGS_JDK $ADD_LP64" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK $ADD_LP64" # Set some common defines. These works for all compilers, but assume # -D is universally accepted. @@ -41742,49 +42088,49 @@ # Note: -Dmacro is the same as #define macro 1 # -Dmacro= is the same as #define macro if test "x$OPENJDK_TARGET_OS" = xsolaris; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_LITTLE_ENDIAN=" - else - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_LITTLE_ENDIAN" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_LITTLE_ENDIAN=" + else + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_LITTLE_ENDIAN" fi else # Same goes for _BIG_ENDIAN. Do we really need to set *ENDIAN on Solaris if they # are defined in the system? if test "x$OPENJDK_TARGET_OS" = xsolaris; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN=" - else - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_BIG_ENDIAN" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_BIG_ENDIAN=" + else + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_BIG_ENDIAN" fi fi # Setup target OS define. Use OS target name but in upper case. OPENJDK_TARGET_OS_UPPERCASE=`$ECHO $OPENJDK_TARGET_OS | $TR 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'` - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D$OPENJDK_TARGET_OS_UPPERCASE" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D$OPENJDK_TARGET_OS_UPPERCASE" # Setup target CPU - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DARCH='\"$OPENJDK_TARGET_CPU_LEGACY\"' -D$OPENJDK_TARGET_CPU_LEGACY" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DARCH='\"$OPENJDK_TARGET_CPU_LEGACY\"' -D$OPENJDK_TARGET_CPU_LEGACY" # Setup debug/release defines if test "x$DEBUG_LEVEL" = xrelease; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DNDEBUG" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DNDEBUG" if test "x$OPENJDK_TARGET_OS" = xsolaris; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DTRIMMED" - fi - else - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DDEBUG" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DTRIMMED" + fi + else + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DDEBUG" fi # Setup release name - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DRELEASE='\"\$(RELEASE)\"'" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DRELEASE='\"\$(RELEASE)\"'" # Set some additional per-OS defines. if test "x$OPENJDK_TARGET_OS" = xmacosx; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE -D_DARWIN_UNLIMITED_SELECT" elif test "x$OPENJDK_TARGET_OS" = xaix; then # FIXME: PPC64 should not be here. 
- CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DPPC64" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DPPC64" elif test "x$OPENJDK_TARGET_OS" = xbsd; then - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -D_ALLBSD_SOURCE" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -D_ALLBSD_SOURCE" fi # Additional macosx handling @@ -41803,13 +42149,13 @@ # The macro takes the version with no dots, ex: 1070 # Let the flags variables get resolved in make for easier override on make # command line. - CCXXFLAGS_JDK="$CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)" + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK -DMAC_OS_X_VERSION_MAX_ALLOWED=\$(subst .,,\$(MACOSX_VERSION_MIN)) -mmacosx-version-min=\$(MACOSX_VERSION_MIN)" LDFLAGS_JDK="$LDFLAGS_JDK -mmacosx-version-min=\$(MACOSX_VERSION_MIN)" fi fi # Setup some hard coded includes - CCXXFLAGS_JDK="$CCXXFLAGS_JDK \ + COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK \ -I${JDK_OUTPUTDIR}/include \ -I${JDK_OUTPUTDIR}/include/$OPENJDK_TARGET_OS \ -I${JDK_TOPDIR}/src/share/javavm/export \ @@ -41818,12 +42164,12 @@ -I${JDK_TOPDIR}/src/$OPENJDK_TARGET_OS_API_DIR/native/common" # The shared libraries are compiled using the picflag. - CFLAGS_JDKLIB="$CCXXFLAGS_JDK $CFLAGS_JDK $PICFLAG $CFLAGS_JDKLIB_EXTRA" - CXXFLAGS_JDKLIB="$CCXXFLAGS_JDK $CXXFLAGS_JDK $PICFLAG $CXXFLAGS_JDKLIB_EXTRA " + CFLAGS_JDKLIB="$COMMON_CCXXFLAGS_JDK $CFLAGS_JDK $PICFLAG $CFLAGS_JDKLIB_EXTRA" + CXXFLAGS_JDKLIB="$COMMON_CCXXFLAGS_JDK $CXXFLAGS_JDK $PICFLAG $CXXFLAGS_JDKLIB_EXTRA " # Executable flags - CFLAGS_JDKEXE="$CCXXFLAGS_JDK $CFLAGS_JDK" - CXXFLAGS_JDKEXE="$CCXXFLAGS_JDK $CXXFLAGS_JDK" + CFLAGS_JDKEXE="$COMMON_CCXXFLAGS_JDK $CFLAGS_JDK" + CXXFLAGS_JDKEXE="$COMMON_CCXXFLAGS_JDK $CXXFLAGS_JDK" @@ -41832,6 +42178,7 @@ # Setup LDFLAGS et al. # + # Now this is odd. The JDK native libraries have to link against libjvm.so # On 32-bit machines there is normally two distinct libjvm.so:s, client and server. # Which should we link to? Are we lucky enough that the binary api to the libjvm.so library @@ -41847,39 +42194,94 @@ fi # TODO: make -debug optional "--disable-full-debug-symbols" LDFLAGS_JDK="$LDFLAGS_JDK -debug" - LDFLAGS_JDKLIB="${LDFLAGS_JDK} -dll -libpath:${JDK_OUTPUTDIR}/lib" - LDFLAGS_JDKLIB_SUFFIX="" + elif test "x$TOOLCHAIN_TYPE" = xgcc; then + # If this is a --hash-style=gnu system, use --hash-style=both, why? + # We have previously set HAS_GNU_HASH if this is the case + if test -n "$HAS_GNU_HASH"; then + LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker --hash-style=both" + fi + if test "x$OPENJDK_TARGET_OS" = xlinux; then + # And since we now know that the linker is gnu, then add -z defs, to forbid + # undefined symbols in object files. + LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -z -Xlinker defs" + case $DEBUG_LEVEL in + release ) + # tell linker to optimize libraries. + # Should this be supplied to the OSS linker as well? 
+ LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -O1" + ;; + slowdebug ) + if test "x$HAS_LINKER_NOW" = "xtrue"; then + # do relocations at load + LDFLAGS_JDK="$LDFLAGS_JDK $LINKER_NOW_FLAG" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK $LINKER_NOW_FLAG" + fi + if test "x$HAS_LINKER_RELRO" = "xtrue"; then + # mark relocations read only + LDFLAGS_JDK="$LDFLAGS_JDK $LINKER_RELRO_FLAG" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK $LINKER_RELRO_FLAG" + fi + ;; + fastdebug ) + if test "x$HAS_LINKER_RELRO" = "xtrue"; then + # mark relocations read only + LDFLAGS_JDK="$LDFLAGS_JDK $LINKER_RELRO_FLAG" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK $LINKER_RELRO_FLAG" + fi + ;; + * ) + as_fn_error $? "Unrecognized \$DEBUG_LEVEL: $DEBUG_LEVEL" "$LINENO" 5 + ;; + esac + fi + elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then + LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib" + fi + + if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then + # If undefined behaviour detection is enabled then we need to tell linker. + case $DEBUG_LEVEL in + release | fastdebug ) + ;; + slowdebug ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" >&5 +$as_echo "$as_me: WARNING: $HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" >&2;} + if test "x$HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR" = "xtrue"; then + # enable undefined behaviour checking + LDFLAGS_JDK="$LDFLAGS_JDK `$ECHO -n $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG | sed -e "s/ *\(^ \+\)/ -Xlinker \1/g"`" + LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK `$ECHO -n $CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG | sed -e "s/ *\(^ \+\)/ -Xlinker \1/g"`" + fi + ;; + * ) + as_fn_error $? "Unrecognized \$DEBUG_LEVEL: $DEBUG_LEVEL" "$LINENO" 5 + ;; + esac + fi + + # Customize LDFLAGS for executables + + LDFLAGS_JDKEXE="${LDFLAGS_JDK}" + + if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then if test "x$OPENJDK_TARGET_CPU_BITS" = "x64"; then LDFLAGS_STACK_SIZE=1048576 else LDFLAGS_STACK_SIZE=327680 fi - LDFLAGS_JDKEXE="${LDFLAGS_JDK} /STACK:$LDFLAGS_STACK_SIZE" - else - if test "x$TOOLCHAIN_TYPE" = xgcc; then - # If this is a --hash-style=gnu system, use --hash-style=both, why? - # We have previously set HAS_GNU_HASH if this is the case - if test -n "$HAS_GNU_HASH"; then - LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker --hash-style=both " - fi - if test "x$OPENJDK_TARGET_OS" = xlinux; then - # And since we now know that the linker is gnu, then add -z defs, to forbid - # undefined symbols in object files. - LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -z -Xlinker defs" - if test "x$DEBUG_LEVEL" = "xrelease"; then - # When building release libraries, tell the linker optimize them. - # Should this be supplied to the OSS linker as well? 
- LDFLAGS_JDK="${LDFLAGS_JDK} -Xlinker -O1" - fi - fi - fi - - if test "x$TOOLCHAIN_TYPE" = xsolstudio; then - LDFLAGS_JDK="$LDFLAGS_JDK -z defs -xildoff -ztext" - LDFLAGS_CXX_JDK="$LDFLAGS_CXX_JDK -norunpath -xnolib" - fi - - LDFLAGS_JDKLIB="${LDFLAGS_JDK} $SHARED_LIBRARY_FLAGS \ + LDFLAGS_JDKEXE="${LDFLAGS_JDKEXE} /STACK:$LDFLAGS_STACK_SIZE" + elif test "x$OPENJDK_TARGET_OS" = xlinux; then + LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE -Xlinker --allow-shlib-undefined" + fi + + # Customize LDFLAGS for libs + LDFLAGS_JDKLIB="${LDFLAGS_JDK}" + + if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then + LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} -dll -libpath:${JDK_OUTPUTDIR}/lib" + LDFLAGS_JDKLIB_SUFFIX="" + else + LDFLAGS_JDKLIB="${LDFLAGS_JDKLIB} ${SHARED_LIBRARY_FLAGS} \ -L${JDK_OUTPUTDIR}/lib${OPENJDK_TARGET_CPU_LIBDIR}" # On some platforms (mac) the linker warns about non existing -L dirs. @@ -41900,11 +42302,6 @@ if test "x$TOOLCHAIN_TYPE" = xsolstudio; then LDFLAGS_JDKLIB_SUFFIX="$LDFLAGS_JDKLIB_SUFFIX -lc" fi - - LDFLAGS_JDKEXE="${LDFLAGS_JDK}" - if test "x$OPENJDK_TARGET_OS" = xlinux; then - LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE -Xlinker --allow-shlib-undefined" - fi fi
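The blocks added to generated-configure.sh above are the expanded forms of the new compiler and linker probes. Stripped of the autoconf machinery, each probe just tries to compile or link a trivial program with the candidate flag and records the result. A hand-run approximation (the conftest* names are arbitrary, and gcc stands in for whatever $CC configure actually selected):

echo 'int i;' > conftest.c
if gcc -Og -c conftest.c -o conftest.o 2>/dev/null; then
  echo "HAS_CFLAG_OPTIMIZE_DEBUG=true"       # -Og accepted (GCC 4.8 and later)
else
  echo "HAS_CFLAG_OPTIMIZE_DEBUG=false"
fi

echo 'int main(void) { return 0; }' > conftest-link.c
if gcc -Xlinker -z -Xlinker relro conftest-link.c -o conftest-link 2>/dev/null; then
  echo "HAS_LINKER_RELRO=true"               # "-z relro" accepted (GNU binutils 2.17 and later)
else
  echo "HAS_LINKER_RELRO=false"
fi
rm -f conftest.c conftest.o conftest-link.c conftest-link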
--- a/common/autoconf/toolchain.m4 Thu Jun 19 11:22:18 2014 -0700 +++ b/common/autoconf/toolchain.m4 Wed Jul 05 19:46:17 2017 +0200 @@ -24,11 +24,11 @@ # ######################################################################## -# This file is responsible for detecting, verifying and setting up the -# toolchain, i.e. the compiler, linker and related utilities. It will setup +# This file is responsible for detecting, verifying and setting up the +# toolchain, i.e. the compiler, linker and related utilities. It will setup # proper paths to the binaries, but it will not setup any flags. # -# The binaries used is determined by the toolchain type, which is the family of +# The binaries used is determined by the toolchain type, which is the family of # compilers and related tools that are used. ######################################################################## @@ -83,7 +83,7 @@ AC_SUBST(SHARED_LIBRARY) AC_SUBST(STATIC_LIBRARY) AC_SUBST(OBJ_SUFFIX) - AC_SUBST(EXE_SUFFIX) + AC_SUBST(EXE_SUFFIX) ]) # Determine which toolchain type to use, and make sure it is valid for this @@ -117,7 +117,7 @@ # First toolchain type in the list is the default DEFAULT_TOOLCHAIN=${VALID_TOOLCHAINS%% *} fi - + if test "x$with_toolchain_type" = xlist; then # List all toolchains AC_MSG_NOTICE([The following toolchains are valid on this platform:]) @@ -126,7 +126,7 @@ TOOLCHAIN_DESCRIPTION=${!toolchain_var_name} $PRINTF " %-10s %s\n" $toolchain "$TOOLCHAIN_DESCRIPTION" done - + exit 0 elif test "x$with_toolchain_type" != x; then # User override; check that it is valid @@ -168,10 +168,10 @@ AC_MSG_NOTICE([Using default toolchain $TOOLCHAIN_TYPE ($TOOLCHAIN_DESCRIPTION)]) else AC_MSG_NOTICE([Using user selected toolchain $TOOLCHAIN_TYPE ($TOOLCHAIN_DESCRIPTION). Default toolchain is $DEFAULT_TOOLCHAIN.]) - fi + fi ]) -# Before we start detecting the toolchain executables, we might need some +# Before we start detecting the toolchain executables, we might need some # special setup, e.g. additional paths etc. AC_DEFUN_ONCE([TOOLCHAIN_PRE_DETECTION], [ @@ -184,7 +184,7 @@ ORG_OBJCFLAGS="$OBJCFLAGS" # On Windows, we need to detect the visual studio installation first. - # This will change the PATH, but we need to keep that new PATH even + # This will change the PATH, but we need to keep that new PATH even # after toolchain detection is done, since the compiler (on x86) uses # it for DLL resolution in runtime. if test "x$OPENJDK_BUILD_OS" = "xwindows" && test "x$TOOLCHAIN_TYPE" = "xmicrosoft"; then @@ -208,7 +208,7 @@ PATH="/usr/ccs/bin:$PATH" fi - # Finally add TOOLCHAIN_PATH at the beginning, to allow --with-tools-dir to + # Finally add TOOLCHAIN_PATH at the beginning, to allow --with-tools-dir to # override all other locations. if test "x$TOOLCHAIN_PATH" != x; then PATH=$TOOLCHAIN_PATH:$PATH @@ -254,7 +254,7 @@ AC_MSG_NOTICE([The result from running with --version was: "$ALT_VERSION_OUTPUT"]) AC_MSG_ERROR([A $TOOLCHAIN_TYPE compiler is required. Try setting --with-tools-dir.]) fi - # Remove usage instructions (if present), and + # Remove usage instructions (if present), and # collapse compiler output into a single line COMPILER_VERSION_STRING=`$ECHO $COMPILER_VERSION_OUTPUT | \ $SED -e 's/ *@<:@Uu@:>@sage:.*//'` @@ -282,7 +282,7 @@ # There is no specific version flag, but all output starts with a version string. 
# First line typically looks something like: # Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.40219.01 for 80x86 - COMPILER_VERSION_OUTPUT=`$COMPILER 2>&1 | $HEAD -n 1 | $TR -d '\r'` + COMPILER_VERSION_OUTPUT=`$COMPILER 2>&1 | $HEAD -n 1 | $TR -d '\r'` # Check that this is likely to be Microsoft CL.EXE. $ECHO "$COMPILER_VERSION_OUTPUT" | $GREP "Microsoft.*Compiler" > /dev/null if test $? -ne 0; then @@ -360,7 +360,7 @@ AC_MSG_NOTICE([Will use user supplied compiler $1=[$]$1]) if test "x`basename [$]$1`" = "x[$]$1"; then # A command without a complete path is provided, search $PATH. - + AC_PATH_PROGS(POTENTIAL_$1, [$]$1) if test "x$POTENTIAL_$1" != x; then $1=$POTENTIAL_$1 @@ -375,12 +375,12 @@ fi else # No user supplied value. Locate compiler ourselves. - + # If we are cross compiling, assume cross compilation tools follows the # cross compilation standard where they are prefixed with the autoconf - # standard name for the target. For example the binary + # standard name for the target. For example the binary # i686-sun-solaris2.10-gcc will cross compile for i686-sun-solaris2.10. - # If we are not cross compiling, then the default compiler name will be + # If we are not cross compiling, then the default compiler name will be # used. $1= @@ -450,9 +450,9 @@ TOOLCHAIN_CHECK_COMPILER_VERSION([$1], [$COMPILER_NAME]) ]) -# Detect the core components of the toolchain, i.e. the compilers (CC and CXX), -# preprocessor (CPP and CXXCPP), the linker (LD), the assembler (AS) and the -# archiver (AR). Verify that the compilers are correct according to the +# Detect the core components of the toolchain, i.e. the compilers (CC and CXX), +# preprocessor (CPP and CXXCPP), the linker (LD), the assembler (AS) and the +# archiver (AR). Verify that the compilers are correct according to the # toolchain type. AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_CORE], [ @@ -529,7 +529,7 @@ ]) # Setup additional tools that is considered a part of the toolchain, but not the -# core part. Many of these are highly platform-specific and do not exist, +# core part. Many of these are highly platform-specific and do not exist, # and/or are not needed on all platforms. AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_EXTRA], [ @@ -551,7 +551,7 @@ AC_CHECK_PROG([DUMPBIN], [dumpbin], [dumpbin],,,) BASIC_FIXUP_EXECUTABLE(DUMPBIN) fi - + if test "x$OPENJDK_TARGET_OS" = xsolaris; then BASIC_PATH_PROGS(STRIP, strip) BASIC_FIXUP_EXECUTABLE(STRIP) @@ -559,7 +559,7 @@ BASIC_FIXUP_EXECUTABLE(NM) BASIC_PATH_PROGS(GNM, gnm) BASIC_FIXUP_EXECUTABLE(GNM) - + BASIC_PATH_PROGS(MCS, mcs) BASIC_FIXUP_EXECUTABLE(MCS) elif test "x$OPENJDK_TARGET_OS" != xwindows; then @@ -592,17 +592,17 @@ # Setup the build tools (i.e, the compiler and linker used to build programs # that should be run on the build platform, not the target platform, as a build -# helper). Since the non-cross-compile case uses the normal, target compilers +# helper). Since the non-cross-compile case uses the normal, target compilers # for this, we can only do this after these have been setup. AC_DEFUN_ONCE([TOOLCHAIN_SETUP_BUILD_COMPILERS], -[ +[ if test "x$COMPILE_TYPE" = "xcross"; then # Now we need to find a C/C++ compiler that can build executables for the # build platform. We can't use the AC_PROG_CC macro, since it can only be # used once. Also, we need to do this without adding a tools dir to the # path, otherwise we might pick up cross-compilers which don't use standard # naming. - + # FIXME: we should list the discovered compilers as an exclude pattern! 
# If we do that, we can do this detection before POST_DETECTION, and still # find the build compilers in the tools dir, if needed. @@ -690,15 +690,39 @@ # If this is a --hash-style=gnu system, use --hash-style=both, why? HAS_GNU_HASH=`$CC -dumpspecs 2>/dev/null | $GREP 'hash-style=gnu'` # This is later checked when setting flags. + + # "-Og" suppported for GCC 4.8 and later + CFLAG_OPTIMIZE_DEBUG_FLAG="-Og" + FLAGS_COMPILER_CHECK_ARGUMENTS([$CFLAG_OPTIMIZE_DEBUG_FLAG], + [HAS_CFLAG_OPTIMIZE_DEBUG=true], + [HAS_CFLAG_OPTIMIZE_DEBUG=false]) + + # "-fsanitize=undefined" supported for GCC 4.9 and later + CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG="-fsanitize=undefined -fsanitize-recover" + FLAGS_COMPILER_CHECK_ARGUMENTS([$CFLAG_DETECT_UNDEFINED_BEHAVIOR_FLAG], + [HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR=true], + [HAS_CFLAG_DETECT_UNDEFINED_BEHAVIOR=false]) + + # "-z relro" supported in GNU binutils 2.17 and later + LINKER_RELRO_FLAG="-Xlinker -z -Xlinker relro" + FLAGS_LINKER_CHECK_ARGUMENTS([$LINKER_RELRO_FLAG], + [HAS_LINKER_RELRO=true], + [HAS_LINKER_RELRO=false]) + + # "-z now" supported in GNU binutils 2.11 and later + LINKER_NOW_FLAG="-Xlinker -z -Xlinker now" + FLAGS_LINKER_CHECK_ARGUMENTS([$LINKER_NOW_FLAG], + [HAS_LINKER_NOW=true], + [HAS_LINKER_NOW=false]) fi - # Check for broken SuSE 'ld' for which 'Only anonymous version tag is allowed + # Check for broken SuSE 'ld' for which 'Only anonymous version tag is allowed # in executable.' USING_BROKEN_SUSE_LD=no if test "x$OPENJDK_TARGET_OS" = xlinux && test "x$TOOLCHAIN_TYPE" = xgcc; then AC_MSG_CHECKING([for broken SuSE 'ld' which only understands anonymous version tags in executables]) - echo "SUNWprivate_1.1 { local: *; };" > version-script.map - echo "int main() { }" > main.c + $ECHO "SUNWprivate_1.1 { local: *; };" > version-script.map + $ECHO "int main() { }" > main.c if $CXX -Xlinker -version-script=version-script.map main.c 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD; then AC_MSG_RESULT(no) USING_BROKEN_SUSE_LD=no
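The toolchain.m4 hunk above only records capability flags: it asks the compiler whether it accepts -Og and -fsanitize=undefined, and the linker whether it accepts -z relro and -z now, storing the answers in HAS_* variables for the flag setup to consult later. As a minimal sketch of what the undefined-behavior sanitizer buys once enabled, the following standalone C++ example (not part of the changeset; the build line in the comment is an assumption about typical usage) triggers a report for signed overflow:

// ubsan_demo.cpp -- illustration only, not part of the changeset.
// Assumed build (GCC 4.9+ or a recent clang):
//   g++ -g -fsanitize=undefined ubsan_demo.cpp -o ubsan_demo
#include <climits>
#include <cstdio>

int overflow_add(int a, int b) {
  // Signed integer overflow is undefined behaviour; UBSan reports
  // "signed integer overflow" here instead of silently wrapping.
  return a + b;
}

int main() {
  int r = overflow_add(INT_MAX, 1);
  std::printf("result = %d\n", r);
  return 0;
}

As I understand it, pairing the sanitizer with -fsanitize-recover asks the runtime to print the diagnostic and continue rather than abort, which suits a debug VM that should stay alive long enough to produce further output.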
--- a/hotspot/.hgtags Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/.hgtags Wed Jul 05 19:46:17 2017 +0200 @@ -421,3 +421,4 @@ b14e7c0b7d3ec04127f565cda1d84122e205680c jdk9-b16 14b656df31c2cb09c505921061e79977823de71a jdk9-b17 871fd128548480095e0dc3fc34c422666baeec75 jdk9-b18 +d4cffb3ae6213c66c7522ebffe0349360a45f0ef jdk9-b19
--- a/hotspot/make/bsd/makefiles/fastdebug.make Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/make/bsd/makefiles/fastdebug.make Wed Jul 05 19:46:17 2017 +0200 @@ -24,37 +24,30 @@ # Sets make macros for making debug version of VM +# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +# They may also specify FASTDEBUG_CFLAGS, but it defaults to DEBUG_CFLAGS. + +FASTDEBUG_CFLAGS$(FASTDEBUG_CFLAGS) = $(DEBUG_CFLAGS) + # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files) -ifeq ($(BUILDARCH), ia64) - # Bug in GCC, causes hang. -O1 will override the -O3 specified earlier - OPT_CFLAGS/callGenerator.o += -O1 - OPT_CFLAGS/ciTypeFlow.o += -O1 - OPT_CFLAGS/compile.o += -O1 - OPT_CFLAGS/concurrentMarkSweepGeneration.o += -O1 - OPT_CFLAGS/doCall.o += -O1 - OPT_CFLAGS/generateOopMap.o += -O1 - OPT_CFLAGS/generateOptoStub.o += -O1 - OPT_CFLAGS/graphKit.o += -O1 - OPT_CFLAGS/instanceKlass.o += -O1 - OPT_CFLAGS/interpreterRT_ia64.o += -O1 - OPT_CFLAGS/output.o += -O1 - OPT_CFLAGS/parse1.o += -O1 - OPT_CFLAGS/runtime.o += -O1 - OPT_CFLAGS/synchronizer.o += -O1 -endif - - # If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE) # Set the environment variable HOTSPARC_GENERIC to "true" # to inhibit the effect of the previous line on CFLAGS. +# The following lines are copied from debug.make, except that we +# consult FASTDEBUG_CFLAGS instead of DEBUG_CFLAGS. +# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +DEBUG_CFLAGS/DEFAULT= $(FASTDEBUG_CFLAGS) +DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) +CFLAGS += $(DEBUG_CFLAGS/BYFILE) + # Linker mapfile MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug ifeq ($(OS_VENDOR), Darwin)
--- a/hotspot/make/bsd/makefiles/gcc.make Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/make/bsd/makefiles/gcc.make Wed Jul 05 19:46:17 2017 +0200 @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # OS_VENDOR = $(shell uname -s) @@ -80,7 +80,7 @@ HOSTCC = $(CC) endif - AS = $(CC) -c + AS = $(CC) -c endif ifeq ($(OS_VENDOR), Darwin) @@ -100,7 +100,7 @@ endif ifeq ($(USE_CLANG), true) - # clang has precompiled headers support by default, but the user can switch + # Clang has precompiled headers support by default, but the user can switch # it off by using 'USE_PRECOMPILED_HEADER=0'. ifdef LP64 ifeq ($(USE_PRECOMPILED_HEADER),) @@ -112,29 +112,29 @@ # Clang produces an error if the PCH file was compiled with other options than the actual compilation unit. USE_PRECOMPILED_HEADER=0 endif - + ifeq ($(USE_PRECOMPILED_HEADER),1) - + ifndef LP64 $(error " Precompiled Headers only supported on 64-bit platforms!") endif - + PRECOMPILED_HEADER_DIR=. PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.pch - + PCH_FLAG = -include precompiled.hpp PCH_FLAG/DEFAULT = $(PCH_FLAG) PCH_FLAG/NO_PCH = -DNO_PCH PCH_FLAG/BY_FILE = $(PCH_FLAG/$@)$(PCH_FLAG/DEFAULT$(PCH_FLAG/$@)) - + VM_PCH_FLAG/LIBJVM = $(PCH_FLAG/BY_FILE) VM_PCH_FLAG/AOUT = VM_PCH_FLAG = $(VM_PCH_FLAG/$(LINK_INTO)) - + # We only use precompiled headers for the JVM build CFLAGS += $(VM_PCH_FLAG) - + # The following files are compiled at various optimization # levels due to optimization issues encountered at the # 'OPT_CFLAGS_DEFAULT' level. The Clang compiler issues a compile @@ -149,7 +149,7 @@ PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH) PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH) PCH_FLAG/unsafe.o = $(PCH_FLAG/NO_PCH) - + endif else # ($(USE_CLANG), true) # check for precompiled headers support @@ -272,21 +272,24 @@ CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS) # Special cases -CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) +CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) # XXXDARWIN: for _dyld_bind_fully_image_containing_address ifeq ($(OS_VENDOR), Darwin) CFLAGS_WARN/os_bsd.o = $(CFLAGS_WARN/DEFAULT) -Wno-deprecated-declarations endif +# optimization control flags (Used by fastdebug and release variants) +OPT_CFLAGS/NOOPT=-O0 +ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" + # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination) + OPT_CFLAGS/DEBUG=-Og +else + # Allow no optimizations. + OPT_CFLAGS/DEBUG=-O0 +endif OPT_CFLAGS/SIZE=-Os OPT_CFLAGS/SPEED=-O3 -# Hotspot uses very unstrict aliasing turn this optimization off -# This option is added to CFLAGS rather than OPT_CFLAGS -# so that OPT_CFLAGS overrides get this option too. -CFLAGS += -fno-strict-aliasing - -# The flags to use for an Optimized g++ build ifeq ($(OS_VENDOR), Darwin) # use -Os by default, unless -O3 can be proved to be worth the cost, as per policy # <https://wiki.openjdk.java.net/display/MacOSXPort/Compiler+Errata> @@ -295,6 +298,11 @@ OPT_CFLAGS_DEFAULT ?= SPEED endif +# Hotspot uses very unstrict aliasing turn this optimization off +# This option is added to CFLAGS rather than OPT_CFLAGS +# so that OPT_CFLAGS overrides get this option too. 
+CFLAGS += -fno-strict-aliasing + ifdef OPT_CFLAGS ifneq ("$(origin OPT_CFLAGS)", "command line") $(error " Use OPT_EXTRAS instead of OPT_CFLAGS to add extra flags to OPT_CFLAGS.") @@ -309,8 +317,6 @@ OPT_CFLAGS += -fno-expensive-optimizations endif -OPT_CFLAGS/NOOPT=-O0 - # Work around some compiler bugs. ifeq ($(USE_CLANG), true) ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1) @@ -338,7 +344,7 @@ endif ifeq ($(OS_VENDOR), Darwin) - # Setting these parameters makes it an error to link to macosx APIs that are + # Setting these parameters makes it an error to link to macosx APIs that are # newer than the given OS version and makes the linked binaries compatible even # if built on a newer version of the OS. # The expected format is X.Y.Z @@ -371,10 +377,22 @@ ifeq ($(USE_CLANG),) # statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x. - ifneq ("${CC_VER_MAJOR}", "2") + ifneq ($(CC_VER_MAJOR), 2) STATIC_LIBGCC += -static-libgcc endif + ifneq ($(OS_VENDOR), Darwin) + ifneq (, findstring(debug,$(BUILD_FLAVOR))) + # for relocations read-only + LFLAGS += -Xlinker -z -Xlinker relro + + ifeq ($(BUILD_FLAVOR), debug) + # disable incremental relocations linking + LFLAGS += -Xlinker -z -Xlinker now + endif + endif + endif + ifeq ($(BUILDARCH), ia64) LFLAGS += -Wl,-relax endif @@ -425,6 +443,14 @@ CFLAGS += -flimit-debug-info endif +ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" + # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination) + DEBUG_CFLAGS=-Og +else + # Allow no optimizations. + DEBUG_CFLAGS=-O0 +endif + # DEBUG_BINARIES uses full -g debug information for all configs ifeq ($(DEBUG_BINARIES), true) CFLAGS += -g @@ -441,9 +467,14 @@ DEBUG_CFLAGS/ppc = -g DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH)) ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) - DEBUG_CFLAGS += -gstabs + ifeq ($(USE_CLANG), true) + # Clang doesn't understand -gstabs + DEBUG_CFLAGS += -g + else + DEBUG_CFLAGS += -gstabs + endif endif - + ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1) FASTDEBUG_CFLAGS/ia64 = -g FASTDEBUG_CFLAGS/amd64 = -g @@ -458,7 +489,7 @@ FASTDEBUG_CFLAGS += -gstabs endif endif - + OPT_CFLAGS/ia64 = -g OPT_CFLAGS/amd64 = -g OPT_CFLAGS/arm = -g @@ -475,6 +506,18 @@ endif endif +ifeq ($(USE_CLANG),) + # Enable bounds checking. + # _FORTIFY_SOURCE appears in GCC 4.0+ + ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1" + # compile time size bounds checks + FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 + + # and runtime size bounds checks and paranoid stack smashing checks. + DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1 + endif +endif + # If we are building HEADLESS, pass on to VM # so it can set the java.awt.headless property ifdef HEADLESS
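The FASTDEBUG_CFLAGS/DEBUG_CFLAGS additions at the end of this bsd gcc.make give the build the same hardening split as the other files in this change: _FORTIFY_SOURCE=1 adds compile-time size checks for fastdebug, while _FORTIFY_SOURCE=2 plus -fstack-protector-all adds runtime checks for debug. A hedged illustration of the runtime side follows; it is not from the changeset, the diagnostic text is libc-specific, and the build line is an assumed typical invocation:

// fortify_demo.cpp -- illustration only, not part of the changeset.
// Assumed build: g++ -O2 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 fortify_demo.cpp
#include <cstdio>
#include <cstring>

int main(int argc, char** argv) {
  char buf[8];
  // The compiler knows sizeof(buf), so strcpy is rewritten to a checked
  // variant (__strcpy_chk on glibc). With a long command-line argument the
  // fortified wrapper aborts with a "buffer overflow detected" style message
  // instead of silently corrupting the stack.
  const char* src = (argc > 1) ? argv[1] : "short";
  std::strcpy(buf, src);   // deliberate bug, for demonstration only
  std::printf("%s\n", buf);
  return 0;
}

Note that glibc only honors _FORTIFY_SOURCE when some optimization is enabled, hence the -O2 in the assumed build line above.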
--- a/hotspot/make/linux/makefiles/fastdebug.make Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/make/linux/makefiles/fastdebug.make Wed Jul 05 19:46:17 2017 +0200 @@ -24,6 +24,11 @@ # Sets make macros for making debug version of VM +# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +# They may also specify FASTDEBUG_CFLAGS, but it defaults to DEBUG_CFLAGS. + +FASTDEBUG_CFLAGS$(FASTDEBUG_CFLAGS) = $(DEBUG_CFLAGS) + # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@)) @@ -54,6 +59,12 @@ # Set the environment variable HOTSPARC_GENERIC to "true" # to inhibit the effect of the previous line on CFLAGS. +# The following lines are copied from debug.make, except that we +# consult FASTDEBUG_CFLAGS instead of DEBUG_CFLAGS. +# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make +DEBUG_CFLAGS/DEFAULT= $(FASTDEBUG_CFLAGS) +DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) +CFLAGS += $(DEBUG_CFLAGS/BYFILE) # Linker mapfile MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
--- a/hotspot/make/linux/makefiles/gcc.make Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/make/linux/makefiles/gcc.make Wed Jul 05 19:46:17 2017 +0200 @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # #------------------------------------------------------------------------ @@ -62,7 +62,6 @@ CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2) endif - ifeq ($(USE_CLANG), true) # Clang has precompiled headers support by default, but the user can switch # it off by using 'USE_PRECOMPILED_HEADER=0'. @@ -104,7 +103,7 @@ # But Clang doesn't support a precompiled header which was compiled with -O3 # to be used in a compilation unit which uses '-O0'. We could also prepare an # extra '-O0' PCH file for the opt build and use it here, but it's probably - # not worth the effoert as long as only two files need this special handling. + # not worth the effort as long as only two files need this special handling. PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH) PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH) PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH) @@ -226,19 +225,28 @@ endif CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS) + # Special cases -CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) +CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) -# The flags to use for an Optimized g++ build +# optimization control flags (Used by fastdebug and release variants) +OPT_CFLAGS/NOOPT=-O0 +ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" + # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination) + OPT_CFLAGS/DEBUG=-Og +else + # Allow no optimizations. + OPT_CFLAGS/DEBUG=-O0 +endif OPT_CFLAGS/SIZE=-Os OPT_CFLAGS/SPEED=-O3 +OPT_CFLAGS_DEFAULT ?= SPEED + # Hotspot uses very unstrict aliasing turn this optimization off # This option is added to CFLAGS rather than OPT_CFLAGS # so that OPT_CFLAGS overrides get this option too. -CFLAGS += -fno-strict-aliasing - -OPT_CFLAGS_DEFAULT ?= SPEED +CFLAGS += -fno-strict-aliasing ifdef OPT_CFLAGS ifneq ("$(origin OPT_CFLAGS)", "command line") @@ -248,14 +256,12 @@ OPT_CFLAGS = $(OPT_CFLAGS/$(OPT_CFLAGS_DEFAULT)) $(OPT_EXTRAS) -# The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp +# The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp # if we use expensive-optimizations ifeq ($(BUILDARCH), ia64) OPT_CFLAGS += -fno-expensive-optimizations endif -OPT_CFLAGS/NOOPT=-O0 - # Work around some compiler bugs. ifeq ($(USE_CLANG), true) ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1) @@ -271,7 +277,7 @@ # Flags for generating make dependency flags. DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d) ifeq ($(USE_CLANG),) - ifneq ("${CC_VER_MAJOR}", "2") + ifneq ($(CC_VER_MAJOR), 2) DEPFLAGS += -fpch-deps endif endif @@ -282,21 +288,26 @@ # statically link libstdc++.so, work with gcc but ignored by g++ STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic +# Enable linker optimization +LFLAGS += -Xlinker -O1 + ifeq ($(USE_CLANG),) - # statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x. 
- ifneq ("${CC_VER_MAJOR}", "2") - STATIC_LIBGCC += -static-libgcc + STATIC_LIBGCC += -static-libgcc + + ifneq (, findstring(debug,$(BUILD_FLAVOR))) + # for relocations read-only + LFLAGS += -Xlinker -z -Xlinker relro + + ifeq ($(BUILD_FLAVOR), debug) + # disable incremental relocations linking + LFLAGS += -Xlinker -z -Xlinker now + endif endif ifeq ($(BUILDARCH), ia64) LFLAGS += -Wl,-relax endif -endif -# Enable linker optimization -LFLAGS += -Xlinker -O1 - -ifeq ($(USE_CLANG),) # If this is a --hash-style=gnu system, use --hash-style=both # The gnu .hash section won't work on some Linux systems like SuSE 10. _HAS_HASH_STYLE_GNU:=$(shell $(CC) -dumpspecs | grep -- '--hash-style=gnu') @@ -333,6 +344,14 @@ CFLAGS += -flimit-debug-info endif +ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" + # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination) + DEBUG_CFLAGS=-Og +else + # Allow no optimizations. + DEBUG_CFLAGS=-O0 +endif + # DEBUG_BINARIES uses full -g debug information for all configs ifeq ($(DEBUG_BINARIES), true) CFLAGS += -g @@ -355,6 +374,18 @@ endif endif +ifeq ($(USE_CLANG),) + # Enable bounds checking. + # _FORTIFY_SOURCE appears in GCC 4.0+ + ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1" + # compile time size bounds checks + FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 + + # and runtime size bounds checks and paranoid stack smashing checks. + DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1 + endif +endif + # If we are building HEADLESS, pass on to VM # so it can set the java.awt.headless property ifdef HEADLESS
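Both this linux gcc.make and the bsd one above move -fno-strict-aliasing onto plain CFLAGS, with the comment explaining why: per-file OPT_CFLAGS overrides must not lose it, because HotSpot relies on type punning that the strict-aliasing optimization would otherwise be free to break. A small self-contained illustration of the pattern involved (my example, not code from the repository):

// aliasing_demo.cpp -- illustration only, not part of the changeset.
#include <cstdint>
#include <cstring>
#include <cstdio>

// Reads the bit pattern of a float through a uint32_t pointer. This violates
// the C++ strict-aliasing rule, so an optimizer that assumes strict aliasing
// may reorder or drop the load; it is only dependable with -fno-strict-aliasing.
static uint32_t bits_via_cast(float f) {
  return *reinterpret_cast<uint32_t*>(&f);
}

// Well-defined alternative: copy the object representation.
static uint32_t bits_via_memcpy(float f) {
  uint32_t u;
  std::memcpy(&u, &f, sizeof u);
  return u;
}

int main() {
  std::printf("cast:   0x%08x\n", bits_via_cast(1.0f));
  std::printf("memcpy: 0x%08x\n", bits_via_memcpy(1.0f));
  return 0;
}

The memcpy variant is the portable way to do the same thing and does not depend on the flag; it is shown only for contrast.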
--- a/hotspot/make/solaris/makefiles/gcc.make Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/make/solaris/makefiles/gcc.make Wed Jul 05 19:46:17 2017 +0200 @@ -19,7 +19,7 @@ # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA # or visit www.oracle.com if you need additional information or have any # questions. -# +# # #------------------------------------------------------------------------ @@ -40,7 +40,7 @@ CC_VER_MAJOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f1) CC_VER_MINOR := $(shell $(CC) -dumpversion | sed 's/egcs-//' | cut -d'.' -f2) -# Check for the versions of C++ and C compilers ($CXX and $CC) used. +# Check for the versions of C++ and C compilers ($CXX and $CC) used. # Get the last thing on the line that looks like x.x+ (x is a digit). COMPILER_REV := \ @@ -98,7 +98,7 @@ ifeq ($(BUILDARCH), amd64) ASFLAGS += -march=k8 -march=amd64 -LFLAGS += -march=k8 +LFLAGS += -march=k8 endif @@ -115,21 +115,44 @@ endif -# Compiler warnings are treated as errors -WARNINGS_ARE_ERRORS = -Werror +# Compiler warnings are treated as errors +WARNINGS_ARE_ERRORS = -Werror + # Enable these warnings. See 'info gcc' about details on these options WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2 CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS) -# Special cases -CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) + +# Special cases +CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) -# The flags to use for an Optimized g++ build -OPT_CFLAGS += -O3 +# optimization control flags (Used by fastdebug and release variants) +OPT_CFLAGS/NOOPT=-O0 +ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" + # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination) + OPT_CFLAGS/DEBUG=-Og ++else + # Allow no optimizations. + OPT_CFLAGS/DEBUG=-O0 +endif +OPT_CFLAGS/SIZE=-Os +OPT_CFLAGS/SPEED=-O3 + +OPT_CFLAGS_DEFAULT ?= SPEED # Hotspot uses very unstrict aliasing turn this optimization off -OPT_CFLAGS += -fno-strict-aliasing +# This option is added to CFLAGS rather than OPT_CFLAGS +# so that OPT_CFLAGS overrides get this option too. +CFLAGS += -fno-strict-aliasing -# The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp +ifdef OPT_CFLAGS + ifneq ("$(origin OPT_CFLAGS)", "command line") + $(error " Use OPT_EXTRAS instead of OPT_CFLAGS to add extra flags to OPT_CFLAGS.") + endif +endif + +OPT_CFLAGS = $(OPT_CFLAGS/$(OPT_CFLAGS_DEFAULT)) $(OPT_EXTRAS) + +# The gcc compiler segv's on ia64 when compiling bytecodeInterpreter.cpp # if we use expensive-optimizations # Note: all ia64 setting reflect the ones for linux # No actial testing was performed: there is no Solaris on ia64 presently @@ -137,10 +160,20 @@ OPT_CFLAGS/bytecodeInterpreter.o += -fno-expensive-optimizations endif -OPT_CFLAGS/NOOPT=-O0 +# Work around some compiler bugs. +ifeq ($(USE_CLANG), true) + ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1) + OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT) + endif +else + # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation. + ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1) + OPT_CFLAGS/mulnode.o += $(OPT_CFLAGS/NOOPT) + endif +endif # Flags for generating make dependency flags. 
-ifneq ("${CC_VER_MAJOR}", "2") +ifneq ($(CC_VER_MAJOR), 2) DEPFLAGS = -fpch-deps -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d) endif @@ -155,26 +188,37 @@ # statically link libstdc++.so, work with gcc but ignored by g++ STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic -# statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x. -ifneq ("${CC_VER_MAJOR}", "2") -STATIC_LIBGCC += -static-libgcc -endif - -ifeq ($(BUILDARCH), ia64) -# Note: all ia64 setting reflect the ones for linux -# No actial testing was performed: there is no Solaris on ia64 presently -LFLAGS += -Wl,-relax -endif ifdef USE_GNULD -# Enable linker optimization -LFLAGS += -Xlinker -O1 + # statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x. + ifneq ($(CC_VER_MAJOR), 2) + STATIC_LIBGCC += -static-libgcc + endif + + # Enable linker optimization + LFLAGS += -Xlinker -O1 + + ifneq (, findstring(debug,$(BUILD_FLAVOR))) + # for relocations read-only + LFLAGS += -Xlinker -z -Xlinker relro -# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file. -MAPFLAG = -Xlinker --version-script=FILENAME -else -MAPFLAG = -Xlinker -M -Xlinker FILENAME -endif + ifeq ($(BUILD_FLAVOR), debug) + # disable incremental relocations linking + LFLAGS += -Xlinker -z -Xlinker now + endif + endif + + ifeq ($(BUILDARCH), ia64) + # Note: all ia64 setting reflect the ones for linux + # No actual testing was performed: there is no Solaris on ia64 presently + LFLAGS += -Wl,-relax + endif + + # Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file. + MAPFLAG = -Xlinker --version-script=FILENAME +else + MAPFLAG = -Xlinker -M -Xlinker FILENAME +endif # Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj SONAMEFLAG = -Xlinker -soname=SONAME @@ -185,15 +229,34 @@ #------------------------------------------------------------------------ # Debug flags -# Use the stabs format for debugging information (this is the default -# on gcc-2.91). It's good enough, has all the information about line -# numbers and local variables, and libjvm.so is only about 16M. -# Change this back to "-g" if you want the most expressive format. -# (warning: that could easily inflate libjvm.so to 150M!) -# Note: The Itanium gcc compiler crashes when using -gstabs. -DEBUG_CFLAGS/ia64 = -g -DEBUG_CFLAGS/amd64 = -g -DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH)) -ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) -DEBUG_CFLAGS += -gstabs -endif +ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" + # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination) + DEBUG_CFLAGS=-Og +else + # Allow no optimizations. + DEBUG_CFLAGS=-O0 +endif + + +# Use the stabs format for debugging information (this is the default +# on gcc-2.91). It's good enough, has all the information about line +# numbers and local variables, and libjvm.so is only about 16M. +# Change this back to "-g" if you want the most expressive format. +# (warning: that could easily inflate libjvm.so to 150M!) +# Note: The Itanium gcc compiler crashes when using -gstabs. +DEBUG_CFLAGS/ia64 = -g +DEBUG_CFLAGS/amd64 = -g +DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH)) +ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) + DEBUG_CFLAGS += -gstabs +endif + +# Enable bounds checking. 
+# _FORTIFY_SOURCE appears in GCC 4.0+ +ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) )" "1" + # compile time size bounds checks + FASTDEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=1 + + # and runtime size bounds checks and paranoid stack smashing checks. + DEBUG_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fstack-protector-all --param ssp-buffer-size=1 +endif
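This Solaris hunk adds the same debug hardening as the BSD and Linux makefiles: a canary-based stack protector on every function (-fstack-protector-all) together with --param ssp-buffer-size=1, which lowers the buffer-size threshold the plain protector would otherwise apply. A sketch of what the canary catches; the overflow is deliberate, the exact "stack smashing detected" wording comes from the GCC/libssp runtime and may differ, and the build line is an assumed invocation:

// ssp_demo.cpp -- illustration only, not part of the changeset.
// Assumed build: g++ -O0 -fstack-protector-all --param ssp-buffer-size=1 ssp_demo.cpp
#include <cstring>
#include <cstdio>

static void copy_name(const char* src) {
  char name[4];
  // Deliberate overflow: the canary sits between name[] and the saved return
  // address, so the corruption is detected when the function returns
  // ("stack smashing detected") instead of hijacking control flow.
  std::strcpy(name, src);
  std::printf("%s\n", name);
}

int main(int argc, char** argv) {
  copy_name(argc > 1 ? argv[1] : "0123456789abcdef");
  return 0;
}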
--- a/hotspot/src/cpu/ppc/vm/ppc.ad Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/ppc/vm/ppc.ad Wed Jul 05 19:46:17 2017 +0200 @@ -1285,9 +1285,9 @@ void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) { Compile *C = ra_->C; - iRegPdstOper *op_dst = new (C) iRegPdstOper(); - MachNode *m1 = new (C) loadToc_hiNode(); - MachNode *m2 = new (C) loadToc_loNode(); + iRegPdstOper *op_dst = new iRegPdstOper(); + MachNode *m1 = new loadToc_hiNode(); + MachNode *m2 = new loadToc_loNode(); m1->add_req(NULL); m2->add_req(NULL, m1); @@ -2232,9 +2232,9 @@ MachTypeNode *Matcher::make_decode_node(Compile *C) { assert(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0, "This method is only implemented for unscaled cOops mode so far"); - MachTypeNode *decode = new (C) decodeN_unscaledNode(); - decode->set_opnd_array(0, new (C) iRegPdstOper()); - decode->set_opnd_array(1, new (C) iRegNsrcOper()); + MachTypeNode *decode = new decodeN_unscaledNode(); + decode->set_opnd_array(0, new iRegPdstOper()); + decode->set_opnd_array(1, new iRegNsrcOper()); return decode; } */ @@ -2600,20 +2600,20 @@ const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000; if (large_constant_pool) { // Create new nodes. - loadConL_hiNode *m1 = new (C) loadConL_hiNode(); - loadConL_loNode *m2 = new (C) loadConL_loNode(); + loadConL_hiNode *m1 = new loadConL_hiNode(); + loadConL_loNode *m2 = new loadConL_loNode(); // inputs for new nodes m1->add_req(NULL, toc); m2->add_req(NULL, m1); // operands for new nodes - m1->_opnds[0] = new (C) iRegLdstOper(); // dst - m1->_opnds[1] = immSrc; // src - m1->_opnds[2] = new (C) iRegPdstOper(); // toc - m2->_opnds[0] = new (C) iRegLdstOper(); // dst - m2->_opnds[1] = immSrc; // src - m2->_opnds[2] = new (C) iRegLdstOper(); // base + m1->_opnds[0] = new iRegLdstOper(); // dst + m1->_opnds[1] = immSrc; // src + m1->_opnds[2] = new iRegPdstOper(); // toc + m2->_opnds[0] = new iRegLdstOper(); // dst + m2->_opnds[1] = immSrc; // src + m2->_opnds[2] = new iRegLdstOper(); // base // Initialize ins_attrib TOC fields. m1->_const_toc_offset = -1; @@ -2633,15 +2633,15 @@ nodes._last = nodes._large_lo; assert(m2->bottom_type()->isa_long(), "must be long"); } else { - loadConLNode *m2 = new (C) loadConLNode(); + loadConLNode *m2 = new loadConLNode(); // inputs for new nodes m2->add_req(NULL, toc); // operands for new nodes - m2->_opnds[0] = new (C) iRegLdstOper(); // dst - m2->_opnds[1] = immSrc; // src - m2->_opnds[2] = new (C) iRegPdstOper(); // toc + m2->_opnds[0] = new iRegLdstOper(); // dst + m2->_opnds[1] = immSrc; // src + m2->_opnds[2] = new iRegPdstOper(); // toc // Initialize ins_attrib instruction offset. m2->_cbuf_insts_offset = -1; @@ -2750,20 +2750,20 @@ const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000; if (large_constant_pool) { // Create new nodes. 
- loadConP_hiNode *m1 = new (C) loadConP_hiNode(); - loadConP_loNode *m2 = new (C) loadConP_loNode(); + loadConP_hiNode *m1 = new loadConP_hiNode(); + loadConP_loNode *m2 = new loadConP_loNode(); // inputs for new nodes m1->add_req(NULL, n_toc); m2->add_req(NULL, m1); // operands for new nodes - m1->_opnds[0] = new (C) iRegPdstOper(); // dst - m1->_opnds[1] = op_src; // src - m1->_opnds[2] = new (C) iRegPdstOper(); // toc - m2->_opnds[0] = new (C) iRegPdstOper(); // dst - m2->_opnds[1] = op_src; // src - m2->_opnds[2] = new (C) iRegLdstOper(); // base + m1->_opnds[0] = new iRegPdstOper(); // dst + m1->_opnds[1] = op_src; // src + m1->_opnds[2] = new iRegPdstOper(); // toc + m2->_opnds[0] = new iRegPdstOper(); // dst + m2->_opnds[1] = op_src; // src + m2->_opnds[2] = new iRegLdstOper(); // base // Initialize ins_attrib TOC fields. m1->_const_toc_offset = -1; @@ -2777,15 +2777,15 @@ nodes->push(m2); assert(m2->bottom_type()->isa_ptr(), "must be ptr"); } else { - loadConPNode *m2 = new (C) loadConPNode(); + loadConPNode *m2 = new loadConPNode(); // inputs for new nodes m2->add_req(NULL, n_toc); // operands for new nodes - m2->_opnds[0] = new (C) iRegPdstOper(); // dst - m2->_opnds[1] = op_src; // src - m2->_opnds[2] = new (C) iRegPdstOper(); // toc + m2->_opnds[0] = new iRegPdstOper(); // dst + m2->_opnds[1] = op_src; // src + m2->_opnds[2] = new iRegPdstOper(); // toc // Register allocation for new nodes. ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); @@ -2802,9 +2802,9 @@ MachNode *m2; if (large_constant_pool) { - m2 = new (C) loadConFCompNode(); + m2 = new loadConFCompNode(); } else { - m2 = new (C) loadConFNode(); + m2 = new loadConFNode(); } // inputs for new nodes m2->add_req(NULL, n_toc); @@ -2812,7 +2812,7 @@ // operands for new nodes m2->_opnds[0] = op_dst; m2->_opnds[1] = op_src; - m2->_opnds[2] = new (C) iRegPdstOper(); // constanttablebase + m2->_opnds[2] = new iRegPdstOper(); // constanttablebase // register allocation for new nodes ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); @@ -2826,9 +2826,9 @@ MachNode *m2; if (large_constant_pool) { - m2 = new (C) loadConDCompNode(); + m2 = new loadConDCompNode(); } else { - m2 = new (C) loadConDNode(); + m2 = new loadConDNode(); } // inputs for new nodes m2->add_req(NULL, n_toc); @@ -2836,7 +2836,7 @@ // operands for new nodes m2->_opnds[0] = op_dst; m2->_opnds[1] = op_src; - m2->_opnds[2] = new (C) iRegPdstOper(); // constanttablebase + m2->_opnds[2] = new iRegPdstOper(); // constanttablebase // register allocation for new nodes ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); @@ -2918,15 +2918,15 @@ if (VM_Version::has_isel()) { // use isel instruction with Power 7 - cmpP_reg_imm16Node *n_compare = new (C) cmpP_reg_imm16Node(); - encodeP_subNode *n_sub_base = new (C) encodeP_subNode(); - encodeP_shiftNode *n_shift = new (C) encodeP_shiftNode(); - cond_set_0_oopNode *n_cond_set = new (C) cond_set_0_oopNode(); + cmpP_reg_imm16Node *n_compare = new cmpP_reg_imm16Node(); + encodeP_subNode *n_sub_base = new encodeP_subNode(); + encodeP_shiftNode *n_shift = new encodeP_shiftNode(); + cond_set_0_oopNode *n_cond_set = new cond_set_0_oopNode(); n_compare->add_req(n_region, n_src); n_compare->_opnds[0] = op_crx; n_compare->_opnds[1] = op_src; - n_compare->_opnds[2] = new (C) immL16Oper(0); + n_compare->_opnds[2] = new immL16Oper(0); n_sub_base->add_req(n_region, n_src); n_sub_base->_opnds[0] = op_dst; @@ -2956,10 +2956,10 @@ } else { // before Power 7 - 
moveRegNode *n_move = new (C) moveRegNode(); - cmpP_reg_imm16Node *n_compare = new (C) cmpP_reg_imm16Node(); - encodeP_shiftNode *n_shift = new (C) encodeP_shiftNode(); - cond_sub_baseNode *n_sub_base = new (C) cond_sub_baseNode(); + moveRegNode *n_move = new moveRegNode(); + cmpP_reg_imm16Node *n_compare = new cmpP_reg_imm16Node(); + encodeP_shiftNode *n_shift = new encodeP_shiftNode(); + cond_sub_baseNode *n_sub_base = new cond_sub_baseNode(); n_move->add_req(n_region, n_src); n_move->_opnds[0] = op_dst; @@ -2971,7 +2971,7 @@ n_compare->_opnds[0] = op_crx; n_compare->_opnds[1] = op_src; - n_compare->_opnds[2] = new (C) immL16Oper(0); + n_compare->_opnds[2] = new immL16Oper(0); n_sub_base->add_req(n_region, n_compare, n_src); n_sub_base->_opnds[0] = op_dst; @@ -3000,13 +3000,13 @@ enc_class postalloc_expand_encode_oop_not_null(iRegNdst dst, iRegPdst src) %{ - encodeP_subNode *n1 = new (C) encodeP_subNode(); + encodeP_subNode *n1 = new encodeP_subNode(); n1->add_req(n_region, n_src); n1->_opnds[0] = op_dst; n1->_opnds[1] = op_src; n1->_bottom_type = _bottom_type; - encodeP_shiftNode *n2 = new (C) encodeP_shiftNode(); + encodeP_shiftNode *n2 = new encodeP_shiftNode(); n2->add_req(n_region, n1); n2->_opnds[0] = op_dst; n2->_opnds[1] = op_dst; @@ -3020,13 +3020,13 @@ %} enc_class postalloc_expand_decode_oop(iRegPdst dst, iRegNsrc src, flagsReg crx) %{ - decodeN_shiftNode *n_shift = new (C) decodeN_shiftNode(); - cmpN_reg_imm0Node *n_compare = new (C) cmpN_reg_imm0Node(); + decodeN_shiftNode *n_shift = new decodeN_shiftNode(); + cmpN_reg_imm0Node *n_compare = new cmpN_reg_imm0Node(); n_compare->add_req(n_region, n_src); n_compare->_opnds[0] = op_crx; n_compare->_opnds[1] = op_src; - n_compare->_opnds[2] = new (C) immN_0Oper(TypeNarrowOop::NULL_PTR); + n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR); n_shift->add_req(n_region, n_src); n_shift->_opnds[0] = op_dst; @@ -3036,13 +3036,13 @@ if (VM_Version::has_isel()) { // use isel instruction with Power 7 - decodeN_addNode *n_add_base = new (C) decodeN_addNode(); + decodeN_addNode *n_add_base = new decodeN_addNode(); n_add_base->add_req(n_region, n_shift); n_add_base->_opnds[0] = op_dst; n_add_base->_opnds[1] = op_dst; n_add_base->_bottom_type = _bottom_type; - cond_set_0_ptrNode *n_cond_set = new (C) cond_set_0_ptrNode(); + cond_set_0_ptrNode *n_cond_set = new cond_set_0_ptrNode(); n_cond_set->add_req(n_region, n_compare, n_add_base); n_cond_set->_opnds[0] = op_dst; n_cond_set->_opnds[1] = op_crx; @@ -3064,7 +3064,7 @@ } else { // before Power 7 - cond_add_baseNode *n_add_base = new (C) cond_add_baseNode(); + cond_add_baseNode *n_add_base = new cond_add_baseNode(); n_add_base->add_req(n_region, n_compare, n_shift); n_add_base->_opnds[0] = op_dst; @@ -3086,13 +3086,13 @@ %} enc_class postalloc_expand_decode_oop_not_null(iRegPdst dst, iRegNsrc src) %{ - decodeN_shiftNode *n1 = new (C) decodeN_shiftNode(); + decodeN_shiftNode *n1 = new decodeN_shiftNode(); n1->add_req(n_region, n_src); n1->_opnds[0] = op_dst; n1->_opnds[1] = op_src; n1->_bottom_type = _bottom_type; - decodeN_addNode *n2 = new (C) decodeN_addNode(); + decodeN_addNode *n2 = new decodeN_addNode(); n2->add_req(n_region, n1); n2->_opnds[0] = op_dst; n2->_opnds[1] = op_dst; @@ -3388,7 +3388,7 @@ // Create new nodes. // Make an operand with the bit pattern to load as float. 
- immLOper *op_repl = new (C) immLOper((jlong)replicate_immF(op_src->constantF())); + immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF())); loadConLNodesTuple loadConLNodes = loadConLNodesTuple_create(C, ra_, n_toc, op_repl, @@ -3611,11 +3611,11 @@ // Create the nodes for loading the IC from the TOC. loadConLNodesTuple loadConLNodes_IC = - loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong)Universe::non_oop_word()), + loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()), OptoReg::Name(R19_H_num), OptoReg::Name(R19_num)); // Create the call node. - CallDynamicJavaDirectSchedNode *call = new (C) CallDynamicJavaDirectSchedNode(); + CallDynamicJavaDirectSchedNode *call = new CallDynamicJavaDirectSchedNode(); call->_method_handle_invoke = _method_handle_invoke; call->_vtable_index = _vtable_index; call->_method = _method; @@ -3765,7 +3765,7 @@ #if defined(ABI_ELFv2) jlong entry_address = (jlong) this->entry_point(); assert(entry_address, "need address here"); - loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address), + loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address), OptoReg::Name(R12_H_num), OptoReg::Name(R12_num)); #else // Get the struct that describes the function we are about to call. @@ -3777,42 +3777,42 @@ loadConLNodesTuple loadConLNodes_Toc; // Create nodes and operands for loading the entry point. - loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper(entry_address), + loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper(entry_address), OptoReg::Name(R12_H_num), OptoReg::Name(R12_num)); // Create nodes and operands for loading the env pointer. if (fd->env() != NULL) { - loadConLNodes_Env = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->env()), + loadConLNodes_Env = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->env()), OptoReg::Name(R11_H_num), OptoReg::Name(R11_num)); } else { loadConLNodes_Env._large_hi = NULL; loadConLNodes_Env._large_lo = NULL; loadConLNodes_Env._small = NULL; - loadConLNodes_Env._last = new (C) loadConL16Node(); - loadConLNodes_Env._last->_opnds[0] = new (C) iRegLdstOper(); - loadConLNodes_Env._last->_opnds[1] = new (C) immL16Oper(0); + loadConLNodes_Env._last = new loadConL16Node(); + loadConLNodes_Env._last->_opnds[0] = new iRegLdstOper(); + loadConLNodes_Env._last->_opnds[1] = new immL16Oper(0); ra_->set_pair(loadConLNodes_Env._last->_idx, OptoReg::Name(R11_H_num), OptoReg::Name(R11_num)); } // Create nodes and operands for loading the Toc point. 
- loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->toc()), + loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new immLOper((jlong) fd->toc()), OptoReg::Name(R2_H_num), OptoReg::Name(R2_num)); #endif // ABI_ELFv2 // mtctr node - MachNode *mtctr = new (C) CallLeafDirect_mtctrNode(); + MachNode *mtctr = new CallLeafDirect_mtctrNode(); assert(loadConLNodes_Entry._last != NULL, "entry must exist"); mtctr->add_req(0, loadConLNodes_Entry._last); - mtctr->_opnds[0] = new (C) iRegLdstOper(); - mtctr->_opnds[1] = new (C) iRegLdstOper(); + mtctr->_opnds[0] = new iRegLdstOper(); + mtctr->_opnds[1] = new iRegLdstOper(); // call node - MachCallLeafNode *call = new (C) CallLeafDirectNode(); + MachCallLeafNode *call = new CallLeafDirectNode(); call->_opnds[0] = _opnds[0]; - call->_opnds[1] = new (C) methodOper((intptr_t) entry_address); // May get set later. + call->_opnds[1] = new methodOper((intptr_t) entry_address); // May get set later. // Make the new call node look like the old one. call->_name = _name; @@ -6050,9 +6050,9 @@ format %{ "LoadN $dst, $src \t// postalloc expanded" %} // mask postalloc_expand %{ - MachNode *m1 = new (C) loadConN_hiNode(); - MachNode *m2 = new (C) loadConN_loNode(); - MachNode *m3 = new (C) clearMs32bNode(); + MachNode *m1 = new loadConN_hiNode(); + MachNode *m2 = new loadConN_loNode(); + MachNode *m3 = new clearMs32bNode(); m1->add_req(NULL); m2->add_req(NULL, m1); m3->add_req(NULL, m2); @@ -6117,7 +6117,7 @@ format %{ "LoadN $dst, $src \t// postalloc expanded" %} // mask postalloc_expand %{ // Load high bits into register. Sign extended. - MachNode *m1 = new (C) loadConNKlass_hiNode(); + MachNode *m1 = new loadConNKlass_hiNode(); m1->add_req(NULL); m1->_opnds[0] = op_dst; m1->_opnds[1] = op_src; @@ -6127,7 +6127,7 @@ MachNode *m2 = m1; if (!Assembler::is_uimm((jlong)Klass::encode_klass((Klass *)op_src->constant()), 31)) { // Value might be 1-extended. Mask out these bits. 
- m2 = new (C) clearMs32bNode(); + m2 = new clearMs32bNode(); m2->add_req(NULL, m1); m2->_opnds[0] = op_dst; m2->_opnds[1] = op_dst; @@ -6135,7 +6135,7 @@ nodes->push(m2); } - MachNode *m3 = new (C) loadConNKlass_loNode(); + MachNode *m3 = new loadConNKlass_loNode(); m3->add_req(NULL, m2); m3->_opnds[0] = op_dst; m3->_opnds[1] = op_src; @@ -6987,14 +6987,14 @@ format %{ "EncodePKlass $dst, $src\t// $src != Null, postalloc expanded" %} postalloc_expand %{ - encodePKlass_sub_baseNode *n1 = new (C) encodePKlass_sub_baseNode(); + encodePKlass_sub_baseNode *n1 = new encodePKlass_sub_baseNode(); n1->add_req(n_region, n_base, n_src); n1->_opnds[0] = op_dst; n1->_opnds[1] = op_base; n1->_opnds[2] = op_src; n1->_bottom_type = _bottom_type; - encodePKlass_shiftNode *n2 = new (C) encodePKlass_shiftNode(); + encodePKlass_shiftNode *n2 = new encodePKlass_shiftNode(); n2->add_req(n_region, n1); n2->_opnds[0] = op_dst; n2->_opnds[1] = op_dst; @@ -7064,14 +7064,14 @@ format %{ "DecodeNKlass $dst = $base + ($src << 3) \t// $src != NULL, postalloc expanded" %} postalloc_expand %{ - decodeNKlass_add_baseNode *n1 = new (C) decodeNKlass_add_baseNode(); + decodeNKlass_add_baseNode *n1 = new decodeNKlass_add_baseNode(); n1->add_req(n_region, n_base, n_src); n1->_opnds[0] = op_dst; n1->_opnds[1] = op_base; n1->_opnds[2] = op_src; n1->_bottom_type = _bottom_type; - decodeNKlass_shiftNode *n2 = new (C) decodeNKlass_shiftNode(); + decodeNKlass_shiftNode *n2 = new decodeNKlass_shiftNode(); n2->add_req(n_region, n1); n2->_opnds[0] = op_dst; n2->_opnds[1] = op_dst; @@ -9773,8 +9773,8 @@ // // Create new nodes. - MachNode *m1 = new (C) loadConI16Node(); - MachNode *m2 = new (C) cmovI_bso_stackSlotLNode(); + MachNode *m1 = new loadConI16Node(); + MachNode *m2 = new cmovI_bso_stackSlotLNode(); // inputs for new nodes m1->add_req(n_region); @@ -9785,7 +9785,7 @@ // operands for new nodes m1->_opnds[0] = op_dst; - m1->_opnds[1] = new (C) immI16Oper(0); + m1->_opnds[1] = new immI16Oper(0); m2->_opnds[0] = op_dst; m2->_opnds[1] = op_crx; @@ -9942,8 +9942,8 @@ // // Create new nodes. - MachNode *m1 = new (C) loadConL16Node(); - MachNode *m2 = new (C) cmovL_bso_stackSlotLNode(); + MachNode *m1 = new loadConL16Node(); + MachNode *m2 = new cmovL_bso_stackSlotLNode(); // inputs for new nodes m1->add_req(n_region); @@ -9952,7 +9952,7 @@ // operands for new nodes m1->_opnds[0] = op_dst; - m1->_opnds[1] = new (C) immL16Oper(0); + m1->_opnds[1] = new immL16Oper(0); m2->_opnds[0] = op_dst; m2->_opnds[1] = op_crx; m2->_opnds[2] = op_mem; @@ -10288,8 +10288,8 @@ // // Create new nodes. - MachNode *m1 = new (C) loadConI16Node(); - MachNode *m2 = new (C) cmovI_conIvalueMinus1_conIvalue1Node(); + MachNode *m1 = new loadConI16Node(); + MachNode *m2 = new cmovI_conIvalueMinus1_conIvalue1Node(); // inputs for new nodes m1->add_req(n_region); @@ -10298,7 +10298,7 @@ // operands for new nodes m1->_opnds[0] = op_dst; - m1->_opnds[1] = new (C) immI16Oper(0); + m1->_opnds[1] = new immI16Oper(0); m2->_opnds[0] = op_dst; m2->_opnds[1] = op_crx; @@ -10623,8 +10623,8 @@ // // Create new nodes. 
- MachNode *m1 = new (C) cmpFUnordered_reg_regNode(); - MachNode *m2 = new (C) cmov_bns_lessNode(); + MachNode *m1 = new cmpFUnordered_reg_regNode(); + MachNode *m2 = new cmov_bns_lessNode(); // inputs for new nodes m1->add_req(n_region, n_src1, n_src2); @@ -10698,8 +10698,8 @@ // // create new nodes - MachNode *m1 = new (C) cmpDUnordered_reg_regNode(); - MachNode *m2 = new (C) cmov_bns_lessNode(); + MachNode *m1 = new cmpDUnordered_reg_regNode(); + MachNode *m2 = new cmov_bns_lessNode(); // inputs for new nodes m1->add_req(n_region, n_src1, n_src2);
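All of the ppc.ad edits above are the same mechanical change: "new (C) someNode()" becomes "new someNode()" when building Mach nodes and operands. That is only safe because these classes route allocation through their own operator new, which locates the current compilation's arena without being handed the Compile* at every call site. The sketch below shows the general pattern with invented names (Arena, ArenaObj, current_arena); it is an assumption about the shape of the mechanism, not the actual HotSpot classes:

// arena_new_sketch.cpp -- a generic sketch of arena-backed operator new;
// all names here are placeholders, not HotSpot's real types.
#include <cstddef>
#include <cstdio>

struct Arena {
  // Trivial bump allocator (no capacity check; fine for a sketch). A real
  // arena manages chunks and is released wholesale after the compilation.
  char buf[1 << 16];
  size_t used = 0;
  void* allocate(size_t n) {
    void* p = buf + used;
    used += (n + 7) & ~size_t(7);   // keep 8-byte alignment
    return p;
  }
};

static Arena* current_arena() {
  static Arena arena;               // stand-in for a per-compilation arena
  return &arena;
}

struct ArenaObj {
  // Plain "new Node()" lands in the arena, so call sites no longer need to
  // pass the compile object explicitly as in "new (C) Node()".
  void* operator new(size_t size) { return current_arena()->allocate(size); }
  void operator delete(void*) { /* freed with the whole arena */ }
};

struct Node : ArenaObj { int idx = 0; };

int main() {
  Node* n = new Node();             // arena-allocated despite the plain syntax
  std::printf("node at %p, arena used = %zu bytes\n",
              (void*)n, current_arena()->used);
  return 0;
}

Call sites get shorter and cannot accidentally pass the wrong compile object, while deallocation still happens wholesale when the arena is released.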
--- a/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -123,6 +123,7 @@ fpop2_op3 = 0x35, impdep1_op3 = 0x36, aes3_op3 = 0x36, + sha_op3 = 0x36, alignaddr_op3 = 0x36, faligndata_op3 = 0x36, flog3_op3 = 0x36, @@ -223,7 +224,11 @@ mwtos_opf = 0x119, aes_kexpand0_opf = 0x130, - aes_kexpand2_opf = 0x131 + aes_kexpand2_opf = 0x131, + + sha1_opf = 0x141, + sha256_opf = 0x142, + sha512_opf = 0x143 }; enum op5s { @@ -595,6 +600,11 @@ // AES crypto instructions supported only on certain processors static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instructions support"); } + // SHA crypto instructions supported only on certain processors + static void sha1_only() { assert( VM_Version::has_sha1(), "This instruction only works on SPARC with SHA1"); } + static void sha256_only() { assert( VM_Version::has_sha256(), "This instruction only works on SPARC with SHA256"); } + static void sha512_only() { assert( VM_Version::has_sha512(), "This instruction only works on SPARC with SHA512"); } + // instruction only in VIS1 static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); } @@ -1179,7 +1189,6 @@ u_field(3, 29, 25) | immed(true) | simm(simm13a, 13)); } inline void wrfprs( Register d) { v9_only(); emit_int32( op(arith_op) | rs1(d) | op3(wrreg_op3) | u_field(6, 29, 25)); } - // VIS1 instructions void alignaddr( Register s1, Register s2, Register d ) { vis1_only(); emit_int32( op(arith_op) | rd(d) | op3(alignaddr_op3) | rs1(s1) | opf(alignaddr_opf) | rs2(s2)); } @@ -1203,6 +1212,12 @@ void movwtos( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::S) | op3(mftoi_op3) | opf(mwtos_opf) | rs2(s)); } void movxtod( Register s, FloatRegister d ) { vis3_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(mftoi_op3) | opf(mxtod_opf) | rs2(s)); } + // Crypto SHA instructions + + void sha1() { sha1_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha1_opf)); } + void sha256() { sha256_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha256_opf)); } + void sha512() { sha512_only(); emit_int32( op(arith_op) | op3(sha_op3) | opf(sha512_opf)); } + // Creation Assembler(CodeBuffer* code) : AbstractAssembler(code) { #ifdef CHECK_DELAY
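The new SHA emitters sit in the same impdep1 opcode slot as the AES instructions (sha_op3 is the same 0x36 value as aes3_op3) and are distinguished only by their opf codes 0x141 through 0x143, with the *_only() guards refusing to emit them unless VM_Version reports hardware support. The sketch below shows how such a word can be composed from its fields; the bit positions reflect my reading of the SPARC format-3 layout and the helper names are invented, so treat it as an assumption rather than a copy of the real Assembler helpers:

// sparc_encode_sketch.cpp -- illustration only; field positions are my
// assumption about the SPARC format-3 layout, not copied from the assembler.
#include <cstdint>
#include <cstdio>

// Assumed field placement: op in bits 31..30, rd 29..25, op3 24..19,
// rs1 18..14, opf 13..5, rs2 4..0.
static uint32_t f_op (uint32_t v) { return v << 30; }
static uint32_t f_rd (uint32_t v) { return v << 25; }
static uint32_t f_op3(uint32_t v) { return v << 19; }
static uint32_t f_rs1(uint32_t v) { return v << 14; }
static uint32_t f_opf(uint32_t v) { return v << 5; }
static uint32_t f_rs2(uint32_t v) { return v; }

int main() {
  const uint32_t arith_op   = 2;      // register-register instruction group
  const uint32_t sha_op3    = 0x36;   // shared impdep1 slot, like aes3_op3
  const uint32_t sha256_opf = 0x142;  // selects sha256 within that slot
  // sha256 takes no explicit register operands, so rd/rs1/rs2 stay zero.
  uint32_t insn = f_op(arith_op) | f_rd(0) | f_op3(sha_op3)
                | f_rs1(0) | f_opf(sha256_opf) | f_rs2(0);
  std::printf("sha256 encodes as 0x%08x\n", insn);
  return 0;
}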
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1612,13 +1612,10 @@ __ set((intptr_t)os::get_polling_page(), tmp->as_register()); if (info != NULL) { add_debug_info_for_branch(info); - } else { - __ relocate(relocInfo::poll_type); } - int offset = __ offset(); + __ relocate(relocInfo::poll_type); __ ld_ptr(tmp->as_register(), 0, G0); - return offset; }
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -872,21 +872,19 @@ void LIRGenerator::do_NewInstance(NewInstance* x) { + print_if_not_loaded(x); + // This instruction can be deoptimized in the slow path : use // O0 as result register. const LIR_Opr reg = result_register_for(x->type()); -#ifndef PRODUCT - if (PrintNotLoaded && !x->klass()->is_loaded()) { - tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci()); - } -#endif + CodeEmitInfo* info = state_for(x, x->state()); LIR_Opr tmp1 = FrameMap::G1_oop_opr; LIR_Opr tmp2 = FrameMap::G3_oop_opr; LIR_Opr tmp3 = FrameMap::G4_oop_opr; LIR_Opr tmp4 = FrameMap::O1_oop_opr; LIR_Opr klass_reg = FrameMap::G5_metadata_opr; - new_instance(reg, x->klass(), tmp1, tmp2, tmp3, tmp4, klass_reg, info); + new_instance(reg, x->klass(), x->is_unresolved(), tmp1, tmp2, tmp3, tmp4, klass_reg, info); LIR_Opr result = rlock_result(x); __ move(reg, result); }
--- a/hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -135,7 +135,7 @@ if (TraceICs) { ResourceMark rm; tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", - instruction_address(), + p2i(instruction_address()), callee->name_and_sig_as_C_string()); }
--- a/hotspot/src/cpu/sparc/vm/debug_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/debug_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -42,20 +42,24 @@ intptr_t *pc = NULL; intptr_t *next_pc = NULL; int count = 0; - tty->print("register window backtrace from %#x:\n", sp); + tty->print_cr("register window backtrace from " INTPTR_FORMAT ":", p2i(sp)); while (sp != NULL && ((intptr_t)sp & 7) == 0 && sp > prev_sp && sp < prev_sp+1000) { pc = next_pc; next_pc = (intptr_t*) sp[I7->sp_offset_in_saved_window()]; - tty->print("[%d] sp=%#x pc=", count, sp); + tty->print("[%d] sp=" INTPTR_FORMAT " pc=", count, p2i(sp)); findpc((intptr_t)pc); if (WizardMode && Verbose) { // print register window contents also - tty->print_cr(" L0..L7: {%#x %#x %#x %#x %#x %#x %#x %#x}", - sp[0+0],sp[0+1],sp[0+2],sp[0+3], - sp[0+4],sp[0+5],sp[0+6],sp[0+7]); - tty->print_cr(" I0..I7: {%#x %#x %#x %#x %#x %#x %#x %#x}", - sp[8+0],sp[8+1],sp[8+2],sp[8+3], - sp[8+4],sp[8+5],sp[8+6],sp[8+7]); + tty->print_cr(" L0..L7: {" + INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " + INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " ", + sp[0+0], sp[0+1], sp[0+2], sp[0+3], + sp[0+4], sp[0+5], sp[0+6], sp[0+7]); + tty->print_cr(" I0..I7: {" + INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " + INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " " INTPTR_FORMAT " ", + sp[8+0], sp[8+1], sp[8+2], sp[8+3], + sp[8+4], sp[8+5], sp[8+6], sp[8+7]); // (and print stack frame contents too??) CodeBlob *b = CodeCache::find_blob((address) pc); @@ -74,7 +78,7 @@ count += 1; } if (sp != NULL) - tty->print("[%d] sp=%#x [bogus sp!]", count, sp); + tty->print("[%d] sp=" INTPTR_FORMAT " [bogus sp!]", count, p2i(sp)); } #endif // PRODUCT
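The debug_sparc.cpp changes replace %#x, which truncates 64-bit pointers and upsets format checking, with INTPTR_FORMAT plus explicit p2i() conversions. Below is a minimal stand-in built on the standard <cinttypes> macros; the real HotSpot macros live in the shared globalDefinitions headers and may differ in width and padding:

// p2i_sketch.cpp -- illustration only; INTPTR_FORMAT_LIKE and p2i_like are
// stand-ins for HotSpot's INTPTR_FORMAT and p2i(), which may differ in detail.
#include <cinttypes>
#include <cstdio>

#define INTPTR_FORMAT_LIKE "0x%016" PRIxPTR

static inline uintptr_t p2i_like(const void* p) {
  return reinterpret_cast<uintptr_t>(p);
}

int main() {
  int local = 42;
  // Prints the full 64-bit address on LP64, where "%#x" would only show the
  // low 32 bits (and warn under -Wformat / -Werror).
  std::printf("&local = " INTPTR_FORMAT_LIKE "\n", p2i_like(&local));
  return 0;
}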
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -557,7 +557,8 @@ // QQQ this assert is invalid (or too strong anyway) sice _pc could // be original pc and frame could have the deopt pc. // assert(_pc == *O7_addr() + pc_return_offset, "frame has wrong pc"); - tty->print_cr("patch_pc at address 0x%x [0x%x -> 0x%x] ", O7_addr(), _pc, pc); + tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]", + p2i(O7_addr()), p2i(_pc), p2i(pc)); } _cb = CodeCache::find_blob(pc); *O7_addr() = pc - pc_return_offset;
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1202,7 +1202,7 @@ if ( j != last ) s->print(" - f%d", last); s->print(" = %f", val); s->fill_to(25); - s->print_cr(" (0x%x)", val); + s->print_cr(" (0x%x)", *(int*)&val); j = last + 1; } s->cr();
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -483,7 +483,7 @@ const char* mh_reg_name = has_mh ? "G3_mh" : "G3"; tty->print_cr("MH %s %s="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT, adaptername, mh_reg_name, - (intptr_t) mh, saved_sp, args); + p2i(mh), p2i(saved_sp), p2i(args)); if (Verbose) { // dumping last frame with frame::describe
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -78,7 +78,7 @@ } void NativeInstruction::print() { - tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0)); + tty->print_cr(INTPTR_FORMAT ": 0x%x", p2i(addr_at(0)), long_at(0)); } void NativeInstruction::set_long_at(int offset, int i) { @@ -142,7 +142,7 @@ } void NativeCall::print() { - tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination()); + tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination())); } @@ -271,7 +271,7 @@ } void NativeFarCall::print() { - tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination()); + tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination())); } bool NativeFarCall::destination_is_compiled_verified_entry_point() { @@ -324,7 +324,7 @@ void NativeMovConstReg::print() { - tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data()); + tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data()); } @@ -446,7 +446,7 @@ void NativeMovConstRegPatching::print() { - tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data()); + tty->print_cr(INTPTR_FORMAT ": mov reg, 0x%x", p2i(instruction_address()), data()); } @@ -585,9 +585,10 @@ void NativeMovRegMem::print() { if (is_immediate()) { - tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset()); + // offset is a signed 13-bit immediate, so casting it to int will not lose significant bits + tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %d]", p2i(instruction_address()), (int)offset()); } else { - tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address()); + tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", p2i(instruction_address())); } } @@ -689,149 +690,6 @@ // End code for unit testing implementation of NativeMovRegMem class -//-------------------------------------------------------------------------------- - - -void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) { - Untested("copy_instruction_to"); - int instruction_size = next_instruction_address() - instruction_address(); - for (int i = 0; i < instruction_size; i += wordSize) { - *(long*)(new_instruction_address + i) = *(long*)(address(this) + i); - } -} - - -void NativeMovRegMemPatching::verify() { - NativeInstruction::verify(); - // make sure code pattern is actually a "ld" or "st" of some sort. - int i0 = long_at(0); - int op3 = inv_op3(i0); - - assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok"); - - if (!(is_op(i0, Assembler::ldst_op) && - inv_immed(i0) && - 0 != (op3 < op3_ldst_int_limit - ? (1 << op3 ) & (op3_mask_ld | op3_mask_st) - : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) { - int i1 = long_at(ldst_offset); - Register rd = inv_rd(i0); - - op3 = inv_op3(i1); - if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) && - 0 != (op3 < op3_ldst_int_limit - ? 
(1 << op3 ) & (op3_mask_ld | op3_mask_st) - : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) { - fatal("not a ld* or st* op"); - } - } -} - - -void NativeMovRegMemPatching::print() { - if (is_immediate()) { - tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset()); - } else { - tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address()); - } -} - - -// Code for unit testing implementation of NativeMovRegMemPatching class -void NativeMovRegMemPatching::test() { -#ifdef ASSERT - ResourceMark rm; - CodeBuffer cb("test", 1000, 1000); - MacroAssembler* a = new MacroAssembler(&cb); - NativeMovRegMemPatching* nm; - uint idx = 0; - uint idx1; - int offsets[] = { - 0x0, - 0xffffffff, - 0x7fffffff, - 0x80000000, - 4096, - 4097, - 0x20, - 0x4000, - }; - - VM_Version::allow_all(); - - AddressLiteral al(0xffffffff, relocInfo::external_word_type); - a->ldsw( G5, al.low10(), G4); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->ldsw( G5, I3, G4 ); idx++; - a->ldsb( G5, al.low10(), G4); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->ldsb( G5, I3, G4 ); idx++; - a->ldsh( G5, al.low10(), G4); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->ldsh( G5, I3, G4 ); idx++; - a->lduw( G5, al.low10(), G4); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->lduw( G5, I3, G4 ); idx++; - a->ldub( G5, al.low10(), G4); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->ldub( G5, I3, G4 ); idx++; - a->lduh( G5, al.low10(), G4); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->lduh( G5, I3, G4 ); idx++; - a->ldx( G5, al.low10(), G4); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->ldx( G5, I3, G4 ); idx++; - a->ldd( G5, al.low10(), G4); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->ldd( G5, I3, G4 ); idx++; - a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++; - - a->stw( G5, G4, al.low10()); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->stw( G5, G4, I3 ); idx++; - a->stb( G5, G4, al.low10()); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->stb( G5, G4, I3 ); idx++; - a->sth( G5, G4, al.low10()); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->sth( G5, G4, I3 ); idx++; - a->stx( G5, G4, al.low10()); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->stx( G5, G4, I3 ); idx++; - a->std( G5, G4, al.low10()); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->std( G5, G4, I3 ); idx++; - a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++; - a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3); - a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++; - - nm = nativeMovRegMemPatching_at( cb.insts_begin() ); - nm->print(); - nm->set_offset( low10(0) ); - nm->print(); - nm->add_offset_in_bytes( low10(0xbb) * wordSize ); - nm->print(); - - while (--idx) { - nm = nativeMovRegMemPatching_at( nm->next_instruction_address() ); - nm->print(); - for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) { - nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] ); - assert(nm->offset() == (nm->is_immediate() ? 
low10(offsets[idx1]) : offsets[idx1]), - "check unit test"); - nm->print(); - } - nm->add_offset_in_bytes( low10(0xbb) * wordSize ); - nm->print(); - } - - VM_Version::revert(); -#endif // ASSERT -} -// End code for unit testing implementation of NativeMovRegMemPatching class - //-------------------------------------------------------------------------------- @@ -863,7 +721,7 @@ void NativeJump::print() { - tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination()); + tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, p2i(instruction_address()), p2i(jump_destination())); }
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -38,7 +38,6 @@ // - - NativeMovConstReg // - - NativeMovConstRegPatching // - - NativeMovRegMem -// - - NativeMovRegMemPatching // - - NativeJump // - - NativeGeneralJump // - - NativeIllegalInstruction @@ -710,96 +709,6 @@ }; -// An interface for accessing/manipulating native memory ops -// ld* [reg + offset], reg -// st* reg, [reg + offset] -// sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; ld* [reg1 + reg], reg2 -// sethi %hi(imm), reg; nop; add reg, %lo(imm), reg; st* reg2, [reg1 + reg] -// Ops covered: {lds,ldu,st}{w,b,h}, {ld,st}{d,x} -// -// Note that it is identical to NativeMovRegMem with the exception of a nop between the -// sethi and the add. The nop is required to be in the delay slot of the call instruction -// which overwrites the sethi during patching. -class NativeMovRegMemPatching; -inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address); -class NativeMovRegMemPatching: public NativeInstruction { - public: - enum Sparc_specific_constants { - op3_mask_ld = 1 << Assembler::lduw_op3 | - 1 << Assembler::ldub_op3 | - 1 << Assembler::lduh_op3 | - 1 << Assembler::ldd_op3 | - 1 << Assembler::ldsw_op3 | - 1 << Assembler::ldsb_op3 | - 1 << Assembler::ldsh_op3 | - 1 << Assembler::ldx_op3, - op3_mask_st = 1 << Assembler::stw_op3 | - 1 << Assembler::stb_op3 | - 1 << Assembler::sth_op3 | - 1 << Assembler::std_op3 | - 1 << Assembler::stx_op3, - op3_ldst_int_limit = Assembler::ldf_op3, - op3_mask_ldf = 1 << (Assembler::ldf_op3 - op3_ldst_int_limit) | - 1 << (Assembler::lddf_op3 - op3_ldst_int_limit), - op3_mask_stf = 1 << (Assembler::stf_op3 - op3_ldst_int_limit) | - 1 << (Assembler::stdf_op3 - op3_ldst_int_limit), - - offset_width = 13, - sethi_offset = 0, -#ifdef _LP64 - nop_offset = 7 * BytesPerInstWord, -#else - nop_offset = 4, -#endif - add_offset = nop_offset + BytesPerInstWord, - ldst_offset = add_offset + BytesPerInstWord - }; - bool is_immediate() const { - // check if instruction is ld* [reg + offset], reg or st* reg, [reg + offset] - int i0 = long_at(0); - return (is_op(i0, Assembler::ldst_op)); - } - - address instruction_address() const { return addr_at(0); } - address next_instruction_address() const { - return addr_at(is_immediate()? 4 : 16); - } - int offset() const { - return is_immediate()? inv_simm(long_at(0), offset_width) : - nativeMovConstRegPatching_at(addr_at(0))->data(); - } - void set_offset(int x) { - if (is_immediate()) { - guarantee(fits_in_simm(x, offset_width), "data block offset overflow"); - set_long_at(0, set_simm(long_at(0), x, offset_width)); - } - else - nativeMovConstRegPatching_at(addr_at(0))->set_data(x); - } - - void add_offset_in_bytes(intptr_t radd_offset) { - set_offset (offset() + radd_offset); - } - - void copy_instruction_to(address new_instruction_address); - - void verify(); - void print (); - - // unit test stuff - static void test(); - - private: - friend inline NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) { - NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)address; - #ifdef ASSERT - test->verify(); - #endif - return test; - } -}; - - // An interface for accessing/manipulating native jumps // jump_to addr // == sethi %hi22(addr), temp ; jumpl reg, %lo10(addr), G0 ; <delay>
--- a/hotspot/src/cpu/sparc/vm/sparc.ad Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/sparc.ad Wed Jul 05 19:46:17 2017 +0200 @@ -1206,10 +1206,10 @@ } if (Assembler::is_simm13(-framesize)) { - st->print ("SAVE R_SP,-%d,R_SP",framesize); + st->print ("SAVE R_SP,-" SIZE_FORMAT ",R_SP",framesize); } else { - st->print_cr("SETHI R_SP,hi%%(-%d),R_G3",framesize); st->print("\t"); - st->print_cr("ADD R_G3,lo%%(-%d),R_G3",framesize); st->print("\t"); + st->print_cr("SETHI R_SP,hi%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t"); + st->print_cr("ADD R_G3,lo%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t"); st->print ("SAVE R_SP,R_G3,R_SP"); }
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -4575,6 +4575,219 @@ return start; } + address generate_sha1_implCompress(bool multi_block, const char *name) { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + Label L_sha1_loop, L_sha1_unaligned_input, L_sha1_unaligned_input_loop; + int i; + + Register buf = O0; // byte[] source+offset + Register state = O1; // int[] SHA.state + Register ofs = O2; // int offset + Register limit = O3; // int limit + + // load state into F0-F4 + for (i = 0; i < 5; i++) { + __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i)); + } + + __ andcc(buf, 7, G0); + __ br(Assembler::notZero, false, Assembler::pn, L_sha1_unaligned_input); + __ delayed()->nop(); + + __ BIND(L_sha1_loop); + // load buf into F8-F22 + for (i = 0; i < 8; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); + } + __ sha1(); + if (multi_block) { + __ add(ofs, 64, ofs); + __ add(buf, 64, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F4 into state and return + for (i = 0; i < 4; i++) { + __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10); + + __ BIND(L_sha1_unaligned_input); + __ alignaddr(buf, G0, buf); + + __ BIND(L_sha1_unaligned_input_loop); + // load buf into F8-F22 + for (i = 0; i < 9; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); + } + for (i = 0; i < 8; i++) { + __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8)); + } + __ sha1(); + if (multi_block) { + __ add(ofs, 64, ofs); + __ add(buf, 64, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha1_unaligned_input_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F4 into state and return + for (i = 0; i < 4; i++) { + __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F4, state, 0x10); + + return start; + } + + address generate_sha256_implCompress(bool multi_block, const char *name) { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + Label L_sha256_loop, L_sha256_unaligned_input, L_sha256_unaligned_input_loop; + int i; + + Register buf = O0; // byte[] source+offset + Register state = O1; // int[] SHA2.state + Register ofs = O2; // int offset + Register limit = O3; // int limit + + // load state into F0-F7 + for (i = 0; i < 8; i++) { + __ ldf(FloatRegisterImpl::S, state, i*4, as_FloatRegister(i)); + } + + __ andcc(buf, 7, G0); + __ br(Assembler::notZero, false, Assembler::pn, L_sha256_unaligned_input); + __ delayed()->nop(); + + __ BIND(L_sha256_loop); + // load buf into F8-F22 + for (i = 0; i < 8; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); + } + __ sha256(); + if (multi_block) { + __ add(ofs, 64, ofs); + __ add(buf, 64, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F7 into state and return + for (i = 0; i < 7; i++) { + __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F7, state, 
0x1c); + + __ BIND(L_sha256_unaligned_input); + __ alignaddr(buf, G0, buf); + + __ BIND(L_sha256_unaligned_input_loop); + // load buf into F8-F22 + for (i = 0; i < 9; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 8)); + } + for (i = 0; i < 8; i++) { + __ faligndata(as_FloatRegister(i*2 + 8), as_FloatRegister(i*2 + 10), as_FloatRegister(i*2 + 8)); + } + __ sha256(); + if (multi_block) { + __ add(ofs, 64, ofs); + __ add(buf, 64, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha256_unaligned_input_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F7 into state and return + for (i = 0; i < 7; i++) { + __ stf(FloatRegisterImpl::S, as_FloatRegister(i), state, i*4); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::S, F7, state, 0x1c); + + return start; + } + + address generate_sha512_implCompress(bool multi_block, const char *name) { + __ align(CodeEntryAlignment); + StubCodeMark mark(this, "StubRoutines", name); + address start = __ pc(); + + Label L_sha512_loop, L_sha512_unaligned_input, L_sha512_unaligned_input_loop; + int i; + + Register buf = O0; // byte[] source+offset + Register state = O1; // long[] SHA5.state + Register ofs = O2; // int offset + Register limit = O3; // int limit + + // load state into F0-F14 + for (i = 0; i < 8; i++) { + __ ldf(FloatRegisterImpl::D, state, i*8, as_FloatRegister(i*2)); + } + + __ andcc(buf, 7, G0); + __ br(Assembler::notZero, false, Assembler::pn, L_sha512_unaligned_input); + __ delayed()->nop(); + + __ BIND(L_sha512_loop); + // load buf into F16-F46 + for (i = 0; i < 16; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16)); + } + __ sha512(); + if (multi_block) { + __ add(ofs, 128, ofs); + __ add(buf, 128, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F14 into state and return + for (i = 0; i < 7; i++) { + __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38); + + __ BIND(L_sha512_unaligned_input); + __ alignaddr(buf, G0, buf); + + __ BIND(L_sha512_unaligned_input_loop); + // load buf into F16-F46 + for (i = 0; i < 17; i++) { + __ ldf(FloatRegisterImpl::D, buf, i*8, as_FloatRegister(i*2 + 16)); + } + for (i = 0; i < 16; i++) { + __ faligndata(as_FloatRegister(i*2 + 16), as_FloatRegister(i*2 + 18), as_FloatRegister(i*2 + 16)); + } + __ sha512(); + if (multi_block) { + __ add(ofs, 128, ofs); + __ add(buf, 128, buf); + __ cmp_and_brx_short(ofs, limit, Assembler::lessEqual, Assembler::pt, L_sha512_unaligned_input_loop); + __ mov(ofs, O0); // to be returned + } + + // store F0-F14 into state and return + for (i = 0; i < 7; i++) { + __ stf(FloatRegisterImpl::D, as_FloatRegister(i*2), state, i*8); + } + __ retl(); + __ delayed()->stf(FloatRegisterImpl::D, F14, state, 0x38); + + return start; + } + void generate_initial() { // Generates all stubs and initializes the entry points @@ -4647,6 +4860,20 @@ StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt(); StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel(); } + + // generate SHA1/SHA256/SHA512 intrinsics code + if (UseSHA1Intrinsics) { + StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress"); + StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB"); 
+ } + if (UseSHA256Intrinsics) { + StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress"); + StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB"); + } + if (UseSHA512Intrinsics) { + StubRoutines::_sha512_implCompress = generate_sha512_implCompress(false, "sha512_implCompress"); + StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(true, "sha512_implCompressMB"); + } }
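The new sha*_implCompress stubs follow the implCompressMultiBlock contract of sun.security.provider.DigestBase: compress one 64-byte block (128 bytes for SHA-512) at buf, advance ofs and buf, keep going while ofs <= limit, and return the final ofs (left in O0). A minimal C++ model of that multi-block loop; the function and callback names are illustrative, not HotSpot code:

#include <cstdint>

// Mirrors the stub's loop structure: compress a block, advance the offset,
// branch back while ofs <= limit (the cmp_and_brx_short above), and hand the
// final offset back to the caller.
int compress_multi_block(const uint8_t* buf, int ofs, int limit,
                         int block_size,
                         void (*compress_block)(const uint8_t* block)) {
  do {
    compress_block(buf);   // one application of the SHA round function
    buf += block_size;
    ofs += block_size;
  } while (ofs <= limit);
  return ofs;
}

static void dummy_compress(const uint8_t*) {}  // stands in for the SHA rounds

int main() {
  uint8_t data[128] = {0};
  // ofs=0, limit=64, 64-byte blocks: the blocks at 0 and 64 are processed
  // and 128 is returned.
  return compress_multi_block(data, 0, 64, 64, dummy_compress) == 128 ? 0 : 1;
}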
--- a/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/stubRoutines_sparc.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -41,7 +41,7 @@ enum /* platform_dependent_constants */ { // %%%%%%%% May be able to shrink this a lot code_size1 = 20000, // simply increase if too small (assembler will crash if too small) - code_size2 = 22000 // simply increase if too small (assembler will crash if too small) + code_size2 = 23000 // simply increase if too small (assembler will crash if too small) }; class Sparc {
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1722,15 +1722,15 @@ if (caller->is_interpreted_frame()) { tty->print("interpreted "); } - tty->print_cr("caller fp=0x%x sp=0x%x", caller->fp(), caller->sp()); - tty->print_cr("save area = 0x%x, 0x%x", caller->sp(), caller->sp() + 16); - tty->print_cr("save area = 0x%x, 0x%x", caller->fp(), caller->fp() + 16); - tty->print_cr("interpreter fp=0x%x sp=0x%x", interpreter_frame->fp(), interpreter_frame->sp()); - tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->sp(), interpreter_frame->sp() + 16); - tty->print_cr("save area = 0x%x, 0x%x", interpreter_frame->fp(), interpreter_frame->fp() + 16); - tty->print_cr("Llocals = 0x%x", locals); - tty->print_cr("Lesp = 0x%x", esp); - tty->print_cr("Lmonitors = 0x%x", monitors); + tty->print_cr("caller fp=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->sp())); + tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->sp()), p2i(caller->sp() + 16)); + tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(caller->fp()), p2i(caller->fp() + 16)); + tty->print_cr("interpreter fp=" INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->sp())); + tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->sp()), p2i(interpreter_frame->sp() + 16)); + tty->print_cr("save area = " INTPTR_FORMAT ", " INTPTR_FORMAT, p2i(interpreter_frame->fp()), p2i(interpreter_frame->fp() + 16)); + tty->print_cr("Llocals = " INTPTR_FORMAT, p2i(locals)); + tty->print_cr("Lesp = " INTPTR_FORMAT, p2i(esp)); + tty->print_cr("Lmonitors = " INTPTR_FORMAT, p2i(monitors)); } if (method->max_locals() > 0) {
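The interpreter trace output above switches from "0x%x", which truncates 64-bit values and trips format warnings, to INTPTR_FORMAT plus p2i(), HotSpot's cast-pointer-to-intptr_t helper. A standalone C++ equivalent of the same idiom, with the standard <cinttypes> macro standing in for INTPTR_FORMAT and the cast standing in for p2i:

#include <cinttypes>
#include <cstdio>

// Print a pointer portably on 32- and 64-bit targets: cast to uintptr_t and
// let the width come from the PRIxPTR format macro instead of hard-coding
// "0x%x", which would truncate on LP64.
static void print_frame_pointer(const void* fp) {
  std::printf("caller fp=0x%" PRIxPTR "\n",
              reinterpret_cast<uintptr_t>(fp));
}

int main() {
  int dummy = 0;
  print_frame_pointer(&dummy);
}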
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -234,7 +234,7 @@ assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size"); char buf[512]; - jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")), (has_hardware_popc() ? ", popc" : ""), (has_vis1() ? ", vis1" : ""), @@ -243,6 +243,9 @@ (has_blk_init() ? ", blk_init" : ""), (has_cbcond() ? ", cbcond" : ""), (has_aes() ? ", aes" : ""), + (has_sha1() ? ", sha1" : ""), + (has_sha256() ? ", sha256" : ""), + (has_sha512() ? ", sha512" : ""), (is_ultra3() ? ", ultra3" : ""), (is_sun4v() ? ", sun4v" : ""), (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")), @@ -301,6 +304,58 @@ } } + // SHA1, SHA256, and SHA512 instructions were added to SPARC T-series at different times + if (has_sha1() || has_sha256() || has_sha512()) { + if (UseVIS > 0) { // SHA intrinsics use VIS1 instructions + if (FLAG_IS_DEFAULT(UseSHA)) { + FLAG_SET_DEFAULT(UseSHA, true); + } + } else { + if (UseSHA) { + warning("SPARC SHA intrinsics require VIS1 instruction support. Intrinsics will be disabled."); + FLAG_SET_DEFAULT(UseSHA, false); + } + } + } else if (UseSHA) { + warning("SHA instructions are not available on this CPU"); + FLAG_SET_DEFAULT(UseSHA, false); + } + + if (!UseSHA) { + FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); + } else { + if (has_sha1()) { + if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA1Intrinsics, true); + } + } else if (UseSHA1Intrinsics) { + warning("SHA1 instruction is not available on this CPU."); + FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); + } + if (has_sha256()) { + if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA256Intrinsics, true); + } + } else if (UseSHA256Intrinsics) { + warning("SHA256 instruction (for SHA-224 and SHA-256) is not available on this CPU."); + FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); + } + + if (has_sha512()) { + if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA512Intrinsics, true); + } + } else if (UseSHA512Intrinsics) { + warning("SHA512 instruction (for SHA-384 and SHA-512) is not available on this CPU."); + FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); + } + if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) { + FLAG_SET_DEFAULT(UseSHA, false); + } + } + if (FLAG_IS_DEFAULT(ContendedPaddingWidth) && (cache_line_size > ContendedPaddingWidth)) ContendedPaddingWidth = cache_line_size;
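The SPARC flag adjustment above is a cascade: UseSHA is only defaulted on when at least one SHA capability plus VIS1 is present (the stubs use VIS1 loads and stores), each UseSHA*Intrinsics flag then follows its own capability bit, and UseSHA is cleared again if no per-algorithm intrinsic survives. A simplified standalone model of that decision, assuming every flag starts at its default (the real code also respects flags set explicitly on the command line via FLAG_IS_DEFAULT and prints warnings):

#include <cstdio>

struct ShaFlags {
  bool use_sha, use_sha1, use_sha256, use_sha512;
};

// has_sha* and vis1 stand for the CPU capability bits read by VM_Version.
ShaFlags derive_sha_flags(bool has_sha1, bool has_sha256, bool has_sha512,
                          bool vis1) {
  ShaFlags f{false, false, false, false};
  bool any_sha = has_sha1 || has_sha256 || has_sha512;
  f.use_sha = any_sha && vis1;            // SHA intrinsics require VIS1
  if (f.use_sha) {
    f.use_sha1   = has_sha1;
    f.use_sha256 = has_sha256;
    f.use_sha512 = has_sha512;
    // If nothing ended up enabled, turn the umbrella flag back off.
    f.use_sha = f.use_sha1 || f.use_sha256 || f.use_sha512;
  }
  return f;
}

int main() {
  ShaFlags f = derive_sha_flags(true, true, false, true);
  std::printf("UseSHA=%d SHA1=%d SHA256=%d SHA512=%d\n",
              f.use_sha, f.use_sha1, f.use_sha256, f.use_sha512);
}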
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,10 @@ T_family = 16, T1_model = 17, sparc5_instructions = 18, - aes_instructions = 19 + aes_instructions = 19, + sha1_instruction = 20, + sha256_instruction = 21, + sha512_instruction = 22 }; enum Feature_Flag_Set { @@ -77,6 +80,9 @@ T1_model_m = 1 << T1_model, sparc5_instructions_m = 1 << sparc5_instructions, aes_instructions_m = 1 << aes_instructions, + sha1_instruction_m = 1 << sha1_instruction, + sha256_instruction_m = 1 << sha256_instruction, + sha512_instruction_m = 1 << sha512_instruction, generic_v8_m = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m, generic_v9_m = generic_v8_m | v9_instructions_m, @@ -129,6 +135,9 @@ static bool has_cbcond() { return (_features & cbcond_instructions_m) != 0; } static bool has_sparc5_instr() { return (_features & sparc5_instructions_m) != 0; } static bool has_aes() { return (_features & aes_instructions_m) != 0; } + static bool has_sha1() { return (_features & sha1_instruction_m) != 0; } + static bool has_sha256() { return (_features & sha256_instruction_m) != 0; } + static bool has_sha512() { return (_features & sha512_instruction_m) != 0; } static bool supports_compare_and_exchange() { return has_v9(); }
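The three new capability bits extend the existing encoding in VM_Version: each feature gets a position in Feature_Flag, a derived single-bit mask in Feature_Flag_Set, and a has_*() accessor that tests the mask against _features. A minimal standalone rendition of that pattern covering just the bits touched by this change (the aes bit is kept for context; _features is hard-wired here instead of being probed):

#include <cstdint>

enum Feature_Flag {
  aes_instructions   = 19,
  sha1_instruction   = 20,
  sha256_instruction = 21,
  sha512_instruction = 22
};

enum Feature_Flag_Set : uint32_t {
  aes_instructions_m   = 1u << aes_instructions,
  sha1_instruction_m   = 1u << sha1_instruction,
  sha256_instruction_m = 1u << sha256_instruction,
  sha512_instruction_m = 1u << sha512_instruction
};

// In HotSpot this word is filled in by the OS-specific probe code.
static uint32_t _features = sha1_instruction_m | sha256_instruction_m;

static bool has_sha1()   { return (_features & sha1_instruction_m)   != 0; }
static bool has_sha256() { return (_features & sha256_instruction_m) != 0; }
static bool has_sha512() { return (_features & sha512_instruction_m) != 0; }

int main() {
  return (has_sha1() && has_sha256() && !has_sha512()) ? 0 : 1;
}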
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -111,7 +111,7 @@ if (PrintMiscellaneous && (WizardMode || Verbose)) { tty->print_cr("vtable #%d at "PTR_FORMAT"[%d] left over: %d", - vtable_index, s->entry_point(), + vtable_index, p2i(s->entry_point()), (int)(s->code_end() - s->entry_point()), (int)(s->code_end() - __ pc())); } @@ -206,7 +206,7 @@ if (PrintMiscellaneous && (WizardMode || Verbose)) { tty->print_cr("itable #%d at "PTR_FORMAT"[%d] left over: %d", - itable_index, s->entry_point(), + itable_index, p2i(s->entry_point()), (int)(s->code_end() - s->entry_point()), (int)(s->code_end() - __ pc())); }
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1085,14 +1085,11 @@ void LIRGenerator::do_NewInstance(NewInstance* x) { -#ifndef PRODUCT - if (PrintNotLoaded && !x->klass()->is_loaded()) { - tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci()); - } -#endif + print_if_not_loaded(x); + CodeEmitInfo* info = state_for(x, x->state()); LIR_Opr reg = result_register_for(x->type()); - new_instance(reg, x->klass(), + new_instance(reg, x->klass(), x->is_unresolved(), FrameMap::rcx_oop_opr, FrameMap::rdi_oop_opr, FrameMap::rsi_oop_opr,
--- a/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/x86/vm/nativeInst_x86.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -327,18 +327,6 @@ return test; } -class NativeMovRegMemPatching: public NativeMovRegMem { - private: - friend NativeMovRegMemPatching* nativeMovRegMemPatching_at (address address) { - NativeMovRegMemPatching* test = (NativeMovRegMemPatching*)(address - instruction_offset); - #ifdef ASSERT - test->verify(); - #endif - return test; - } -}; - - // An interface for accessing/manipulating native leal instruction of form: // leal reg, [reg + offset]
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -590,6 +590,17 @@ FLAG_SET_DEFAULT(UseAESIntrinsics, false); } + if (UseSHA) { + warning("SHA instructions are not available on this CPU"); + FLAG_SET_DEFAULT(UseSHA, false); + } + if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) { + warning("SHA intrinsics are not available on this CPU"); + FLAG_SET_DEFAULT(UseSHA1Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA256Intrinsics, false); + FLAG_SET_DEFAULT(UseSHA512Intrinsics, false); + } + // Adjust RTM (Restricted Transactional Memory) flags if (!supports_rtm() && UseRTMLocking) { // Can't continue because UseRTMLocking affects UseBiasedLocking flag
--- a/hotspot/src/cpu/x86/vm/x86_32.ad Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/cpu/x86/vm/x86_32.ad Wed Jul 05 19:46:17 2017 +0200 @@ -1401,22 +1401,22 @@ // No transformation necessary. return; case INDIRECT: - new_memory = new (C) indirect_win95_safeOper( ); + new_memory = new indirect_win95_safeOper( ); break; case INDOFFSET8: - new_memory = new (C) indOffset8_win95_safeOper(memory->disp(NULL, NULL, 0)); + new_memory = new indOffset8_win95_safeOper(memory->disp(NULL, NULL, 0)); break; case INDOFFSET32: - new_memory = new (C) indOffset32_win95_safeOper(memory->disp(NULL, NULL, 0)); + new_memory = new indOffset32_win95_safeOper(memory->disp(NULL, NULL, 0)); break; case INDINDEXOFFSET: - new_memory = new (C) indIndexOffset_win95_safeOper(memory->disp(NULL, NULL, 0)); + new_memory = new indIndexOffset_win95_safeOper(memory->disp(NULL, NULL, 0)); break; case INDINDEXSCALE: - new_memory = new (C) indIndexScale_win95_safeOper(memory->scale()); + new_memory = new indIndexScale_win95_safeOper(memory->scale()); break; case INDINDEXSCALEOFFSET: - new_memory = new (C) indIndexScaleOffset_win95_safeOper(memory->scale(), memory->disp(NULL, NULL, 0)); + new_memory = new indIndexScaleOffset_win95_safeOper(memory->scale(), memory->disp(NULL, NULL, 0)); break; case LOAD_LONG_INDIRECT: case LOAD_LONG_INDOFFSET32:
--- a/hotspot/src/os/aix/vm/os_aix.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/os/aix/vm/os_aix.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -4734,10 +4734,8 @@ // // Thread.interrupt and object.notify{All} both call Event::set. // That is, we treat thread.interrupt as a special case of notification. - // The underlying Solaris implementation, cond_timedwait, admits - // spurious/premature wakeups, but the JLS/JVM spec prevents the - // JVM from making those visible to Java code. As such, we must - // filter out spurious wakeups. We assume all ETIME returns are valid. + // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false. + // We assume all ETIME returns are valid. // // TODO: properly differentiate simultaneous notify+interrupt. // In that case, we should propagate the notify to another waiter.
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -4299,10 +4299,8 @@ // // Thread.interrupt and object.notify{All} both call Event::set. // That is, we treat thread.interrupt as a special case of notification. - // The underlying Solaris implementation, cond_timedwait, admits - // spurious/premature wakeups, but the JLS/JVM spec prevents the - // JVM from making those visible to Java code. As such, we must - // filter out spurious wakeups. We assume all ETIME returns are valid. + // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false. + // We assume all ETIME returns are valid. // // TODO: properly differentiate simultaneous notify+interrupt. // In that case, we should propagate the notify to another waiter.
--- a/hotspot/src/os/linux/vm/os_linux.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -5538,10 +5538,8 @@ // // Thread.interrupt and object.notify{All} both call Event::set. // That is, we treat thread.interrupt as a special case of notification. - // The underlying Solaris implementation, cond_timedwait, admits - // spurious/premature wakeups, but the JLS/JVM spec prevents the - // JVM from making those visible to Java code. As such, we must - // filter out spurious wakeups. We assume all ETIME returns are valid. + // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false. + // We assume all ETIME returns are valid. // // TODO: properly differentiate simultaneous notify+interrupt. // In that case, we should propagate the notify to another waiter.
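The reworded comment (repeated in os_aix.cpp, os_bsd.cpp and os_linux.cpp above) states the same point in OS-neutral terms: the platform condition variable may wake up without a matching notify, and the JVM hides that from Java code unless FilterSpuriousWakeups is switched off. The underlying technique is the usual "wait in a loop, re-check the predicate" idiom; a standalone C++ illustration with std::condition_variable rather than HotSpot's os::PlatformEvent:

#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex m;
std::condition_variable cv;
bool signaled = false;          // the state change we actually wait for

void waiter() {
  std::unique_lock<std::mutex> lock(m);
  // A bare cv.wait(lock) may return spuriously; looping on the predicate
  // filters such wakeups out, which is what the JVM does for Object.wait()
  // unless FilterSpuriousWakeups is disabled.
  while (!signaled) {
    cv.wait(lock);
  }
}

void notifier() {
  {
    std::lock_guard<std::mutex> lock(m);
    signaled = true;
  }
  cv.notify_one();
}

int main() {
  std::thread t(waiter);
  notifier();
  t.join();
}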
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -234,7 +234,7 @@ SIG_REGS(sc).u_regs[CON_G3], SIG_REGS(sc).u_regs[CON_G4]); st->print_cr(" G5=" INTPTR_FORMAT " G6=" INTPTR_FORMAT - " G7=" INTPTR_FORMAT " Y=" INTPTR_FORMAT, + " G7=" INTPTR_FORMAT " Y=0x%x", SIG_REGS(sc).u_regs[CON_G5], SIG_REGS(sc).u_regs[CON_G6], SIG_REGS(sc).u_regs[CON_G7], @@ -285,7 +285,7 @@ st->cr(); st->cr(); - st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp); + st->print_cr("Top of Stack: (sp=" INTPTR_FORMAT ")", p2i(sp)); print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t)); st->cr(); @@ -293,7 +293,7 @@ // point to garbage if entry point in an nmethod is corrupted. Leave // this at the end, and hope for the best. address pc = os::Linux::ucontext_get_pc(uc); - st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc); + st->print_cr("Instructions: (pc=" INTPTR_FORMAT ")", p2i(pc)); print_hex_dump(st, pc - 32, pc + 32, sizeof(char)); } @@ -453,7 +453,7 @@ && pc < MacroAssembler::_verify_oop_implicit_branch[1] ) { *stub = MacroAssembler::_verify_oop_implicit_branch[2]; warning("fixed up memory fault in +VerifyOops at address " - INTPTR_FORMAT, fault); + INTPTR_FORMAT, p2i(fault)); return true; } return false;
--- a/hotspot/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -36,7 +36,7 @@ } while (!feof(fp)) { - if (fscanf(fp, "cpu\t\t: %100[^\n]", &cpu) == 1) { + if (fscanf(fp, "cpu\t\t: %100[^\n]", cpu) == 1) { if (strstr(cpu, "Niagara") != NULL) { rv = true; }
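The one-character fix above matters because cpu is a char array: the array itself decays to the char* that a %[ conversion expects, while &cpu has type "pointer to array" and passing it is undefined behavior even though the address is the same. A small self-contained illustration of the corrected call, using sscanf on an example /proc/cpuinfo-style line instead of the real file:

#include <cstdio>
#include <cstring>

int main() {
  const char* line = "cpu\t\t: UltraSPARC T2 (Niagara2)";  // illustrative input
  char cpu[101];   // "%100[^\n]" stores at most 100 chars plus the NUL

  // Correct: pass the array (it decays to char*), not &cpu.
  if (std::sscanf(line, "cpu\t\t: %100[^\n]", cpu) == 1 &&
      std::strstr(cpu, "Niagara") != NULL) {
    std::puts("Niagara CPU detected");
  }
}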
--- a/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -137,6 +137,21 @@ #endif if (av & AV_SPARC_AES) features |= aes_instructions_m; +#ifndef AV_SPARC_SHA1 +#define AV_SPARC_SHA1 0x00400000 /* sha1 instruction supported */ +#endif + if (av & AV_SPARC_SHA1) features |= sha1_instruction_m; + +#ifndef AV_SPARC_SHA256 +#define AV_SPARC_SHA256 0x00800000 /* sha256 instruction supported */ +#endif + if (av & AV_SPARC_SHA256) features |= sha256_instruction_m; + +#ifndef AV_SPARC_SHA512 +#define AV_SPARC_SHA512 0x01000000 /* sha512 instruction supported */ +#endif + if (av & AV_SPARC_SHA512) features |= sha512_instruction_m; + } else { // getisax(2) failed, use the old legacy code. #ifndef PRODUCT
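The Solaris probe keeps the file's existing convention: if the build machine's auxv header predates the new hardware capabilities, define the bits locally (the values are exactly the ones added in the hunk above), then AND them against the vector filled in by getisax(2). A standalone sketch of just the masking step, with a hardcoded capability word so it runs off Solaris; the sha*_m masks use the bit positions 20-22 from the matching vm_version_sparc.hpp change:

#include <cstdint>
#include <cstdio>

#ifndef AV_SPARC_SHA1
#define AV_SPARC_SHA1   0x00400000  /* sha1 instruction supported */
#endif
#ifndef AV_SPARC_SHA256
#define AV_SPARC_SHA256 0x00800000  /* sha256 instruction supported */
#endif
#ifndef AV_SPARC_SHA512
#define AV_SPARC_SHA512 0x01000000  /* sha512 instruction supported */
#endif

int main() {
  // In HotSpot 'av' comes from getisax(2); a fixed value keeps this sketch
  // runnable on any platform.
  uint32_t av = AV_SPARC_SHA1 | AV_SPARC_SHA256;

  const uint32_t sha1_m = 1u << 20, sha256_m = 1u << 21, sha512_m = 1u << 22;
  uint32_t features = 0;
  if (av & AV_SPARC_SHA1)   features |= sha1_m;
  if (av & AV_SPARC_SHA256) features |= sha256_m;
  if (av & AV_SPARC_SHA512) features |= sha512_m;

  std::printf("features mask: 0x%x\n", features);
}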
--- a/hotspot/src/share/vm/adlc/output_c.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/adlc/output_c.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1000,7 +1000,7 @@ fprintf(fp_cpp, "void Bundle::initialize_nops(MachNode * nop_list[%d], Compile *C) {\n", nopcnt); int i = 0; for ( _pipeline->_noplist.reset(); (nop = _pipeline->_noplist.iter()) != NULL; i++ ) { - fprintf(fp_cpp, " nop_list[%d] = (MachNode *) new (C) %sNode();\n", i, nop); + fprintf(fp_cpp, " nop_list[%d] = (MachNode *) new %sNode();\n", i, nop); } fprintf(fp_cpp, "};\n\n"); fprintf(fp_cpp, "#ifndef PRODUCT\n"); @@ -1328,7 +1328,7 @@ preplace->next_instruction(root_inst); InstructForm *root_form = globals[root_inst]->is_instruction(); assert( root_form != NULL, "Replacement instruction was not previously defined"); - fprintf(fp, " %sNode *root = new (C) %sNode();\n", root_inst, root_inst); + fprintf(fp, " %sNode *root = new %sNode();\n", root_inst, root_inst); int inst_num; const char *op_name; @@ -1497,11 +1497,11 @@ new_oper = frm->is_operand(); char *tmp = (char *)node->_exprule->_newopconst[new_id]; if (tmp == NULL) { - fprintf(fp," MachOper *op%d = new (C) %sOper();\n", + fprintf(fp," MachOper *op%d = new %sOper();\n", cnt, new_oper->_ident); } else { - fprintf(fp," MachOper *op%d = new (C) %sOper(%s);\n", + fprintf(fp," MachOper *op%d = new %sOper(%s);\n", cnt, new_oper->_ident, tmp); } } @@ -1566,7 +1566,7 @@ } // Build the node for the instruction - fprintf(fp,"\n %sNode *n%d = new (C) %sNode();\n", new_id, cnt, new_id); + fprintf(fp,"\n %sNode *n%d = new %sNode();\n", new_id, cnt, new_id); // Add control edge for this node fprintf(fp," n%d->add_req(_in[0]);\n", cnt); // Build the operand for the value this node defines. @@ -1729,7 +1729,7 @@ declared_def = true; } if (op && op->_interface && op->_interface->is_RegInterface()) { - fprintf(fp," def = new (C) MachTempNode(state->MachOperGenerator( %s, C ));\n", + fprintf(fp," def = new MachTempNode(state->MachOperGenerator( %s, C ));\n", machOperEnum(op->_ident)); fprintf(fp," add_req(def);\n"); // The operand for TEMP is already constructed during @@ -1760,7 +1760,7 @@ } fprintf(fp," kill = "); - fprintf(fp,"new (C) MachProjNode( %s, %d, (%s), Op_%s );\n", + fprintf(fp,"new MachProjNode( %s, %d, (%s), Op_%s );\n", machNode, proj_no++, regmask, ideal_type); fprintf(fp," proj_list.push(kill);\n"); } @@ -2840,7 +2840,7 @@ // generate code to create a clone for a class derived from MachOper // // (0) MachOper *MachOperXOper::clone(Compile* C) const { -// (1) return new (C) MachXOper( _ccode, _c0, _c1, ..., _cn); +// (1) return new MachXOper( _ccode, _c0, _c1, ..., _cn); // (2) } // static void defineClone(FILE *fp, FormDict &globalNames, OperandForm &oper) { @@ -2849,7 +2849,7 @@ const int num_consts = oper.num_consts(globalNames); const bool is_ideal_bool = oper.is_ideal_bool(); if( (num_consts > 0) ) { - fprintf(fp," return new (C) %sOper(", oper._ident); + fprintf(fp," return new %sOper(", oper._ident); // generate parameters for constants int i = 0; fprintf(fp,"_c%d", i); @@ -2861,7 +2861,7 @@ } else { assert( num_consts == 0, "Currently support zero or one constant per operand clone function"); - fprintf(fp," return new (C) %sOper();\n", oper._ident); + fprintf(fp," return new %sOper();\n", oper._ident); } // finish method fprintf(fp,"}\n"); @@ -3106,7 +3106,7 @@ defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper); fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper->_ident); - fprintf(fp," return new (C) %sOper(_label, _block_num);\n", 
oper->_ident); + fprintf(fp," return new %sOper(_label, _block_num);\n", oper->_ident); fprintf(fp,"}\n"); fprintf(fp,"uint %sOper::opcode() const { return %s; }\n", @@ -3125,7 +3125,7 @@ defineIn_RegMask(_CPP_MISC_file._fp, _globalNames, *oper); fprintf(fp,"MachOper *%sOper::clone(Compile* C) const {\n", oper->_ident); - fprintf(fp," return new (C) %sOper(_method);\n", oper->_ident); + fprintf(fp," return new %sOper(_method);\n", oper->_ident); fprintf(fp,"}\n"); fprintf(fp,"uint %sOper::opcode() const { return %s; }\n", @@ -3815,7 +3815,7 @@ // Generate the case statement for this opcode fprintf(fp, " case %s:", opEnumName); - fprintf(fp, "\n return new (C) %sOper(", opName); + fprintf(fp, "\n return new %sOper(", opName); // Access parameters for constructor from the stat object // // Build access to condition code value @@ -3894,7 +3894,7 @@ const char *opClass = inst->_ident; // Create the MachNode object - fprintf(fp_cpp, "%s %sNode *node = new (C) %sNode();\n",indent, opClass,opClass); + fprintf(fp_cpp, "%s %sNode *node = new %sNode();\n",indent, opClass,opClass); if ( (inst->num_post_match_opnds() != 0) ) { // Instruction that contains operands which are not in match rule. @@ -3936,7 +3936,7 @@ // Check for multiple constants and then fill them in. // Just like MachOperGenerator const char *opName = inst->_matrule->_rChild->_opType; - fprintf(fp_cpp, "new (C) %sOper(", opName); + fprintf(fp_cpp, "new %sOper(", opName); // Grab operand form OperandForm *op = (_globalNames[opName])->is_operand(); // Look up the number of constants @@ -4010,7 +4010,7 @@ fprintf(fp_cpp, "// Build CISC version of this instruction\n"); fprintf(fp_cpp, "MachNode *%sNode::cisc_version( int offset, Compile* C ) {\n", this->_ident); // Create the MachNode object - fprintf(fp_cpp, " %sNode *node = new (C) %sNode();\n", name, name); + fprintf(fp_cpp, " %sNode *node = new %sNode();\n", name, name); // Fill in the bottom_type where requested if ( this->captures_bottom_type(AD.globalNames()) ) { fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n"); @@ -4026,7 +4026,7 @@ fprintf(fp_cpp, " fill_new_machnode(node, C);\n"); // Construct operand to access [stack_pointer + offset] fprintf(fp_cpp, " // Construct operand to access [stack_pointer + offset]\n"); - fprintf(fp_cpp, " node->set_opnd_array(cisc_operand(), new (C) %sOper(offset));\n", cisc_oper_name); + fprintf(fp_cpp, " node->set_opnd_array(cisc_operand(), new %sOper(offset));\n", cisc_oper_name); fprintf(fp_cpp, "\n"); // Return result and exit scope @@ -4057,7 +4057,7 @@ fprintf(fp_cpp, "// Build short branch version of this instruction\n"); fprintf(fp_cpp, "MachNode *%sNode::short_branch_version(Compile* C) {\n", this->_ident); // Create the MachNode object - fprintf(fp_cpp, " %sNode *node = new (C) %sNode();\n", name, name); + fprintf(fp_cpp, " %sNode *node = new %sNode();\n", name, name); if( is_ideal_if() ) { fprintf(fp_cpp, " node->_prob = _prob;\n"); fprintf(fp_cpp, " node->_fcnt = _fcnt;\n");
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -2054,7 +2054,7 @@ bool will_link; ciKlass* klass = stream()->get_klass(will_link); assert(klass->is_instance_klass(), "must be an instance klass"); - NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before); + NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass()); _memory->new_instance(new_instance); apush(append_split(new_instance)); }
--- a/hotspot/src/share/vm/c1/c1_Instruction.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/c1/c1_Instruction.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -1291,16 +1291,18 @@ LEAF(NewInstance, StateSplit) private: ciInstanceKlass* _klass; + bool _is_unresolved; public: // creation - NewInstance(ciInstanceKlass* klass, ValueStack* state_before) + NewInstance(ciInstanceKlass* klass, ValueStack* state_before, bool is_unresolved) : StateSplit(instanceType, state_before) - , _klass(klass) + , _klass(klass), _is_unresolved(is_unresolved) {} // accessors ciInstanceKlass* klass() const { return _klass; } + bool is_unresolved() const { return _is_unresolved; } virtual bool needs_exception_state() const { return false; }
--- a/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/c1/c1_LIRAssembler.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -336,7 +336,6 @@ void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) { - _masm->code_section()->relocate(pc(), relocInfo::poll_type); int pc_offset = code_offset(); flush_debug_info(pc_offset); info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -466,8 +466,11 @@ } -void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) { - if (!obj->is_loaded() || PatchALot) { +void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) { + /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation + * is active and the class hasn't yet been resolved we need to emit a patch that resolves + * the class. */ + if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) { assert(info != NULL, "info must be set if class is not loaded"); __ klass2reg_patch(NULL, r, info); } else { @@ -660,9 +663,18 @@ __ unlock_object(hdr, object, lock, scratch, slow_path); } - -void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) { - klass2reg_with_patching(klass_reg, klass, info); +#ifndef PRODUCT +void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) { + if (PrintNotLoaded && !new_instance->klass()->is_loaded()) { + tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci()); + } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) { + tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci()); + } +} +#endif + +void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) { + klass2reg_with_patching(klass_reg, klass, info, is_unresolved); // If klass is not loaded we do not know if the klass has finalizers: if (UseFastNewInstance && klass->is_loaded() && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
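The need_resolve parameter threaded through new_instance() and klass2reg_with_patching() exists because, under tiered compilation, C2's ciTypeFlow expects the constant-pool class entry to be resolved by the time the method is recompiled, so C1 must emit a resolving patch even for a class that is loaded but not yet resolved at the new bytecode. The resulting test is a simple predicate, restated here as standalone C++ with the flag names kept and everything else illustrative:

// Condition under which LIRGenerator::klass2reg_with_patching now emits a
// patching stub (which resolves the class at run time) instead of loading a
// metadata constant directly.
bool needs_class_patch(bool tiered_compilation, bool need_resolve,
                       bool klass_is_loaded, bool patch_a_lot) {
  return (tiered_compilation && need_resolve) || !klass_is_loaded || patch_a_lot;
}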
--- a/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/c1/c1_LIRGenerator.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -169,6 +169,8 @@ return this; } + void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN; + #ifdef ASSERT LIR_List* lir(const char * file, int line) const { _lir->set_file_and_line(file, line); @@ -307,7 +309,7 @@ void store_stack_parameter (LIR_Opr opr, ByteSize offset_from_sp_in_bytes); - void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info); + void klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve = false); // this loads the length and compares against the index void array_range_check (LIR_Opr array, LIR_Opr index, CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info); @@ -325,7 +327,7 @@ void monitor_enter (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info); void monitor_exit (LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no); - void new_instance (LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info); + void new_instance (LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info); // machine dependent void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -123,24 +123,24 @@ int Runtime1::_throw_array_store_exception_count = 0; int Runtime1::_throw_count = 0; -static int _byte_arraycopy_cnt = 0; -static int _short_arraycopy_cnt = 0; -static int _int_arraycopy_cnt = 0; -static int _long_arraycopy_cnt = 0; -static int _oop_arraycopy_cnt = 0; +static int _byte_arraycopy_stub_cnt = 0; +static int _short_arraycopy_stub_cnt = 0; +static int _int_arraycopy_stub_cnt = 0; +static int _long_arraycopy_stub_cnt = 0; +static int _oop_arraycopy_stub_cnt = 0; address Runtime1::arraycopy_count_address(BasicType type) { switch (type) { case T_BOOLEAN: - case T_BYTE: return (address)&_byte_arraycopy_cnt; + case T_BYTE: return (address)&_byte_arraycopy_stub_cnt; case T_CHAR: - case T_SHORT: return (address)&_short_arraycopy_cnt; + case T_SHORT: return (address)&_short_arraycopy_stub_cnt; case T_FLOAT: - case T_INT: return (address)&_int_arraycopy_cnt; + case T_INT: return (address)&_int_arraycopy_stub_cnt; case T_DOUBLE: - case T_LONG: return (address)&_long_arraycopy_cnt; + case T_LONG: return (address)&_long_arraycopy_stub_cnt; case T_ARRAY: - case T_OBJECT: return (address)&_oop_arraycopy_cnt; + case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt; default: ShouldNotReachHere(); return NULL; @@ -1479,13 +1479,13 @@ tty->print_cr(" _ic_miss_cnt: %d", SharedRuntime::_ic_miss_ctr); tty->print_cr(" _generic_arraycopy_cnt: %d", _generic_arraycopy_cnt); tty->print_cr(" _generic_arraycopystub_cnt: %d", _generic_arraycopystub_cnt); - tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_cnt); - tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_cnt); - tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_cnt); - tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_cnt); + tty->print_cr(" _byte_arraycopy_cnt: %d", _byte_arraycopy_stub_cnt); + tty->print_cr(" _short_arraycopy_cnt: %d", _short_arraycopy_stub_cnt); + tty->print_cr(" _int_arraycopy_cnt: %d", _int_arraycopy_stub_cnt); + tty->print_cr(" _long_arraycopy_cnt: %d", _long_arraycopy_stub_cnt); tty->print_cr(" _primitive_arraycopy_cnt: %d", _primitive_arraycopy_cnt); tty->print_cr(" _oop_arraycopy_cnt (C): %d", Runtime1::_oop_arraycopy_cnt); - tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_cnt); + tty->print_cr(" _oop_arraycopy_cnt (stub): %d", _oop_arraycopy_stub_cnt); tty->print_cr(" _arraycopy_slowcase_cnt: %d", _arraycopy_slowcase_cnt); tty->print_cr(" _arraycopy_checkcast_cnt: %d", _arraycopy_checkcast_cnt); tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -790,6 +790,26 @@ do_name( decrypt_name, "decrypt") \ do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)I") \ \ + /* support for sun.security.provider.SHA */ \ + do_class(sun_security_provider_sha, "sun/security/provider/SHA") \ + do_intrinsic(_sha_implCompress, sun_security_provider_sha, implCompress_name, implCompress_signature, F_R) \ + do_name( implCompress_name, "implCompress") \ + do_signature(implCompress_signature, "([BI)V") \ + \ + /* support for sun.security.provider.SHA2 */ \ + do_class(sun_security_provider_sha2, "sun/security/provider/SHA2") \ + do_intrinsic(_sha2_implCompress, sun_security_provider_sha2, implCompress_name, implCompress_signature, F_R) \ + \ + /* support for sun.security.provider.SHA5 */ \ + do_class(sun_security_provider_sha5, "sun/security/provider/SHA5") \ + do_intrinsic(_sha5_implCompress, sun_security_provider_sha5, implCompress_name, implCompress_signature, F_R) \ + \ + /* support for sun.security.provider.DigestBase */ \ + do_class(sun_security_provider_digestbase, "sun/security/provider/DigestBase") \ + do_intrinsic(_digestBase_implCompressMB, sun_security_provider_digestbase, implCompressMB_name, implCompressMB_signature, F_R) \ + do_name( implCompressMB_name, "implCompressMultiBlock") \ + do_signature(implCompressMB_signature, "([BII)I") \ + \ /* support for java.util.zip */ \ do_class(java_util_zip_CRC32, "java/util/zip/CRC32") \ do_intrinsic(_updateCRC32, java_util_zip_CRC32, update_name, int2_int_signature, F_SN) \
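Each do_intrinsic entry above pairs a holder class and method name with a JVM type descriptor: "([BI)V" describes void implCompress(byte[], int) and "([BII)I" describes int implCompressMultiBlock(byte[], int, int) on sun.security.provider.DigestBase and its SHA/SHA2/SHA5 subclasses. A tiny table of that mapping (parameter names in the decoded form are illustrative, since descriptors carry only types), kept in C++ so all sketches here share one language:

#include <cstdio>

struct IntrinsicSig {
  const char* descriptor;
  const char* java_meaning;
};

int main() {
  const IntrinsicSig sigs[] = {
    { "([BI)V",  "void implCompress(byte[] buf, int ofs)" },
    { "([BII)I", "int implCompressMultiBlock(byte[] buf, int ofs, int limit)" },
  };
  for (const IntrinsicSig& s : sigs) {
    std::printf("%-10s -> %s\n", s.descriptor, s.java_meaning);
  }
}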
--- a/hotspot/src/share/vm/code/relocInfo.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/code/relocInfo.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -877,11 +877,7 @@ void internal_word_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { address target = _target; if (target == NULL) { - if (addr_in_const()) { - target = new_addr_for(*(address*)addr(), src, dest); - } else { - target = new_addr_for(pd_get_address_from_code(), src, dest); - } + target = new_addr_for(this->target(), src, dest); } set_value(target); } @@ -890,7 +886,11 @@ address internal_word_Relocation::target() { address target = _target; if (target == NULL) { - target = pd_get_address_from_code(); + if (addr_in_const()) { + target = *(address*)addr(); + } else { + target = pd_get_address_from_code(); + } } return target; }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1341,13 +1341,14 @@ bool CMSAdaptiveSizePolicy::print_adaptive_size_policy_on( outputStream* st) const { - if (!UseAdaptiveSizePolicy) return false; + if (!UseAdaptiveSizePolicy) { + return false; + } GenCollectedHeap* gch = GenCollectedHeap::heap(); - Generation* gen0 = gch->get_gen(0); - DefNewGeneration* def_new = gen0->as_DefNewGeneration(); - return - AdaptiveSizePolicy::print_adaptive_size_policy_on( + Generation* young = gch->get_gen(0); + DefNewGeneration* def_new = young->as_DefNewGeneration(); + return AdaptiveSizePolicy::print_adaptive_size_policy_on( st, def_new->tenuring_threshold()); }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -60,21 +60,21 @@ if (UseParNewGC) { if (UseAdaptiveSizePolicy) { _generations[0] = new GenerationSpec(Generation::ASParNew, - _initial_gen0_size, _max_gen0_size); + _initial_young_size, _max_young_size); } else { _generations[0] = new GenerationSpec(Generation::ParNew, - _initial_gen0_size, _max_gen0_size); + _initial_young_size, _max_young_size); } } else { _generations[0] = new GenerationSpec(Generation::DefNew, - _initial_gen0_size, _max_gen0_size); + _initial_young_size, _max_young_size); } if (UseAdaptiveSizePolicy) { _generations[1] = new GenerationSpec(Generation::ASConcurrentMarkSweep, - _initial_gen1_size, _max_gen1_size); + _initial_old_size, _max_old_size); } else { _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep, - _initial_gen1_size, _max_gen1_size); + _initial_old_size, _max_old_size); } if (_generations[0] == NULL || _generations[1] == NULL) {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1138,8 +1138,8 @@ void CMSCollector::icms_update_allocation_limits() { - Generation* gen0 = GenCollectedHeap::heap()->get_gen(0); - EdenSpace* eden = gen0->as_DefNewGeneration()->eden(); + Generation* young = GenCollectedHeap::heap()->get_gen(0); + EdenSpace* eden = young->as_DefNewGeneration()->eden(); const unsigned int duty_cycle = stats().icms_update_duty_cycle(); if (CMSTraceIncrementalPacing) {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -1193,10 +1193,9 @@ // Does a "full" (forced) collection invoked on this generation collect // all younger generations as well? Note that the second conjunct is a // hack to allow the collection of the younger gen first if the flag is - // set. This is better than using th policy's should_collect_gen0_first() - // since that causes us to do an extra unnecessary pair of restart-&-stop-world. + // set. virtual bool full_collects_younger_generations() const { - return UseCMSCompactAtFullCollection && !CollectGen0First; + return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC; } void space_iterate(SpaceClosure* blk, bool usedOnly = false);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -3622,7 +3622,7 @@ void G1CollectedHeap::print_tracing_info() const { // We'll overload this to mean "trace GC pause statistics." - if (TraceGen0Time || TraceGen1Time) { + if (TraceYoungGenTime || TraceOldGenTime) { // The "G1CollectorPolicy" is keeping track of these stats, so delegate // to that. g1_policy()->print_tracing_info();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -809,7 +809,7 @@ double full_gc_time_sec = end_sec - _full_collection_start_sec; double full_gc_time_ms = full_gc_time_sec * 1000.0; - _trace_gen1_time_data.record_full_collection(full_gc_time_ms); + _trace_old_gen_time_data.record_full_collection(full_gc_time_ms); update_recent_gc_times(end_sec, full_gc_time_ms); @@ -851,7 +851,7 @@ _g1->used(), _g1->recalculate_used())); double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0; - _trace_gen0_time_data.record_start_collection(s_w_t_ms); + _trace_young_gen_time_data.record_start_collection(s_w_t_ms); _stop_world_start = 0.0; record_heap_size_info_at_start(false /* full */); @@ -906,7 +906,7 @@ void G1CollectorPolicy::record_concurrent_pause() { if (_stop_world_start > 0.0) { double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0; - _trace_gen0_time_data.record_yield_time(yield_ms); + _trace_young_gen_time_data.record_yield_time(yield_ms); } } @@ -993,7 +993,7 @@ evacuation_info.set_bytes_copied(_bytes_copied_during_gc); if (update_stats) { - _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times()); + _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times()); // this is where we update the allocation rate of the application double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms); @@ -1415,8 +1415,8 @@ } void G1CollectorPolicy::print_tracing_info() const { - _trace_gen0_time_data.print(); - _trace_gen1_time_data.print(); + _trace_young_gen_time_data.print(); + _trace_old_gen_time_data.print(); } void G1CollectorPolicy::print_yg_surv_rate_info() const { @@ -1973,9 +1973,9 @@ _last_gc_was_young = gcs_are_young() ? 
true : false; if (_last_gc_was_young) { - _trace_gen0_time_data.increment_young_collection_count(); + _trace_young_gen_time_data.increment_young_collection_count(); } else { - _trace_gen0_time_data.increment_mixed_collection_count(); + _trace_young_gen_time_data.increment_mixed_collection_count(); } // The young list is laid with the survivor regions from the previous @@ -2156,20 +2156,20 @@ evacuation_info.set_collectionset_regions(cset_region_length()); } -void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) { - if(TraceGen0Time) { +void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) { + if(TraceYoungGenTime) { _all_stop_world_times_ms.add(time_to_stop_the_world_ms); } } -void TraceGen0TimeData::record_yield_time(double yield_time_ms) { - if(TraceGen0Time) { +void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) { + if(TraceYoungGenTime) { _all_yield_times_ms.add(yield_time_ms); } } -void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) { - if(TraceGen0Time) { +void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) { + if(TraceYoungGenTime) { _total.add(pause_time_ms); _other.add(pause_time_ms - phase_times->accounted_time_ms()); _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms()); @@ -2194,34 +2194,34 @@ } } -void TraceGen0TimeData::increment_young_collection_count() { - if(TraceGen0Time) { +void TraceYoungGenTimeData::increment_young_collection_count() { + if(TraceYoungGenTime) { ++_young_pause_num; } } -void TraceGen0TimeData::increment_mixed_collection_count() { - if(TraceGen0Time) { +void TraceYoungGenTimeData::increment_mixed_collection_count() { + if(TraceYoungGenTime) { ++_mixed_pause_num; } } -void TraceGen0TimeData::print_summary(const char* str, - const NumberSeq* seq) const { +void TraceYoungGenTimeData::print_summary(const char* str, + const NumberSeq* seq) const { double sum = seq->sum(); gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)", str, sum / 1000.0, seq->avg()); } -void TraceGen0TimeData::print_summary_sd(const char* str, - const NumberSeq* seq) const { +void TraceYoungGenTimeData::print_summary_sd(const char* str, + const NumberSeq* seq) const { print_summary(str, seq); gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)", "(num", seq->num(), seq->sd(), seq->maximum()); } -void TraceGen0TimeData::print() const { - if (!TraceGen0Time) { +void TraceYoungGenTimeData::print() const { + if (!TraceYoungGenTime) { return; } @@ -2258,14 +2258,14 @@ print_summary_sd(" Yields", &_all_yield_times_ms); } -void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) { - if (TraceGen1Time) { +void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) { + if (TraceOldGenTime) { _all_full_gc_times.add(full_gc_time_ms); } } -void TraceGen1TimeData::print() const { - if (!TraceGen1Time) { +void TraceOldGenTimeData::print() const { + if (!TraceOldGenTime) { return; }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -38,10 +38,10 @@ class CollectionSetChooser; class G1GCPhaseTimes; -// TraceGen0Time collects data on _both_ young and mixed evacuation pauses +// TraceYoungGenTime collects data on _both_ young and mixed evacuation pauses // (the latter may contain non-young regions - i.e. regions that are -// technically in Gen1) while TraceGen1Time collects data about full GCs. -class TraceGen0TimeData : public CHeapObj<mtGC> { +// technically in old) while TraceOldGenTime collects data about full GCs. +class TraceYoungGenTimeData : public CHeapObj<mtGC> { private: unsigned _young_pause_num; unsigned _mixed_pause_num; @@ -66,7 +66,7 @@ void print_summary_sd(const char* str, const NumberSeq* seq) const; public: - TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {}; + TraceYoungGenTimeData() : _young_pause_num(0), _mixed_pause_num(0) {}; void record_start_collection(double time_to_stop_the_world_ms); void record_yield_time(double yield_time_ms); void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times); @@ -75,7 +75,7 @@ void print() const; }; -class TraceGen1TimeData : public CHeapObj<mtGC> { +class TraceOldGenTimeData : public CHeapObj<mtGC> { private: NumberSeq _all_full_gc_times; @@ -187,8 +187,8 @@ TruncatedSeq* _concurrent_mark_remark_times_ms; TruncatedSeq* _concurrent_mark_cleanup_times_ms; - TraceGen0TimeData _trace_gen0_time_data; - TraceGen1TimeData _trace_gen1_time_data; + TraceYoungGenTimeData _trace_young_gen_time_data; + TraceOldGenTimeData _trace_old_gen_time_data; double _stop_world_start; @@ -202,20 +202,20 @@ // locker is active. This should be >= _young_list_target_length; uint _young_list_max_length; - bool _last_gc_was_young; + bool _last_gc_was_young; - bool _during_marking; - bool _in_marking_window; - bool _in_marking_window_im; + bool _during_marking; + bool _in_marking_window; + bool _in_marking_window_im; - SurvRateGroup* _short_lived_surv_rate_group; - SurvRateGroup* _survivor_surv_rate_group; + SurvRateGroup* _short_lived_surv_rate_group; + SurvRateGroup* _survivor_surv_rate_group; // add here any more surv rate groups - double _gc_overhead_perc; + double _gc_overhead_perc; double _reserve_factor; - uint _reserve_regions; + uint _reserve_regions; bool during_marking() { return _during_marking;
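The rename from TraceGen0Time/TraceGen1Time to TraceYoungGenTime/TraceOldGenTime is mechanical, but it helps to see what the renamed class does: it is a small accumulator that records each pause while the flag is on and prints totals and averages at the end. A stripped-down standalone analog of that pattern (std::vector in place of HotSpot's NumberSeq, and only the total/average summary):

#include <cstdio>
#include <vector>

// Minimal analog of TraceYoungGenTimeData: collect per-pause times while the
// tracing flag is on, print a summary on request.
class PauseTimeData {
  std::vector<double> _pauses_ms;
  bool _enabled;
 public:
  explicit PauseTimeData(bool enabled) : _enabled(enabled) {}
  void record_end_collection(double pause_time_ms) {
    if (_enabled) _pauses_ms.push_back(pause_time_ms);
  }
  void print() const {
    if (!_enabled || _pauses_ms.empty()) return;
    double sum = 0.0;
    for (double p : _pauses_ms) sum += p;
    std::printf("%-27s = %8.2lf s (avg = %8.2lf ms)\n",
                "young GC pauses", sum / 1000.0, sum / _pauses_ms.size());
  }
};

int main() {
  PauseTimeData young(true);   // corresponds to -XX:+TraceYoungGenTime
  young.record_end_collection(12.5);
  young.record_end_collection(9.3);
  young.print();
}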
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -35,14 +35,14 @@ AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs, GenerationSizer* policy, size_t alignment) : - _virtual_spaces(old_young_rs, policy->min_gen1_size(), - policy->min_gen0_size(), alignment) { - size_t init_low_byte_size = policy->initial_gen1_size(); - size_t min_low_byte_size = policy->min_gen1_size(); - size_t max_low_byte_size = policy->max_gen1_size(); - size_t init_high_byte_size = policy->initial_gen0_size(); - size_t min_high_byte_size = policy->min_gen0_size(); - size_t max_high_byte_size = policy->max_gen0_size(); + _virtual_spaces(old_young_rs, policy->min_old_size(), + policy->min_young_size(), alignment) { + size_t init_low_byte_size = policy->initial_old_size(); + size_t min_low_byte_size = policy->min_old_size(); + size_t max_low_byte_size = policy->max_old_size(); + size_t init_high_byte_size = policy->initial_young_size(); + size_t min_high_byte_size = policy->min_young_size(); + size_t max_high_byte_size = policy->max_young_size(); assert(min_low_byte_size <= init_low_byte_size && init_low_byte_size <= max_low_byte_size, "Parameter check");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -32,8 +32,8 @@ SIZE_FORMAT "," SIZE_FORMAT " " SIZE_FORMAT, str, - _min_gen1_size / K, _max_gen1_size / K, - _min_gen0_size / K, _max_gen0_size / K, + _min_old_size / K, _max_old_size / K, + _min_young_size / K, _max_young_size / K, _max_heap_byte_size / K); } }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -623,11 +623,11 @@ } void ParallelScavengeHeap::print_tracing_info() const { - if (TraceGen0Time) { + if (TraceYoungGenTime) { double time = PSScavenge::accumulated_time()->seconds(); tty->print_cr("[Accumulated GC generation 0 time %3.7f secs]", time); } - if (TraceGen1Time) { + if (TraceOldGenTime) { double time = UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweep::accumulated_time()->seconds(); tty->print_cr("[Accumulated GC generation 1 time %3.7f secs]", time); }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -174,7 +174,7 @@ TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); - if (TraceGen1Time) accumulated_time()->start(); + if (TraceOldGenTime) accumulated_time()->start(); // Let the size policy know we're starting size_policy->major_collection_begin(); @@ -354,7 +354,7 @@ // We collected the heap, recalculate the metaspace capacity MetaspaceGC::compute_new_size(); - if (TraceGen1Time) accumulated_time()->stop(); + if (TraceOldGenTime) accumulated_time()->stop(); if (PrintGC) { if (PrintGCDetails) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -2061,7 +2061,7 @@ TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); - if (TraceGen1Time) accumulated_time()->start(); + if (TraceOldGenTime) accumulated_time()->start(); // Let the size policy know we're starting size_policy->major_collection_begin(); @@ -2188,7 +2188,7 @@ // Resize the metaspace capacity after a collection MetaspaceGC::compute_new_size(); - if (TraceGen1Time) accumulated_time()->stop(); + if (TraceOldGenTime) accumulated_time()->stop(); if (PrintGC) { if (PrintGCDetails) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -336,7 +336,7 @@ TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(false /* not full GC */,gc_cause); - if (TraceGen0Time) accumulated_time()->start(); + if (TraceYoungGenTime) accumulated_time()->start(); // Let the size policy know we're starting size_policy->minor_collection_begin(); @@ -660,7 +660,7 @@ CardTableExtension::verify_all_young_refs_imprecise(); } - if (TraceGen0Time) accumulated_time()->stop(); + if (TraceYoungGenTime) accumulated_time()->stop(); if (PrintGC) { if (PrintGCDetails) {
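Note on the hunks above: the TraceGen0Time/TraceGen1Time flags become TraceYoungGenTime/TraceOldGenTime, and each collector brackets its collection with accumulated_time()->start()/stop() under the flag, printing the total in print_tracing_info(). Below is a minimal standalone sketch of that flag-guarded accumulated-timer pattern; AccumulatedTimer, young_gc_time and main() are illustrative stand-ins (not HotSpot's elapsedTimer), while the flag name and print format come from the diff.

    // Minimal sketch of the flag-guarded accumulated-timer pattern.
    #include <chrono>
    #include <cstdio>

    static bool TraceYoungGenTime = true;   // stands in for -XX:+TraceYoungGenTime

    class AccumulatedTimer {
      std::chrono::steady_clock::time_point _start;
      double _seconds = 0.0;
     public:
      void start() { _start = std::chrono::steady_clock::now(); }
      void stop()  {
        _seconds += std::chrono::duration<double>(
            std::chrono::steady_clock::now() - _start).count();
      }
      double seconds() const { return _seconds; }
    };

    static AccumulatedTimer young_gc_time;

    void do_young_collection() {
      if (TraceYoungGenTime) young_gc_time.start();   // as around PSScavenge's pause
      /* ... collect the young generation ... */
      if (TraceYoungGenTime) young_gc_time.stop();
    }

    int main() {
      do_young_collection();
      // Mirrors ParallelScavengeHeap::print_tracing_info()
      if (TraceYoungGenTime)
        printf("[Accumulated GC generation 0 time %3.7f secs]\n", young_gc_time.seconds());
      return 0;
    }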
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -196,13 +196,13 @@ // GenCollectorPolicy methods GenCollectorPolicy::GenCollectorPolicy() : - _min_gen0_size(0), - _initial_gen0_size(0), - _max_gen0_size(0), + _min_young_size(0), + _initial_young_size(0), + _max_young_size(0), _gen_alignment(0), - _min_gen1_size(0), - _initial_gen1_size(0), - _max_gen1_size(0), + _min_old_size(0), + _initial_old_size(0), + _max_old_size(0), _generations(NULL) {} @@ -236,7 +236,7 @@ #ifdef ASSERT void GenCollectorPolicy::assert_flags() { CollectorPolicy::assert_flags(); - assert(NewSize >= _min_gen0_size, "Ergonomics decided on a too small young gen size"); + assert(NewSize >= _min_young_size, "Ergonomics decided on a too small young gen size"); assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes"); assert(NewSize % _gen_alignment == 0, "NewSize alignment"); @@ -249,28 +249,28 @@ CollectorPolicy::assert_size_info(); // GenCollectorPolicy::initialize_size_info may update the MaxNewSize assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes"); - assert(NewSize == _initial_gen0_size, "Discrepancy between NewSize flag and local storage"); - assert(MaxNewSize == _max_gen0_size, "Discrepancy between MaxNewSize flag and local storage"); - assert(OldSize == _initial_gen1_size, "Discrepancy between OldSize flag and local storage"); - assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes"); - assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes"); - assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment"); - assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment"); - assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment"); - assert(_min_gen0_size <= bound_minus_alignment(_min_gen0_size, _min_heap_byte_size), + assert(NewSize == _initial_young_size, "Discrepancy between NewSize flag and local storage"); + assert(MaxNewSize == _max_young_size, "Discrepancy between MaxNewSize flag and local storage"); + assert(OldSize == _initial_old_size, "Discrepancy between OldSize flag and local storage"); + assert(_min_young_size <= _initial_young_size, "Ergonomics decided on incompatible minimum and initial young gen sizes"); + assert(_initial_young_size <= _max_young_size, "Ergonomics decided on incompatible initial and maximum young gen sizes"); + assert(_min_young_size % _gen_alignment == 0, "_min_young_size alignment"); + assert(_initial_young_size % _gen_alignment == 0, "_initial_young_size alignment"); + assert(_max_young_size % _gen_alignment == 0, "_max_young_size alignment"); + assert(_min_young_size <= bound_minus_alignment(_min_young_size, _min_heap_byte_size), "Ergonomics made minimum young generation larger than minimum heap"); - assert(_initial_gen0_size <= bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size), + assert(_initial_young_size <= bound_minus_alignment(_initial_young_size, _initial_heap_byte_size), "Ergonomics made initial young generation larger than initial heap"); - assert(_max_gen0_size <= bound_minus_alignment(_max_gen0_size, _max_heap_byte_size), + 
assert(_max_young_size <= bound_minus_alignment(_max_young_size, _max_heap_byte_size), "Ergonomics made maximum young generation lager than maximum heap"); - assert(_min_gen1_size <= _initial_gen1_size, "Ergonomics decided on incompatible minimum and initial old gen sizes"); - assert(_initial_gen1_size <= _max_gen1_size, "Ergonomics decided on incompatible initial and maximum old gen sizes"); - assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment"); - assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment"); - assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes"); - assert(_min_gen0_size + _min_gen1_size <= _min_heap_byte_size, "Minimum generation sizes exceed minimum heap size"); - assert(_initial_gen0_size + _initial_gen1_size == _initial_heap_byte_size, "Initial generation sizes should match initial heap size"); - assert(_max_gen0_size + _max_gen1_size == _max_heap_byte_size, "Maximum generation sizes should match maximum heap size"); + assert(_min_old_size <= _initial_old_size, "Ergonomics decided on incompatible minimum and initial old gen sizes"); + assert(_initial_old_size <= _max_old_size, "Ergonomics decided on incompatible initial and maximum old gen sizes"); + assert(_max_old_size % _gen_alignment == 0, "_max_old_size alignment"); + assert(_initial_old_size % _gen_alignment == 0, "_initial_old_size alignment"); + assert(_max_heap_byte_size <= (_max_young_size + _max_old_size), "Total maximum heap sizes must be sum of generation maximum sizes"); + assert(_min_young_size + _min_old_size <= _min_heap_byte_size, "Minimum generation sizes exceed minimum heap size"); + assert(_initial_young_size + _initial_old_size == _initial_heap_byte_size, "Initial generation sizes should match initial heap size"); + assert(_max_young_size + _max_old_size == _max_heap_byte_size, "Maximum generation sizes should match maximum heap size"); } #endif // ASSERT @@ -323,8 +323,8 @@ // later when setting the initial and minimum young generation size. 
NewSize = bounded_new_size; } - _min_gen0_size = smallest_new_size; - _initial_gen0_size = NewSize; + _min_young_size = smallest_new_size; + _initial_young_size = NewSize; if (!FLAG_IS_DEFAULT(MaxNewSize)) { if (MaxNewSize >= MaxHeapSize) { @@ -338,14 +338,14 @@ FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size); if (NewSize > MaxNewSize) { FLAG_SET_ERGO(uintx, NewSize, MaxNewSize); - _initial_gen0_size = NewSize; + _initial_young_size = NewSize; } - } else if (MaxNewSize < _initial_gen0_size) { - FLAG_SET_ERGO(uintx, MaxNewSize, _initial_gen0_size); + } else if (MaxNewSize < _initial_young_size) { + FLAG_SET_ERGO(uintx, MaxNewSize, _initial_young_size); } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) { FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment)); } - _max_gen0_size = MaxNewSize; + _max_young_size = MaxNewSize; } if (NewSize > MaxNewSize) { @@ -357,7 +357,7 @@ NewSize/K, MaxNewSize/K, NewSize/K); } FLAG_SET_ERGO(uintx, MaxNewSize, NewSize); - _max_gen0_size = MaxNewSize; + _max_young_size = MaxNewSize; } if (SurvivorRatio < 1 || NewRatio < 1) { @@ -393,7 +393,7 @@ double shrink_factor = (double) MaxHeapSize / calculated_size; uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment); FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size)); - _initial_gen0_size = NewSize; + _initial_young_size = NewSize; // OldSize is already aligned because above we aligned MaxHeapSize to // _heap_alignment, and we just made sure that NewSize is aligned to @@ -406,16 +406,16 @@ } } - // Update NewSize, if possible, to avoid sizing gen0 to small when only + // Update NewSize, if possible, to avoid sizing the young gen too small when only // OldSize is set on the command line. if (FLAG_IS_CMDLINE(OldSize) && !FLAG_IS_CMDLINE(NewSize)) { if (OldSize < _initial_heap_byte_size) { size_t new_size = _initial_heap_byte_size - OldSize; - // Need to compare against the flag value for max since _max_gen0_size + // Need to compare against the flag value for max since _max_young_size // might not have been set yet. - if (new_size >= _min_gen0_size && new_size <= MaxNewSize) { + if (new_size >= _min_young_size && new_size <= MaxNewSize) { FLAG_SET_ERGO(uintx, NewSize, new_size); - _initial_gen0_size = NewSize; + _initial_young_size = NewSize; } } } @@ -444,97 +444,77 @@ void GenCollectorPolicy::initialize_size_info() { CollectorPolicy::initialize_size_info(); - // _space_alignment is used for alignment within a generation. - // There is additional alignment done down stream for some - // collectors that sometimes causes unwanted rounding up of - // generations sizes. + _initial_young_size = NewSize; + _max_young_size = MaxNewSize; + _initial_old_size = OldSize; - // Determine maximum size of gen0 + // Determine maximum size of the young generation. - size_t max_new_size = 0; - if (!FLAG_IS_DEFAULT(MaxNewSize)) { - max_new_size = MaxNewSize; - } else { - max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size); + if (FLAG_IS_DEFAULT(MaxNewSize)) { + _max_young_size = scale_by_NewRatio_aligned(_max_heap_byte_size); // Bound the maximum size by NewSize below (since it historically // would have been NewSize and because the NewRatio calculation could // yield a size that is too small) and bound it by MaxNewSize above. // Ergonomics plays here by previously calculating the desired // NewSize and MaxNewSize. 
- max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize); + _max_young_size = MIN2(MAX2(_max_young_size, _initial_young_size), MaxNewSize); } - assert(max_new_size > 0, "All paths should set max_new_size"); - // Given the maximum gen0 size, determine the initial and - // minimum gen0 sizes. + // Given the maximum young size, determine the initial and + // minimum young sizes. if (_max_heap_byte_size == _initial_heap_byte_size) { - // The maxium and initial heap sizes are the same so the generation's + // The maximum and initial heap sizes are the same so the generation's // initial size must be the same as it maximum size. Use NewSize as the // size if set on command line. - size_t fixed_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : max_new_size; - - _initial_gen0_size = fixed_young_size; - _max_gen0_size = fixed_young_size; + _max_young_size = FLAG_IS_CMDLINE(NewSize) ? NewSize : _max_young_size; + _initial_young_size = _max_young_size; // Also update the minimum size if min == initial == max. if (_max_heap_byte_size == _min_heap_byte_size) { - _min_gen0_size = fixed_young_size; + _min_young_size = _max_young_size; } } else { - size_t desired_new_size = 0; if (FLAG_IS_CMDLINE(NewSize)) { // If NewSize is set on the command line, we should use it as // the initial size, but make sure it is within the heap bounds. - desired_new_size = - MIN2(max_new_size, bound_minus_alignment(NewSize, _initial_heap_byte_size)); - _min_gen0_size = bound_minus_alignment(desired_new_size, _min_heap_byte_size); + _initial_young_size = + MIN2(_max_young_size, bound_minus_alignment(NewSize, _initial_heap_byte_size)); + _min_young_size = bound_minus_alignment(_initial_young_size, _min_heap_byte_size); } else { // For the case where NewSize is not set on the command line, use // NewRatio to size the initial generation size. Use the current // NewSize as the floor, because if NewRatio is overly large, the resulting // size can be too small. - desired_new_size = - MIN2(max_new_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize)); + _initial_young_size = + MIN2(_max_young_size, MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize)); } - _initial_gen0_size = desired_new_size; - _max_gen0_size = max_new_size; - } - - // Write back to flags if necessary. - if (NewSize != _initial_gen0_size) { - FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size); - } - - if (MaxNewSize != _max_gen0_size) { - FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size); } if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " - SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, - _min_gen0_size, _initial_gen0_size, _max_gen0_size); + gclog_or_tty->print_cr("1: Minimum young " SIZE_FORMAT " Initial young " + SIZE_FORMAT " Maximum young " SIZE_FORMAT, + _min_young_size, _initial_young_size, _max_young_size); } // At this point the minimum, initial and maximum sizes - // of the overall heap and of gen0 have been determined. - // The maximum gen1 size can be determined from the maximum gen0 + // of the overall heap and of the young generation have been determined. + // The maximum old size can be determined from the maximum young // and maximum heap size since no explicit flags exist - // for setting the gen1 maximum. - _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment); + // for setting the old generation maximum. 
+ _max_old_size = MAX2(_max_heap_byte_size - _max_young_size, _gen_alignment); // If no explicit command line flag has been set for the - // gen1 size, use what is left for gen1 + // old generation size, use what is left. if (!FLAG_IS_CMDLINE(OldSize)) { // The user has not specified any value but the ergonomics // may have chosen a value (which may or may not be consistent // with the overall heap size). In either case make // the minimum, maximum and initial sizes consistent - // with the gen0 sizes and the overall heap sizes. - _min_gen1_size = _gen_alignment; - _initial_gen1_size = MIN2(_max_gen1_size, MAX2(_initial_heap_byte_size - _initial_gen0_size, _min_gen1_size)); - // _max_gen1_size has already been made consistent above - FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size); + // with the young sizes and the overall heap sizes. + _min_old_size = _gen_alignment; + _initial_old_size = MIN2(_max_old_size, MAX2(_initial_heap_byte_size - _initial_young_size, _min_old_size)); + // _max_old_size has already been made consistent above. } else { // OldSize has been explicitly set on the command line. Use it // for the initial size but make sure the minimum allow a young @@ -543,69 +523,68 @@ // with other command line flags, issue a warning. // The generation minimums and the overall heap minimum should // be within one generation alignment. - if (OldSize > _max_gen1_size) { + if (_initial_old_size > _max_old_size) { warning("Inconsistency between maximum heap size and maximum " "generation sizes: using maximum heap = " SIZE_FORMAT " -XX:OldSize flag is being ignored", _max_heap_byte_size); - FLAG_SET_ERGO(uintx, OldSize, _max_gen1_size); + _initial_old_size = _max_old_size; } - _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size); - _initial_gen1_size = OldSize; + _min_old_size = MIN2(_initial_old_size, _min_heap_byte_size - _min_young_size); } // The initial generation sizes should match the initial heap size, // if not issue a warning and resize the generations. This behavior // differs from JDK8 where the generation sizes have higher priority // than the initial heap size. 
- if ((_initial_gen1_size + _initial_gen0_size) != _initial_heap_byte_size) { + if ((_initial_old_size + _initial_young_size) != _initial_heap_byte_size) { warning("Inconsistency between generation sizes and heap size, resizing " "the generations to fit the heap."); - size_t desired_gen0_size = _initial_heap_byte_size - _initial_gen1_size; - if (_initial_heap_byte_size < _initial_gen1_size) { + size_t desired_young_size = _initial_heap_byte_size - _initial_old_size; + if (_initial_heap_byte_size < _initial_old_size) { // Old want all memory, use minimum for young and rest for old - _initial_gen0_size = _min_gen0_size; - _initial_gen1_size = _initial_heap_byte_size - _min_gen0_size; - } else if (desired_gen0_size > _max_gen0_size) { + _initial_young_size = _min_young_size; + _initial_old_size = _initial_heap_byte_size - _min_young_size; + } else if (desired_young_size > _max_young_size) { // Need to increase both young and old generation - _initial_gen0_size = _max_gen0_size; - _initial_gen1_size = _initial_heap_byte_size - _max_gen0_size; - } else if (desired_gen0_size < _min_gen0_size) { + _initial_young_size = _max_young_size; + _initial_old_size = _initial_heap_byte_size - _max_young_size; + } else if (desired_young_size < _min_young_size) { // Need to decrease both young and old generation - _initial_gen0_size = _min_gen0_size; - _initial_gen1_size = _initial_heap_byte_size - _min_gen0_size; + _initial_young_size = _min_young_size; + _initial_old_size = _initial_heap_byte_size - _min_young_size; } else { // The young generation boundaries allow us to only update the // young generation. - _initial_gen0_size = desired_gen0_size; + _initial_young_size = desired_young_size; } if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " - SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, - _min_gen0_size, _initial_gen0_size, _max_gen0_size); + gclog_or_tty->print_cr("2: Minimum young " SIZE_FORMAT " Initial young " + SIZE_FORMAT " Maximum young " SIZE_FORMAT, + _min_young_size, _initial_young_size, _max_young_size); } } - // Write back to flags if necessary - if (NewSize != _initial_gen0_size) { - FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size); + // Write back to flags if necessary. + if (NewSize != _initial_young_size) { + FLAG_SET_ERGO(uintx, NewSize, _initial_young_size); } - if (MaxNewSize != _max_gen0_size) { - FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size); + if (MaxNewSize != _max_young_size) { + FLAG_SET_ERGO(uintx, MaxNewSize, _max_young_size); } - if (OldSize != _initial_gen1_size) { - FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size); + if (OldSize != _initial_old_size) { + FLAG_SET_ERGO(uintx, OldSize, _initial_old_size); } if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 " - SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT, - _min_gen1_size, _initial_gen1_size, _max_gen1_size); + gclog_or_tty->print_cr("Minimum old " SIZE_FORMAT " Initial old " + SIZE_FORMAT " Maximum old " SIZE_FORMAT, + _min_old_size, _initial_old_size, _max_old_size); } DEBUG_ONLY(GenCollectorPolicy::assert_size_info();) @@ -631,11 +610,11 @@ HandleMark hm; // Discard any handles allocated in each iteration. // First allocation attempt is lock-free. 
- Generation *gen0 = gch->get_gen(0); - assert(gen0->supports_inline_contig_alloc(), + Generation *young = gch->get_gen(0); + assert(young->supports_inline_contig_alloc(), "Otherwise, must do alloc within heap lock"); - if (gen0->should_allocate(size, is_tlab)) { - result = gen0->par_allocate(size, is_tlab); + if (young->should_allocate(size, is_tlab)) { + result = young->par_allocate(size, is_tlab); if (result != NULL) { assert(gch->is_in_reserved(result), "result not in heap"); return result; @@ -917,8 +896,8 @@ bool GenCollectorPolicy::should_try_older_generation_allocation( size_t word_size) const { GenCollectedHeap* gch = GenCollectedHeap::heap(); - size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc(); - return (word_size > heap_word_size(gen0_capacity)) + size_t young_capacity = gch->get_gen(0)->capacity_before_gc(); + return (word_size > heap_word_size(young_capacity)) || GC_locker::is_active_and_needs_gc() || gch->incremental_collection_failed(); } @@ -940,11 +919,11 @@ } if (UseParNewGC) { - _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size); + _generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size); } else { - _generations[0] = new GenerationSpec(Generation::DefNew, _initial_gen0_size, _max_gen0_size); + _generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size); } - _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size); + _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size); if (_generations[0] == NULL || _generations[1] == NULL) { vm_exit_during_initialization("Unable to allocate gen spec"); @@ -978,18 +957,18 @@ flag_value = 20 * M; set_basic_flag_values(); FLAG_SET_CMDLINE(uintx, NewSize, flag_value); - verify_gen0_min(flag_value); + verify_young_min(flag_value); set_basic_flag_values(); FLAG_SET_CMDLINE(uintx, NewSize, flag_value); - verify_gen0_initial(flag_value); + verify_young_initial(flag_value); // If NewSize is set on command line, but is larger than the min // heap size, it should only be used for initial young size. flag_value = 80 * M; set_basic_flag_values(); FLAG_SET_CMDLINE(uintx, NewSize, flag_value); - verify_gen0_initial(flag_value); + verify_young_initial(flag_value); // If NewSize has been ergonomically set, the collector policy // should use it for min but calculate the initial young size @@ -997,11 +976,11 @@ flag_value = 20 * M; set_basic_flag_values(); FLAG_SET_ERGO(uintx, NewSize, flag_value); - verify_gen0_min(flag_value); + verify_young_min(flag_value); set_basic_flag_values(); FLAG_SET_ERGO(uintx, NewSize, flag_value); - verify_scaled_gen0_initial(InitialHeapSize); + verify_scaled_young_initial(InitialHeapSize); restore_flags(); } @@ -1016,11 +995,11 @@ flag_value = 20 * M; set_basic_flag_values(); FLAG_SET_CMDLINE(uintx, OldSize, flag_value); - verify_gen1_min(flag_value); + verify_old_min(flag_value); set_basic_flag_values(); FLAG_SET_CMDLINE(uintx, OldSize, flag_value); - verify_gen1_initial(flag_value); + verify_old_initial(flag_value); // If MaxNewSize is large, the maximum OldSize will be less than // what's requested on the command line and it should be reset @@ -1031,46 +1010,46 @@ FLAG_SET_CMDLINE(uintx, MaxNewSize, 170*M); // Calculate what we expect the flag to be. 
flag_value = MaxHeapSize - MaxNewSize; - verify_gen1_initial(flag_value); + verify_old_initial(flag_value); } - static void verify_gen0_min(size_t expected) { + static void verify_young_min(size_t expected) { MarkSweepPolicy msp; msp.initialize_all(); - assert(msp.min_gen0_size() <= expected, err_msg("%zu > %zu", msp.min_gen0_size(), expected)); + assert(msp.min_young_size() <= expected, err_msg("%zu > %zu", msp.min_young_size(), expected)); } - static void verify_gen0_initial(size_t expected) { + static void verify_young_initial(size_t expected) { MarkSweepPolicy msp; msp.initialize_all(); - assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected)); + assert(msp.initial_young_size() == expected, err_msg("%zu != %zu", msp.initial_young_size(), expected)); } - static void verify_scaled_gen0_initial(size_t initial_heap_size) { + static void verify_scaled_young_initial(size_t initial_heap_size) { MarkSweepPolicy msp; msp.initialize_all(); size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size); - assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected)); + assert(msp.initial_young_size() == expected, err_msg("%zu != %zu", msp.initial_young_size(), expected)); assert(FLAG_IS_ERGO(NewSize) && NewSize == expected, err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize)); } - static void verify_gen1_min(size_t expected) { + static void verify_old_min(size_t expected) { MarkSweepPolicy msp; msp.initialize_all(); - assert(msp.min_gen1_size() <= expected, err_msg("%zu > %zu", msp.min_gen1_size(), expected)); + assert(msp.min_old_size() <= expected, err_msg("%zu > %zu", msp.min_old_size(), expected)); } - static void verify_gen1_initial(size_t expected) { + static void verify_old_initial(size_t expected) { MarkSweepPolicy msp; msp.initialize_all(); - assert(msp.initial_gen1_size() == expected, err_msg("%zu != %zu", msp.initial_gen1_size(), expected)); + assert(msp.initial_old_size() == expected, err_msg("%zu != %zu", msp.initial_old_size(), expected)); }
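The collectorPolicy.cpp hunks above keep the sizing ergonomics but store the results in the renamed _*_young_size/_*_old_size fields: the young maximum comes from MaxNewSize or from scaling the heap by NewRatio, and the old maximum is whatever the heap leaves over, floored at one generation alignment. A hedged arithmetic sketch of that split follows; align_down and the chosen sizes are illustrative, the NewRatio rule is the usual young = heap / (NewRatio + 1), and the real initialize_size_info() additionally reconciles NewSize, OldSize and the initial/minimum heap sizes.

    // Standalone sketch of the young/old split (assuming the usual
    // scale_by_NewRatio rule: young gets 1/(NewRatio+1) of the heap).
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static size_t align_down(size_t v, size_t a) { return v - (v % a); }

    int main() {
      const size_t M = 1024 * 1024;
      size_t max_heap  = 512 * M;   // e.g. -Xmx512m
      size_t new_ratio = 2;         // -XX:NewRatio=2 (old : young)
      size_t gen_align = 2 * M;     // assumed generation alignment

      // Young maximum when MaxNewSize is not set explicitly.
      size_t max_young = align_down(max_heap / (new_ratio + 1), gen_align);

      // Old maximum is the remainder, but at least one generation alignment.
      size_t max_old = std::max(max_heap - max_young, gen_align);

      printf("Maximum young %zu K  Maximum old %zu K\n",
             max_young / 1024, max_old / 1024);
      return 0;
    }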
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -219,12 +219,12 @@ class GenCollectorPolicy : public CollectorPolicy { friend class TestGenCollectorPolicy; protected: - size_t _min_gen0_size; - size_t _initial_gen0_size; - size_t _max_gen0_size; - size_t _min_gen1_size; - size_t _initial_gen1_size; - size_t _max_gen1_size; + size_t _min_young_size; + size_t _initial_young_size; + size_t _max_young_size; + size_t _min_old_size; + size_t _initial_old_size; + size_t _max_old_size; // _gen_alignment and _space_alignment will have the same value most of the // time. When using large pages they can differ. @@ -260,13 +260,13 @@ GenCollectorPolicy(); // Accessors - size_t min_gen0_size() { return _min_gen0_size; } - size_t initial_gen0_size() { return _initial_gen0_size; } - size_t max_gen0_size() { return _max_gen0_size; } - size_t gen_alignment() { return _gen_alignment; } - size_t min_gen1_size() { return _min_gen1_size; } - size_t initial_gen1_size() { return _initial_gen1_size; } - size_t max_gen1_size() { return _max_gen1_size; } + size_t min_young_size() { return _min_young_size; } + size_t initial_young_size() { return _initial_young_size; } + size_t max_young_size() { return _max_young_size; } + size_t gen_alignment() { return _gen_alignment; } + size_t min_old_size() { return _min_old_size; } + size_t initial_old_size() { return _initial_old_size; } + size_t max_old_size() { return _max_old_size; } int number_of_generations() { return 2; } @@ -298,7 +298,7 @@ size_t init_survivor_size); virtual void post_heap_initialize() { - assert(_max_gen0_size == MaxNewSize, "Should be taken care of by initialize_size_info"); + assert(_max_young_size == MaxNewSize, "Should be taken care of by initialize_size_info"); } BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; }
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1119,10 +1119,10 @@ } void GenCollectedHeap::print_tracing_info() const { - if (TraceGen0Time) { + if (TraceYoungGenTime) { get_gen(0)->print_summary_info(); } - if (TraceGen1Time) { + if (TraceOldGenTime) { get_gen(1)->print_summary_info(); } }
--- a/hotspot/src/share/vm/memory/tenuredGeneration.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/memory/tenuredGeneration.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -67,10 +67,9 @@ // Does a "full" (forced) collection invoked on this generation collect // all younger generations as well? Note that this is a // hack to allow the collection of the younger gen first if the flag is - // set. This is better than using th policy's should_collect_gen0_first() - // since that causes us to do an extra unnecessary pair of restart-&-stop-world. + // set. virtual bool full_collects_younger_generations() const { - return !CollectGen0First; + return !ScavengeBeforeFullGC; } virtual void gc_prologue(bool full);
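The tenuredGeneration.hpp hunk retires CollectGen0First in favor of the existing ScavengeBeforeFullGC flag. A small sketch of the intended behaviour follows; do_full_collection() and its printfs are illustrative only, and the default of ScavengeBeforeFullGC is assumed to be true as in the product flag definition.

    // Illustrative sketch: with -XX:+ScavengeBeforeFullGC the full GC does not
    // itself collect the younger generations, so a scavenge runs first.
    #include <cstdio>

    static bool ScavengeBeforeFullGC = true;   // assumed default

    bool full_collects_younger_generations() { return !ScavengeBeforeFullGC; }

    void do_full_collection() {
      if (!full_collects_younger_generations()) {
        printf("scavenge the young generation first\n");
      }
      printf("run the full mark-sweep-compact collection\n");
    }

    int main() { do_full_collection(); return 0; }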
--- a/hotspot/src/share/vm/oops/cpCache.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/oops/cpCache.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -407,7 +407,7 @@ oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) { - if (is_f1_null() || !has_appendix()) + if (!has_appendix()) return NULL; const int ref_index = f2_as_index() + _indy_resolved_references_appendix_offset; objArrayOop resolved_references = cpool->resolved_references(); @@ -416,7 +416,7 @@ oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) { - if (is_f1_null() || !has_method_type()) + if (!has_method_type()) return NULL; const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset; objArrayOop resolved_references = cpool->resolved_references();
--- a/hotspot/src/share/vm/oops/cpCache.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/oops/cpCache.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -348,8 +348,8 @@ bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; } bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; } bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; } - bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; } - bool has_method_type() const { return (_flags & (1 << has_method_type_shift)) != 0; } + bool has_appendix() const { return (!is_f1_null()) && (_flags & (1 << has_appendix_shift)) != 0; } + bool has_method_type() const { return (!is_f1_null()) && (_flags & (1 << has_method_type_shift)) != 0; } bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; } bool is_field_entry() const { return (_flags & (1 << is_field_entry_shift)) != 0; } bool is_byte() const { return flag_state() == btos; }
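The cpCache change moves the is_f1_null() resolution check into has_appendix()/has_method_type(), so the callers in cpCache.cpp above can drop their own guard. Below is a generic, self-contained sketch of that "fold the precondition into the predicate" refactoring; Entry and its fields are illustrative, and only the shape of the predicate mirrors the diff.

    // Generic sketch: the resolved-state check moves into the predicate
    // itself, so every caller gets it for free.
    #include <cstdio>

    struct Entry {
      void* _f1 = nullptr;          // non-null once the entry is resolved
      int   _flags = 0;
      static const int has_appendix_shift = 0;

      bool is_f1_null() const { return _f1 == nullptr; }

      // Before: callers wrote  if (is_f1_null() || !has_appendix()) ...
      // After:  the null check is part of the predicate.
      bool has_appendix() const {
        return !is_f1_null() && (_flags & (1 << has_appendix_shift)) != 0;
      }
    };

    int main() {
      Entry unresolved;
      unresolved._flags = 1 << Entry::has_appendix_shift;  // flag set, f1 still null
      printf("has_appendix on unresolved entry: %s\n",
             unresolved.has_appendix() ? "true" : "false");  // prints false
      return 0;
    }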
--- a/hotspot/src/share/vm/oops/methodData.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/oops/methodData.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -2053,7 +2053,7 @@ // Whole-method sticky bits and flags enum { - _trap_hist_limit = 20, // decoupled from Deoptimization::Reason_LIMIT + _trap_hist_limit = 21, // decoupled from Deoptimization::Reason_LIMIT _trap_hist_mask = max_jubyte, _extra_data_count = 4 // extra DataLayout headers, for trap history }; // Public flag values
--- a/hotspot/src/share/vm/opto/addnode.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/addnode.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -254,47 +254,46 @@ const Type *t_sub1 = phase->type( in1->in(1) ); const Type *t_2 = phase->type( in2 ); if( t_sub1->singleton() && t_2->singleton() && t_sub1 != Type::TOP && t_2 != Type::TOP ) - return new (phase->C) SubINode(phase->makecon( add_ring( t_sub1, t_2 ) ), - in1->in(2) ); + return new SubINode(phase->makecon( add_ring( t_sub1, t_2 ) ), in1->in(2) ); // Convert "(a-b)+(c-d)" into "(a+c)-(b+d)" if( op2 == Op_SubI ) { // Check for dead cycle: d = (a-b)+(c-d) assert( in1->in(2) != this && in2->in(2) != this, "dead loop in AddINode::Ideal" ); - Node *sub = new (phase->C) SubINode(NULL, NULL); - sub->init_req(1, phase->transform(new (phase->C) AddINode(in1->in(1), in2->in(1) ) )); - sub->init_req(2, phase->transform(new (phase->C) AddINode(in1->in(2), in2->in(2) ) )); + Node *sub = new SubINode(NULL, NULL); + sub->init_req(1, phase->transform(new AddINode(in1->in(1), in2->in(1) ) )); + sub->init_req(2, phase->transform(new AddINode(in1->in(2), in2->in(2) ) )); return sub; } // Convert "(a-b)+(b+c)" into "(a+c)" if( op2 == Op_AddI && in1->in(2) == in2->in(1) ) { assert(in1->in(1) != this && in2->in(2) != this,"dead loop in AddINode::Ideal"); - return new (phase->C) AddINode(in1->in(1), in2->in(2)); + return new AddINode(in1->in(1), in2->in(2)); } // Convert "(a-b)+(c+b)" into "(a+c)" if( op2 == Op_AddI && in1->in(2) == in2->in(2) ) { assert(in1->in(1) != this && in2->in(1) != this,"dead loop in AddINode::Ideal"); - return new (phase->C) AddINode(in1->in(1), in2->in(1)); + return new AddINode(in1->in(1), in2->in(1)); } // Convert "(a-b)+(b-c)" into "(a-c)" if( op2 == Op_SubI && in1->in(2) == in2->in(1) ) { assert(in1->in(1) != this && in2->in(2) != this,"dead loop in AddINode::Ideal"); - return new (phase->C) SubINode(in1->in(1), in2->in(2)); + return new SubINode(in1->in(1), in2->in(2)); } // Convert "(a-b)+(c-a)" into "(c-b)" if( op2 == Op_SubI && in1->in(1) == in2->in(2) ) { assert(in1->in(2) != this && in2->in(1) != this,"dead loop in AddINode::Ideal"); - return new (phase->C) SubINode(in2->in(1), in1->in(2)); + return new SubINode(in2->in(1), in1->in(2)); } } // Convert "x+(0-y)" into "(x-y)" if( op2 == Op_SubI && phase->type(in2->in(1)) == TypeInt::ZERO ) - return new (phase->C) SubINode(in1, in2->in(2) ); + return new SubINode(in1, in2->in(2) ); // Convert "(0-y)+x" into "(x-y)" if( op1 == Op_SubI && phase->type(in1->in(1)) == TypeInt::ZERO ) - return new (phase->C) SubINode( in2, in1->in(2) ); + return new SubINode( in2, in1->in(2) ); // Convert (x>>>z)+y into (x+(y<<z))>>>z for small constant z and y. 
// Helps with array allocation math constant folding @@ -315,8 +314,8 @@ if( z < 5 && -5 < y && y < 0 ) { const Type *t_in11 = phase->type(in1->in(1)); if( t_in11 != Type::TOP && (t_in11->is_int()->_lo >= -(y << z)) ) { - Node *a = phase->transform( new (phase->C) AddINode( in1->in(1), phase->intcon(y<<z) ) ); - return new (phase->C) URShiftINode( a, in1->in(2) ); + Node *a = phase->transform( new AddINode( in1->in(1), phase->intcon(y<<z) ) ); + return new URShiftINode( a, in1->in(2) ); } } } @@ -387,47 +386,46 @@ const Type *t_sub1 = phase->type( in1->in(1) ); const Type *t_2 = phase->type( in2 ); if( t_sub1->singleton() && t_2->singleton() && t_sub1 != Type::TOP && t_2 != Type::TOP ) - return new (phase->C) SubLNode(phase->makecon( add_ring( t_sub1, t_2 ) ), - in1->in(2) ); + return new SubLNode(phase->makecon( add_ring( t_sub1, t_2 ) ), in1->in(2) ); // Convert "(a-b)+(c-d)" into "(a+c)-(b+d)" if( op2 == Op_SubL ) { // Check for dead cycle: d = (a-b)+(c-d) assert( in1->in(2) != this && in2->in(2) != this, "dead loop in AddLNode::Ideal" ); - Node *sub = new (phase->C) SubLNode(NULL, NULL); - sub->init_req(1, phase->transform(new (phase->C) AddLNode(in1->in(1), in2->in(1) ) )); - sub->init_req(2, phase->transform(new (phase->C) AddLNode(in1->in(2), in2->in(2) ) )); + Node *sub = new SubLNode(NULL, NULL); + sub->init_req(1, phase->transform(new AddLNode(in1->in(1), in2->in(1) ) )); + sub->init_req(2, phase->transform(new AddLNode(in1->in(2), in2->in(2) ) )); return sub; } // Convert "(a-b)+(b+c)" into "(a+c)" if( op2 == Op_AddL && in1->in(2) == in2->in(1) ) { assert(in1->in(1) != this && in2->in(2) != this,"dead loop in AddLNode::Ideal"); - return new (phase->C) AddLNode(in1->in(1), in2->in(2)); + return new AddLNode(in1->in(1), in2->in(2)); } // Convert "(a-b)+(c+b)" into "(a+c)" if( op2 == Op_AddL && in1->in(2) == in2->in(2) ) { assert(in1->in(1) != this && in2->in(1) != this,"dead loop in AddLNode::Ideal"); - return new (phase->C) AddLNode(in1->in(1), in2->in(1)); + return new AddLNode(in1->in(1), in2->in(1)); } // Convert "(a-b)+(b-c)" into "(a-c)" if( op2 == Op_SubL && in1->in(2) == in2->in(1) ) { assert(in1->in(1) != this && in2->in(2) != this,"dead loop in AddLNode::Ideal"); - return new (phase->C) SubLNode(in1->in(1), in2->in(2)); + return new SubLNode(in1->in(1), in2->in(2)); } // Convert "(a-b)+(c-a)" into "(c-b)" if( op2 == Op_SubL && in1->in(1) == in1->in(2) ) { assert(in1->in(2) != this && in2->in(1) != this,"dead loop in AddLNode::Ideal"); - return new (phase->C) SubLNode(in2->in(1), in1->in(2)); + return new SubLNode(in2->in(1), in1->in(2)); } } // Convert "x+(0-y)" into "(x-y)" if( op2 == Op_SubL && phase->type(in2->in(1)) == TypeLong::ZERO ) - return new (phase->C) SubLNode( in1, in2->in(2) ); + return new SubLNode( in1, in2->in(2) ); // Convert "(0-y)+x" into "(x-y)" if( op1 == Op_SubL && phase->type(in1->in(1)) == TypeInt::ZERO ) - return new (phase->C) SubLNode( in2, in1->in(2) ); + return new SubLNode( in2, in1->in(2) ); // Convert "X+X+X+X+X...+X+Y" into "k*X+Y" or really convert "X+(X+Y)" // into "(X<<1)+Y" and let shift-folding happen. 
@@ -435,8 +433,8 @@ in2->in(1) == in1 && op1 != Op_ConL && 0 ) { - Node *shift = phase->transform(new (phase->C) LShiftLNode(in1,phase->intcon(1))); - return new (phase->C) AddLNode(shift,in2->in(2)); + Node *shift = phase->transform(new LShiftLNode(in1,phase->intcon(1))); + return new AddLNode(shift,in2->in(2)); } return AddNode::Ideal(phase, can_reshape); @@ -596,7 +594,7 @@ offset = phase->MakeConX(t2->get_con() + t12->get_con()); } else { // Else move the constant to the right. ((A+con)+B) into ((A+B)+con) - address = phase->transform(new (phase->C) AddPNode(in(Base),addp->in(Address),in(Offset))); + address = phase->transform(new AddPNode(in(Base),addp->in(Address),in(Offset))); offset = addp->in(Offset); } PhaseIterGVN *igvn = phase->is_IterGVN(); @@ -616,7 +614,7 @@ // If this is a NULL+long form (from unsafe accesses), switch to a rawptr. if (phase->type(in(Address)) == TypePtr::NULL_PTR) { Node* offset = in(Offset); - return new (phase->C) CastX2PNode(offset); + return new CastX2PNode(offset); } } @@ -628,7 +626,7 @@ if( add->Opcode() == Op_AddX && add->in(1) != add ) { const Type *t22 = phase->type( add->in(2) ); if( t22->singleton() && (t22 != Type::TOP) ) { // Right input is an add of a constant? - set_req(Address, phase->transform(new (phase->C) AddPNode(in(Base),in(Address),add->in(1)))); + set_req(Address, phase->transform(new AddPNode(in(Base),in(Address),add->in(1)))); set_req(Offset, add->in(2)); PhaseIterGVN *igvn = phase->is_IterGVN(); if (add->outcnt() == 0 && igvn) { @@ -858,7 +856,7 @@ // to force a right-spline graph for the rest of MinINode::Ideal(). if( l->Opcode() == Op_MinI ) { assert( l != l->in(1), "dead loop in MinINode::Ideal" ); - r = phase->transform(new (phase->C) MinINode(l->in(2),r)); + r = phase->transform(new MinINode(l->in(2),r)); l = l->in(1); set_req(1, l); set_req(2, r); @@ -906,18 +904,18 @@ } if( x->_idx > y->_idx ) - return new (phase->C) MinINode(r->in(1),phase->transform(new (phase->C) MinINode(l,r->in(2)))); + return new MinINode(r->in(1),phase->transform(new MinINode(l,r->in(2)))); // See if covers: MIN2(x+c0,MIN2(y+c1,z)) if( !phase->eqv(x,y) ) return NULL; // If (y == x) transform MIN2(x+c0, MIN2(x+c1,z)) into // MIN2(x+c0 or x+c1 which less, z). - return new (phase->C) MinINode(phase->transform(new (phase->C) AddINode(x,phase->intcon(MIN2(x_off,y_off)))),r->in(2)); + return new MinINode(phase->transform(new AddINode(x,phase->intcon(MIN2(x_off,y_off)))),r->in(2)); } else { // See if covers: MIN2(x+c0,y+c1) if( !phase->eqv(x,y) ) return NULL; // If (y == x) transform MIN2(x+c0,x+c1) into x+c0 or x+c1 which less. - return new (phase->C) AddINode(x,phase->intcon(MIN2(x_off,y_off))); + return new AddINode(x,phase->intcon(MIN2(x_off,y_off))); } }
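Across the C2 files in this change, "new (phase->C) SubINode(...)" collapses to "new SubINode(...)"; presumably Node's operator new now obtains the arena from the current Compile itself rather than from a placement argument. The sketch below shows that general pattern with toy stand-ins: Arena, Compile and Node here are not the HotSpot classes, and the toy arena just forwards to malloc.

    // Sketch of routing a class's operator new through an ambient
    // "current compilation" context, so call sites need no placement arg.
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct Arena {
      void* alloc(size_t n) { return std::malloc(n); }   // toy arena: plain malloc
    };

    struct Compile {
      Arena _node_arena;
      static Compile* _current;
      static Compile* current() { return _current; }
    };
    Compile* Compile::_current = nullptr;

    struct Node {
      // Every Node allocation lands in the current compilation's arena.
      void* operator new(size_t size) {
        return Compile::current()->_node_arena.alloc(size);
      }
      void operator delete(void*) { /* arena-owned in this sketch: nothing to free */ }
      int _idx = 0;
    };

    struct AddINode : Node { };

    int main() {
      Compile C;
      Compile::_current = &C;
      Node* n = new AddINode();        // no explicit Compile* at the call site
      printf("allocated node at %p\n", (void*)n);
      return 0;
    }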
--- a/hotspot/src/share/vm/opto/block.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/block.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -373,7 +373,7 @@ // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode, // then Match it into a machine-specific Node. Then clone the machine // Node on demand. - Node *x = new (C) GotoNode(NULL); + Node *x = new GotoNode(NULL); x->init_req(0, x); _goto = matcher.match_tree(x); assert(_goto != NULL, ""); @@ -426,7 +426,7 @@ !p->is_block_start() ); // Make the block begin with one of Region or StartNode. if( !p->is_block_start() ) { - RegionNode *r = new (C) RegionNode( 2 ); + RegionNode *r = new RegionNode( 2 ); r->init_req(1, p); // Insert RegionNode in the way proj->set_req(0, r); // Insert RegionNode in the way p = r; @@ -501,7 +501,7 @@ // get ProjNode corresponding to the succ_no'th successor of the in block ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj(); // create region for basic block - RegionNode* region = new (C) RegionNode(2); + RegionNode* region = new RegionNode(2); region->init_req(1, proj); // setup corresponding basic block Block* block = new (_block_arena) Block(_block_arena, region);
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -361,11 +361,14 @@ set_msg("not an accessor"); return false; } + + // Limit inlining depth in case inlining is forced or + // _max_inline_level was increased to compensate for lambda forms. + if (inline_level() > MaxForceInlineLevel) { + set_msg("MaxForceInlineLevel"); + return false; + } if (inline_level() > _max_inline_level) { - if (callee_method->force_inline() && inline_level() > MaxForceInlineLevel) { - set_msg("MaxForceInlineLevel"); - return false; - } if (!callee_method->force_inline() || !IncrementalInline) { set_msg("inlining too deep"); return false;
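In bytecodeInfo.cpp the MaxForceInlineLevel test is hoisted out of the force_inline branch, so it now caps the depth of every inlining chain, including the lambda-form case the new comment mentions. A tiny sketch of the resulting check order is below; the helper name, the default values and the boolean parameters are assumptions for illustration, not the real InlineTree interface.

    // Sketch of the reordered depth checks: the hard MaxForceInlineLevel cap
    // is tested first and applies even to force_inline methods; the softer
    // MaxInlineLevel cap can still be bypassed by force_inline + incremental
    // inlining.
    #include <cstdio>

    const int MaxForceInlineLevel = 100;   // assumed default
    const int MaxInlineLevel      = 9;     // assumed default

    bool too_deep_to_inline(int inline_level, bool force_inline, bool incremental_inline) {
      if (inline_level > MaxForceInlineLevel) return true;        // always a hard limit
      if (inline_level > MaxInlineLevel &&
          (!force_inline || !incremental_inline)) return true;    // soft limit
      return false;
    }

    int main() {
      printf("depth 12, force_inline: %s\n",
             too_deep_to_inline(12, true, true) ? "rejected" : "allowed");
      printf("depth 120, force_inline: %s\n",
             too_deep_to_inline(120, true, true) ? "rejected" : "allowed");
      return 0;
    }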
--- a/hotspot/src/share/vm/opto/c2_globals.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/c2_globals.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -650,9 +650,6 @@ product(bool, UseMathExactIntrinsics, true, \ "Enables intrinsification of various java.lang.Math functions") \ \ - experimental(bool, ReplaceInParentMaps, false, \ - "Propagate type improvements in callers of inlinee if possible") \ - \ product(bool, UseTypeSpeculation, true, \ "Speculatively propagate types from profiles") \ \
--- a/hotspot/src/share/vm/opto/callGenerator.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/callGenerator.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -63,12 +63,12 @@ } virtual bool is_parse() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); int is_osr() { return _is_osr; } }; -JVMState* ParseGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* ParseGenerator::generate(JVMState* jvms) { Compile* C = Compile::current(); C->print_inlining_update(this); @@ -81,7 +81,7 @@ return NULL; // bailing out of the compile; do not try to parse } - Parse parser(jvms, method(), _expected_uses, parent_parser); + Parse parser(jvms, method(), _expected_uses); // Grab signature for matching/allocation #ifdef ASSERT if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) { @@ -120,12 +120,12 @@ _separate_io_proj(separate_io_proj) { } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); CallStaticJavaNode* call_node() const { return _call_node; } }; -JVMState* DirectCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* DirectCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); kit.C->print_inlining_update(this); bool is_static = method()->is_static(); @@ -136,7 +136,7 @@ kit.C->log()->elem("direct_call bci='%d'", jvms->bci()); } - CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci()); + CallStaticJavaNode *call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci()); _call_node = call; // Save the call node in case we need it later if (!is_static) { // Make an explicit receiver null_check as part of this call. @@ -173,10 +173,10 @@ vtable_index >= 0, "either invalid or usable"); } virtual bool is_virtual() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; -JVMState* VirtualCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* VirtualCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); Node* receiver = kit.argument(0); @@ -225,7 +225,7 @@ "no vtable calls if +UseInlineCaches "); address target = SharedRuntime::get_resolve_virtual_call_stub(); // Normal inline cache used for call - CallDynamicJavaNode *call = new (kit.C) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci()); + CallDynamicJavaNode *call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci()); kit.set_arguments_for_java_call(call); kit.set_edges_for_java_call(call); Node* ret = kit.set_results_for_java_call(call); @@ -283,7 +283,7 @@ // Convert the CallStaticJava into an inline virtual void do_late_inline(); - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { + virtual JVMState* generate(JVMState* jvms) { Compile *C = Compile::current(); C->log_inline_id(this); @@ -298,7 +298,7 @@ // that the late inlining logic can distinguish between fall // through and exceptional uses of the memory and io projections // as is done for allocations and macro expansion. 
- return DirectCallGenerator::generate(jvms, parent_parser); + return DirectCallGenerator::generate(jvms); } virtual void print_inlining_late(const char* msg) { @@ -350,7 +350,7 @@ JVMState* old_jvms = call->jvms(); JVMState* jvms = old_jvms->clone_shallow(C); uint size = call->req(); - SafePointNode* map = new (C) SafePointNode(size, jvms); + SafePointNode* map = new SafePointNode(size, jvms); for (uint i1 = 0; i1 < size; i1++) { map->init_req(i1, call->in(i1)); } @@ -399,7 +399,7 @@ } // Now perform the inlining using the synthesized JVMState - JVMState* new_jvms = _inline_cg->generate(jvms, NULL); + JVMState* new_jvms = _inline_cg->generate(jvms); if (new_jvms == NULL) return; // no change if (C->failing()) return; @@ -417,7 +417,7 @@ C->env()->notice_inlined_method(_inline_cg->method()); C->set_inlining_progress(true); - kit.replace_call(call, result); + kit.replace_call(call, result, true); } @@ -439,8 +439,8 @@ virtual bool is_mh_late_inline() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { - JVMState* new_jvms = LateInlineCallGenerator::generate(jvms, parent_parser); + virtual JVMState* generate(JVMState* jvms) { + JVMState* new_jvms = LateInlineCallGenerator::generate(jvms); Compile* C = Compile::current(); if (_input_not_const) { @@ -486,14 +486,14 @@ LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { + virtual JVMState* generate(JVMState* jvms) { Compile *C = Compile::current(); C->log_inline_id(this); C->add_string_late_inline(this); - JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); + JVMState* new_jvms = DirectCallGenerator::generate(jvms); return new_jvms; } @@ -510,14 +510,14 @@ LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) : LateInlineCallGenerator(method, inline_cg) {} - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) { + virtual JVMState* generate(JVMState* jvms) { Compile *C = Compile::current(); C->log_inline_id(this); C->add_boxing_late_inline(this); - JVMState* new_jvms = DirectCallGenerator::generate(jvms, parent_parser); + JVMState* new_jvms = DirectCallGenerator::generate(jvms); return new_jvms; } }; @@ -553,7 +553,7 @@ virtual bool is_virtual() const { return _is_virtual; } virtual bool is_deferred() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; @@ -563,14 +563,14 @@ return new WarmCallGenerator(ci, if_cold, if_hot); } -JVMState* WarmCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* WarmCallGenerator::generate(JVMState* jvms) { Compile* C = Compile::current(); C->print_inlining_update(this); if (C->log() != NULL) { C->log()->elem("warm_call bci='%d'", jvms->bci()); } - jvms = _if_cold->generate(jvms, parent_parser); + jvms = _if_cold->generate(jvms); if (jvms != NULL) { Node* m = jvms->map()->control(); if (m->is_CatchProj()) m = m->in(0); else m = C->top(); @@ -631,7 +631,7 @@ virtual bool is_inline() const { return _if_hit->is_inline(); } virtual bool is_deferred() const { return _if_hit->is_deferred(); } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; @@ -643,14 +643,13 @@ } -JVMState* PredictedCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* PredictedCallGenerator::generate(JVMState* jvms) { GraphKit 
kit(jvms); kit.C->print_inlining_update(this); PhaseGVN& gvn = kit.gvn(); // We need an explicit receiver null_check before checking its type. // We share a map with the caller, so his JVMS gets adjusted. Node* receiver = kit.argument(0); - CompileLog* log = kit.C->log(); if (log != NULL) { log->elem("predicted_call bci='%d' klass='%d'", @@ -662,6 +661,10 @@ return kit.transfer_exceptions_into_jvms(); } + // Make a copy of the replaced nodes in case we need to restore them + ReplacedNodes replaced_nodes = kit.map()->replaced_nodes(); + replaced_nodes.clone(); + Node* exact_receiver = receiver; // will get updated in place... Node* slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob, @@ -672,7 +675,7 @@ { PreserveJVMState pjvms(&kit); kit.set_control(slow_ctl); if (!kit.stopped()) { - slow_jvms = _if_missed->generate(kit.sync_jvms(), parent_parser); + slow_jvms = _if_missed->generate(kit.sync_jvms()); if (kit.failing()) return NULL; // might happen because of NodeCountInliningCutoff assert(slow_jvms != NULL, "must be"); @@ -693,12 +696,12 @@ kit.replace_in_map(receiver, exact_receiver); // Make the hot call: - JVMState* new_jvms = _if_hit->generate(kit.sync_jvms(), parent_parser); + JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); if (new_jvms == NULL) { // Inline failed, so make a direct call. assert(_if_hit->is_inline(), "must have been a failed inline"); CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); - new_jvms = cg->generate(kit.sync_jvms(), parent_parser); + new_jvms = cg->generate(kit.sync_jvms()); } kit.add_exception_states_from(new_jvms); kit.set_jvms(new_jvms); @@ -715,16 +718,29 @@ return kit.transfer_exceptions_into_jvms(); } + // There are 2 branches and the replaced nodes are only valid on + // one: restore the replaced nodes to what they were before the + // branch. + kit.map()->set_replaced_nodes(replaced_nodes); + // Finish the diamond. kit.C->set_has_split_ifs(true); // Has chance for split-if optimization - RegionNode* region = new (kit.C) RegionNode(3); + RegionNode* region = new RegionNode(3); region->init_req(1, kit.control()); region->init_req(2, slow_map->control()); kit.set_control(gvn.transform(region)); Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); iophi->set_req(2, slow_map->i_o()); kit.set_i_o(gvn.transform(iophi)); + // Merge memory kit.merge_memory(slow_map->merged_memory(), region, 2); + // Transform new memory Phis. 
+ for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) { + Node* phi = mms.memory(); + if (phi->is_Phi() && phi->in(0) == region) { + mms.set_memory(gvn.transform(phi)); + } + } uint tos = kit.jvms()->stkoff() + kit.sp(); uint limit = slow_map->req(); for (uint i = TypeFunc::Parms; i < limit; i++) { @@ -825,7 +841,7 @@ const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); const Type* sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass()); if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { - Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type)); + Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type)); kit.set_argument(0, cast_obj); } } @@ -837,7 +853,7 @@ const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr(); const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass()); if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { - Node* cast_obj = gvn.transform(new (C) CheckCastPPNode(kit.control(), arg, sig_type)); + Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type)); kit.set_argument(receiver_skip + i, cast_obj); } } @@ -882,15 +898,15 @@ } -//------------------------PredictedIntrinsicGenerator------------------------------ -// Internal class which handles all predicted Intrinsic calls. -class PredictedIntrinsicGenerator : public CallGenerator { +//------------------------PredicatedIntrinsicGenerator------------------------------ +// Internal class which handles all predicated Intrinsic calls. +class PredicatedIntrinsicGenerator : public CallGenerator { CallGenerator* _intrinsic; CallGenerator* _cg; public: - PredictedIntrinsicGenerator(CallGenerator* intrinsic, - CallGenerator* cg) + PredicatedIntrinsicGenerator(CallGenerator* intrinsic, + CallGenerator* cg) : CallGenerator(cg->method()) { _intrinsic = intrinsic; @@ -901,108 +917,186 @@ virtual bool is_inlined() const { return true; } virtual bool is_intrinsic() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; -CallGenerator* CallGenerator::for_predicted_intrinsic(CallGenerator* intrinsic, - CallGenerator* cg) { - return new PredictedIntrinsicGenerator(intrinsic, cg); +CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic, + CallGenerator* cg) { + return new PredicatedIntrinsicGenerator(intrinsic, cg); } -JVMState* PredictedIntrinsicGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) { + // The code we want to generate here is: + // if (receiver == NULL) + // uncommon_Trap + // if (predicate(0)) + // do_intrinsic(0) + // else + // if (predicate(1)) + // do_intrinsic(1) + // ... 
+ // else + // do_java_comp + GraphKit kit(jvms); PhaseGVN& gvn = kit.gvn(); CompileLog* log = kit.C->log(); if (log != NULL) { - log->elem("predicted_intrinsic bci='%d' method='%d'", + log->elem("predicated_intrinsic bci='%d' method='%d'", jvms->bci(), log->identify(method())); } - Node* slow_ctl = _intrinsic->generate_predicate(kit.sync_jvms()); - if (kit.failing()) - return NULL; // might happen because of NodeCountInliningCutoff - - kit.C->print_inlining_update(this); - SafePointNode* slow_map = NULL; - JVMState* slow_jvms; - if (slow_ctl != NULL) { - PreserveJVMState pjvms(&kit); - kit.set_control(slow_ctl); - if (!kit.stopped()) { - slow_jvms = _cg->generate(kit.sync_jvms(), parent_parser); - if (kit.failing()) - return NULL; // might happen because of NodeCountInliningCutoff - assert(slow_jvms != NULL, "must be"); - kit.add_exception_states_from(slow_jvms); - kit.set_map(slow_jvms->map()); - if (!kit.stopped()) - slow_map = kit.stop(); + if (!method()->is_static()) { + // We need an explicit receiver null_check before checking its type in predicate. + // We share a map with the caller, so his JVMS gets adjusted. + Node* receiver = kit.null_check_receiver_before_call(method()); + if (kit.stopped()) { + return kit.transfer_exceptions_into_jvms(); } } - if (kit.stopped()) { - // Predicate is always false. - kit.set_jvms(slow_jvms); + int n_predicates = _intrinsic->predicates_count(); + assert(n_predicates > 0, "sanity"); + + JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1)); + + // Region for normal compilation code if intrinsic failed. + Node* slow_region = new RegionNode(1); + + int results = 0; + for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) { +#ifdef ASSERT + JVMState* old_jvms = kit.jvms(); + SafePointNode* old_map = kit.map(); + Node* old_io = old_map->i_o(); + Node* old_mem = old_map->memory(); + Node* old_exc = old_map->next_exception(); +#endif + Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate); +#ifdef ASSERT + // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate. + assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state"); + SafePointNode* new_map = kit.map(); + assert(old_io == new_map->i_o(), "generate_predicate should not change i_o"); + assert(old_mem == new_map->memory(), "generate_predicate should not change memory"); + assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions"); +#endif + if (!kit.stopped()) { + PreserveJVMState pjvms(&kit); + // Generate intrinsic code: + JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms()); + if (new_jvms == NULL) { + // Intrinsic failed, use normal compilation path for this predicate. + slow_region->add_req(kit.control()); + } else { + kit.add_exception_states_from(new_jvms); + kit.set_jvms(new_jvms); + if (!kit.stopped()) { + result_jvms[results++] = kit.jvms(); + } + } + } + if (else_ctrl == NULL) { + else_ctrl = kit.C->top(); + } + kit.set_control(else_ctrl); + } + if (!kit.stopped()) { + // Final 'else' after predicates. 
+ slow_region->add_req(kit.control()); + } + if (slow_region->req() > 1) { + PreserveJVMState pjvms(&kit); + // Generate normal compilation code: + kit.set_control(gvn.transform(slow_region)); + JVMState* new_jvms = _cg->generate(kit.sync_jvms()); + if (kit.failing()) + return NULL; // might happen because of NodeCountInliningCutoff + assert(new_jvms != NULL, "must be"); + kit.add_exception_states_from(new_jvms); + kit.set_jvms(new_jvms); + if (!kit.stopped()) { + result_jvms[results++] = kit.jvms(); + } + } + + if (results == 0) { + // All paths ended in uncommon traps. + (void) kit.stop(); return kit.transfer_exceptions_into_jvms(); } - // Generate intrinsic code: - JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms(), parent_parser); - if (new_jvms == NULL) { - // Intrinsic failed, so use slow code or make a direct call. - if (slow_map == NULL) { - CallGenerator* cg = CallGenerator::for_direct_call(method()); - new_jvms = cg->generate(kit.sync_jvms(), parent_parser); - } else { - kit.set_jvms(slow_jvms); - return kit.transfer_exceptions_into_jvms(); - } - } - kit.add_exception_states_from(new_jvms); - kit.set_jvms(new_jvms); - - // Need to merge slow and fast? - if (slow_map == NULL) { - // The fast path is the only path remaining. + if (results == 1) { // Only one path + kit.set_jvms(result_jvms[0]); return kit.transfer_exceptions_into_jvms(); } - if (kit.stopped()) { - // Intrinsic method threw an exception, so it's just the slow path after all. - kit.set_jvms(slow_jvms); - return kit.transfer_exceptions_into_jvms(); + // Merge all paths. + kit.C->set_has_split_ifs(true); // Has chance for split-if optimization + RegionNode* region = new RegionNode(results + 1); + Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); + for (int i = 0; i < results; i++) { + JVMState* jvms = result_jvms[i]; + int path = i + 1; + SafePointNode* map = jvms->map(); + region->init_req(path, map->control()); + iophi->set_req(path, map->i_o()); + if (i == 0) { + kit.set_jvms(jvms); + } else { + kit.merge_memory(map->merged_memory(), region, path); + } + } + kit.set_control(gvn.transform(region)); + kit.set_i_o(gvn.transform(iophi)); + // Transform new memory Phis. + for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) { + Node* phi = mms.memory(); + if (phi->is_Phi() && phi->in(0) == region) { + mms.set_memory(gvn.transform(phi)); + } } - // Finish the diamond. - kit.C->set_has_split_ifs(true); // Has chance for split-if optimization - RegionNode* region = new (kit.C) RegionNode(3); - region->init_req(1, kit.control()); - region->init_req(2, slow_map->control()); - kit.set_control(gvn.transform(region)); - Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); - iophi->set_req(2, slow_map->i_o()); - kit.set_i_o(gvn.transform(iophi)); - kit.merge_memory(slow_map->merged_memory(), region, 2); + // Merge debug info. 
+ Node** ins = NEW_RESOURCE_ARRAY(Node*, results); uint tos = kit.jvms()->stkoff() + kit.sp(); - uint limit = slow_map->req(); + Node* map = kit.map(); + uint limit = map->req(); for (uint i = TypeFunc::Parms; i < limit; i++) { // Skip unused stack slots; fast forward to monoff(); if (i == tos) { i = kit.jvms()->monoff(); if( i >= limit ) break; } - Node* m = kit.map()->in(i); - Node* n = slow_map->in(i); - if (m != n) { - const Type* t = gvn.type(m)->meet_speculative(gvn.type(n)); - Node* phi = PhiNode::make(region, m, t); - phi->set_req(2, n); - kit.map()->set_req(i, gvn.transform(phi)); + Node* n = map->in(i); + ins[0] = n; + const Type* t = gvn.type(n); + bool needs_phi = false; + for (int j = 1; j < results; j++) { + JVMState* jvms = result_jvms[j]; + Node* jmap = jvms->map(); + Node* m = NULL; + if (jmap->req() > i) { + m = jmap->in(i); + if (m != n) { + needs_phi = true; + t = t->meet_speculative(gvn.type(m)); + } + } + ins[j] = m; + } + if (needs_phi) { + Node* phi = PhiNode::make(region, n, t); + for (int j = 1; j < results; j++) { + phi->set_req(j + 1, ins[j]); + } + map->set_req(i, gvn.transform(phi)); } } + return kit.transfer_exceptions_into_jvms(); } @@ -1025,7 +1119,7 @@ virtual bool is_virtual() const { ShouldNotReachHere(); return false; } virtual bool is_trap() const { return true; } - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser); + virtual JVMState* generate(JVMState* jvms); }; @@ -1037,7 +1131,7 @@ } -JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms, Parse* parent_parser) { +JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) { GraphKit kit(jvms); kit.C->print_inlining_update(this); // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver).
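For reference, the new comment block in PredicatedIntrinsicGenerator::generate describes a chain of predicate checks with intrinsic fast paths and one shared slow path. The dispatch shape (and only the shape) can be sketched standalone in plain C++; run_predicated and its lambda-based signature below are invented for this illustration and are not HotSpot API.

#include <cstddef>
#include <cstdio>
#include <functional>
#include <vector>

// Shape described by the comment: try predicate(0), predicate(1), ...;
// the first one that holds selects its intrinsic path, otherwise fall
// back to the normal ("do_java_comp") path.
int run_predicated(int input,
                   const std::vector<std::function<bool(int)>>& predicates,
                   const std::vector<std::function<int(int)>>& intrinsics,
                   const std::function<int(int)>& slow_path) {
  for (std::size_t i = 0; i < predicates.size(); i++) {
    if (predicates[i](input)) {
      return intrinsics[i](input);   // first matching predicate wins
    }
  }
  return slow_path(input);           // final 'else': normal compiled code
}

int main() {
  std::vector<std::function<bool(int)>> preds = {
    [](int x) { return x % 2 == 0; },
    [](int x) { return x > 100; }
  };
  std::vector<std::function<int(int)>> fast = {
    [](int x) { return x / 2; },
    [](int x) { return x - 100; }
  };
  auto slow = [](int x) { return x; };
  std::printf("%d %d %d\n",
              run_predicated(8,   preds, fast, slow),   // 4 (first predicate)
              run_predicated(101, preds, fast, slow),   // 1 (second predicate)
              run_predicated(3,   preds, fast, slow));  // 3 (slow path)
  return 0;
}

In the actual generator the selected paths are not mutually exclusive returns but IR regions that are merged afterwards, which is what the RegionNode/PhiNode code above does.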
--- a/hotspot/src/share/vm/opto/callGenerator.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/callGenerator.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -31,8 +31,6 @@ #include "opto/type.hpp" #include "runtime/deoptimization.hpp" -class Parse; - //---------------------------CallGenerator------------------------------------- // The subclasses of this class handle generation of ideal nodes for // call sites and method entry points. @@ -63,8 +61,9 @@ virtual bool is_virtual() const { return false; } // is_deferred: The decision whether to inline or not is deferred. virtual bool is_deferred() const { return false; } - // is_predicted: Uses an explicit check against a predicted type. - virtual bool is_predicted() const { return false; } + // is_predicated: Uses an explicit check (predicate). + virtual bool is_predicated() const { return false; } + virtual int predicates_count() const { return 0; } // is_trap: Does not return to the caller. (E.g., uncommon trap.) virtual bool is_trap() const { return false; } // does_virtual_dispatch: Should try inlining as normal method first. @@ -114,7 +113,7 @@ // // If the result is NULL, it means that this CallGenerator was unable // to handle the given call, and another CallGenerator should be consulted. - virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0; + virtual JVMState* generate(JVMState* jvms) = 0; // How to generate a call site that is inlined: static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1); @@ -160,9 +159,9 @@ // Registry for intrinsics: static CallGenerator* for_intrinsic(ciMethod* m); static void register_intrinsic(ciMethod* m, CallGenerator* cg); - static CallGenerator* for_predicted_intrinsic(CallGenerator* intrinsic, - CallGenerator* cg); - virtual Node* generate_predicate(JVMState* jvms) { return NULL; }; + static CallGenerator* for_predicated_intrinsic(CallGenerator* intrinsic, + CallGenerator* cg); + virtual Node* generate_predicate(JVMState* jvms, int predicate) { return NULL; }; virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
--- a/hotspot/src/share/vm/opto/callnode.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/callnode.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -74,20 +74,20 @@ case TypeFunc::Control: case TypeFunc::I_O: case TypeFunc::Memory: - return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); + return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); case TypeFunc::FramePtr: - return new (match->C) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP); + return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP); case TypeFunc::ReturnAdr: - return new (match->C) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP); + return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP); case TypeFunc::Parms: default: { uint parm_num = proj->_con - TypeFunc::Parms; const Type *t = _domain->field_at(proj->_con); if (t->base() == Type::Half) // 2nd half of Longs and Doubles - return new (match->C) ConNode(Type::TOP); + return new ConNode(Type::TOP); uint ideal_reg = t->ideal_reg(); RegMask &rm = match->_calling_convention_mask[parm_num]; - return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg); + return new MachProjNode(this,proj->_con,rm,ideal_reg); } } return NULL; @@ -685,12 +685,12 @@ case TypeFunc::Control: case TypeFunc::I_O: case TypeFunc::Memory: - return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); + return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); case TypeFunc::Parms+1: // For LONG & DOUBLE returns assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, ""); // 2nd half of doubles and longs - return new (match->C) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad); + return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad); case TypeFunc::Parms: { // Normal returns uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg(); @@ -700,7 +700,7 @@ RegMask rm = RegMask(regs.first()); if( OptoReg::is_valid(regs.second()) ) rm.Insert( regs.second() ); - return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg); + return new MachProjNode(this,proj->_con,rm,ideal_reg); } case TypeFunc::ReturnAdr: @@ -1090,6 +1090,7 @@ #ifndef PRODUCT void SafePointNode::dump_spec(outputStream *st) const { st->print(" SafePoint "); + _replaced_nodes.dump(st); } #endif @@ -1288,10 +1289,10 @@ Node* nproj = catchproj->clone(); igvn->register_new_node_with_optimizer(nproj); - Node *frame = new (phase->C) ParmNode( phase->C->start(), TypeFunc::FramePtr ); + Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr ); frame = phase->transform(frame); // Halt & Catch Fire - Node *halt = new (phase->C) HaltNode( nproj, frame ); + Node *halt = new HaltNode( nproj, frame ); phase->C->root()->add_req(halt); phase->transform(halt); @@ -1333,7 +1334,7 @@ if (!allow_new_nodes) return NULL; // Create a cast which is control dependent on the initialization to // propagate the fact that the array length must be positive. - length = new (phase->C) CastIINode(length, narrow_length_type); + length = new CastIINode(length, narrow_length_type); length->set_req(0, initialization()->proj_out(0)); } }
--- a/hotspot/src/share/vm/opto/callnode.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/callnode.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -30,6 +30,7 @@ #include "opto/multnode.hpp" #include "opto/opcodes.hpp" #include "opto/phaseX.hpp" +#include "opto/replacednodes.hpp" #include "opto/type.hpp" // Portions of code courtesy of Clifford Click @@ -335,6 +336,7 @@ OopMap* _oop_map; // Array of OopMap info (8-bit char) for GC JVMState* const _jvms; // Pointer to list of JVM State objects const TypePtr* _adr_type; // What type of memory does this node produce? + ReplacedNodes _replaced_nodes; // During parsing: list of pair of nodes from calls to GraphKit::replace_in_map() // Many calls take *all* of memory as input, // but some produce a limited subset of that memory as output. @@ -426,6 +428,37 @@ void set_next_exception(SafePointNode* n); bool has_exceptions() const { return next_exception() != NULL; } + // Helper methods to operate on replaced nodes + ReplacedNodes replaced_nodes() const { + return _replaced_nodes; + } + + void set_replaced_nodes(ReplacedNodes replaced_nodes) { + _replaced_nodes = replaced_nodes; + } + + void clone_replaced_nodes() { + _replaced_nodes.clone(); + } + void record_replaced_node(Node* initial, Node* improved) { + _replaced_nodes.record(initial, improved); + } + void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) { + _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx); + } + void delete_replaced_nodes() { + _replaced_nodes.reset(); + } + void apply_replaced_nodes() { + _replaced_nodes.apply(this); + } + void merge_replaced_nodes_with(SafePointNode* sfpt) { + _replaced_nodes.merge_with(sfpt->_replaced_nodes); + } + bool has_replaced_nodes() const { + return !_replaced_nodes.is_empty(); + } + // Standard Node stuff virtual int Opcode() const; virtual bool pinned() const { return true; }
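The new _replaced_nodes field and its helpers on SafePointNode delegate to the ReplacedNodes class from the newly included opto/replacednodes.hpp, which is not part of this section. As a rough standalone sketch of the record/merge bookkeeping such a list needs (illustration only; the real class stores Node pointers, has more operations, and its exact merge rules may differ), note that at a control-flow merge only replacements valid on both incoming paths can be kept:

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Toy stand-in for a list of (initial, improved) replacements recorded
// during parsing; nodes are represented by integer ids here.
struct ReplacedPairs {
  std::vector<std::pair<int, int>> _pairs;

  void record(int initial, int improved) { _pairs.emplace_back(initial, improved); }
  bool is_empty() const { return _pairs.empty(); }
  void reset() { _pairs.clear(); }

  // At a merge point, keep only the replacements both paths recorded.
  void merge_with(const ReplacedPairs& other) {
    std::vector<std::pair<int, int>> common;
    for (const auto& p : _pairs) {
      if (std::find(other._pairs.begin(), other._pairs.end(), p) != other._pairs.end()) {
        common.push_back(p);
      }
    }
    _pairs.swap(common);
  }
};

int main() {
  ReplacedPairs a, b;
  a.record(1, 10); a.record(2, 20);
  b.record(1, 10); b.record(3, 30);
  a.merge_with(b);                          // only (1, 10) is valid on both paths
  std::printf("%zu\n", a._pairs.size());    // prints 1
  return 0;
}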
--- a/hotspot/src/share/vm/opto/castnode.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/castnode.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -228,11 +228,11 @@ Node* dispX, bool negate = false) { if (negate) { - dispX = new (phase->C) SubXNode(phase->MakeConX(0), phase->transform(dispX)); + dispX = new SubXNode(phase->MakeConX(0), phase->transform(dispX)); } - return new (phase->C) AddPNode(phase->C->top(), - phase->transform(new (phase->C) CastX2PNode(base)), - phase->transform(dispX)); + return new AddPNode(phase->C->top(), + phase->transform(new CastX2PNode(base)), + phase->transform(dispX)); } Node *CastX2PNode::Ideal(PhaseGVN *phase, bool can_reshape) {
--- a/hotspot/src/share/vm/opto/cfgnode.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/cfgnode.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -662,17 +662,17 @@ convf2i->in(1) == bot_in ) { // Matched pattern, including LShiftI; RShiftI, replace with integer compares // max test - Node *cmp = gvn->register_new_node_with_optimizer(new (phase->C) CmpINode( convf2i, min )); - Node *boo = gvn->register_new_node_with_optimizer(new (phase->C) BoolNode( cmp, BoolTest::lt )); - IfNode *iff = (IfNode*)gvn->register_new_node_with_optimizer(new (phase->C) IfNode( top_if->in(0), boo, PROB_UNLIKELY_MAG(5), top_if->_fcnt )); - Node *if_min= gvn->register_new_node_with_optimizer(new (phase->C) IfTrueNode (iff)); - Node *ifF = gvn->register_new_node_with_optimizer(new (phase->C) IfFalseNode(iff)); + Node *cmp = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, min )); + Node *boo = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::lt )); + IfNode *iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( top_if->in(0), boo, PROB_UNLIKELY_MAG(5), top_if->_fcnt )); + Node *if_min= gvn->register_new_node_with_optimizer(new IfTrueNode (iff)); + Node *ifF = gvn->register_new_node_with_optimizer(new IfFalseNode(iff)); // min test - cmp = gvn->register_new_node_with_optimizer(new (phase->C) CmpINode( convf2i, max )); - boo = gvn->register_new_node_with_optimizer(new (phase->C) BoolNode( cmp, BoolTest::gt )); - iff = (IfNode*)gvn->register_new_node_with_optimizer(new (phase->C) IfNode( ifF, boo, PROB_UNLIKELY_MAG(5), bot_if->_fcnt )); - Node *if_max= gvn->register_new_node_with_optimizer(new (phase->C) IfTrueNode (iff)); - ifF = gvn->register_new_node_with_optimizer(new (phase->C) IfFalseNode(iff)); + cmp = gvn->register_new_node_with_optimizer(new CmpINode( convf2i, max )); + boo = gvn->register_new_node_with_optimizer(new BoolNode( cmp, BoolTest::gt )); + iff = (IfNode*)gvn->register_new_node_with_optimizer(new IfNode( ifF, boo, PROB_UNLIKELY_MAG(5), bot_if->_fcnt )); + Node *if_max= gvn->register_new_node_with_optimizer(new IfTrueNode (iff)); + ifF = gvn->register_new_node_with_optimizer(new IfFalseNode(iff)); // update input edges to region node set_req_X( min_idx, if_min, gvn ); set_req_X( max_idx, if_max, gvn ); @@ -731,7 +731,7 @@ PhiNode* PhiNode::make(Node* r, Node* x, const Type *t, const TypePtr* at) { uint preds = r->req(); // Number of predecessor paths assert(t != Type::MEMORY || at == flatten_phi_adr_type(at), "flatten at"); - PhiNode* p = new (Compile::current()) PhiNode(r, t, at); + PhiNode* p = new PhiNode(r, t, at); for (uint j = 1; j < preds; j++) { // Fill in all inputs, except those which the region does not yet have if (r->in(j) != NULL) @@ -749,7 +749,7 @@ const Type* t = x->bottom_type(); const TypePtr* at = NULL; if (t == Type::MEMORY) at = flatten_phi_adr_type(x->adr_type()); - return new (Compile::current()) PhiNode(r, t, at); + return new PhiNode(r, t, at); } @@ -1258,9 +1258,9 @@ } else return NULL; // Build int->bool conversion - Node *n = new (phase->C) Conv2BNode( cmp->in(1) ); + Node *n = new Conv2BNode( cmp->in(1) ); if( flipped ) - n = new (phase->C) XorINode( phase->transform(n), phase->intcon(1) ); + n = new XorINode( phase->transform(n), phase->intcon(1) ); return n; } @@ -1320,9 +1320,9 @@ if( q->is_Con() && phase->type(q) != TypeInt::ZERO && y->is_Con() ) return NULL; - Node *cmplt = phase->transform( new (phase->C) CmpLTMaskNode(p,q) ); - Node *j_and = phase->transform( new (phase->C) AndINode(cmplt,y) ); - return new 
(phase->C) AddINode(j_and,x); + Node *cmplt = phase->transform( new CmpLTMaskNode(p,q) ); + Node *j_and = phase->transform( new AndINode(cmplt,y) ); + return new AddINode(j_and,x); } //------------------------------is_absolute------------------------------------ @@ -1384,17 +1384,17 @@ if( sub->Opcode() != Op_SubF || sub->in(2) != x || phase->type(sub->in(1)) != tzero ) return NULL; - x = new (phase->C) AbsFNode(x); + x = new AbsFNode(x); if (flip) { - x = new (phase->C) SubFNode(sub->in(1), phase->transform(x)); + x = new SubFNode(sub->in(1), phase->transform(x)); } } else { if( sub->Opcode() != Op_SubD || sub->in(2) != x || phase->type(sub->in(1)) != tzero ) return NULL; - x = new (phase->C) AbsDNode(x); + x = new AbsDNode(x); if (flip) { - x = new (phase->C) SubDNode(sub->in(1), phase->transform(x)); + x = new SubDNode(sub->in(1), phase->transform(x)); } } @@ -1469,7 +1469,7 @@ // Now start splitting out the flow paths that merge the same value. // Split first the RegionNode. PhaseIterGVN *igvn = phase->is_IterGVN(); - RegionNode *newr = new (phase->C) RegionNode(hit+1); + RegionNode *newr = new RegionNode(hit+1); split_once(igvn, phi, val, r, newr); // Now split all other Phis than this one @@ -1781,13 +1781,13 @@ } if (doit) { if (base == NULL) { - base = new (phase->C) PhiNode(in(0), type, NULL); + base = new PhiNode(in(0), type, NULL); for (uint i = 1; i < req(); i++) { base->init_req(i, in(i)->in(AddPNode::Base)); } phase->is_IterGVN()->register_new_node_with_optimizer(base); } - return new (phase->C) AddPNode(base, base, y); + return new AddPNode(base, base, y); } } } @@ -1864,7 +1864,7 @@ // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...)) PhaseIterGVN *igvn = phase->is_IterGVN(); - Node* hook = new (phase->C) Node(1); + Node* hook = new Node(1); PhiNode* new_base = (PhiNode*) clone(); // Must eagerly register phis, since they participate in loops. if (igvn) { @@ -1961,7 +1961,7 @@ } else { narrow_t = TypeNarrowKlass::make(this->bottom_type()->is_ptr()); } - PhiNode* new_phi = new (phase->C) PhiNode(r, narrow_t); + PhiNode* new_phi = new PhiNode(r, narrow_t); uint orig_cnt = req(); for (uint i=1; i<req(); ++i) {// For all paths in Node *ii = in(i); @@ -1975,9 +1975,9 @@ new_ii = new_phi; } else { if (is_decodeN) { - new_ii = new (phase->C) EncodePNode(ii, narrow_t); + new_ii = new EncodePNode(ii, narrow_t); } else { - new_ii = new (phase->C) EncodePKlassNode(ii, narrow_t); + new_ii = new EncodePKlassNode(ii, narrow_t); } igvn->register_new_node_with_optimizer(new_ii); } @@ -1986,9 +1986,9 @@ } igvn->register_new_node_with_optimizer(new_phi, this); if (is_decodeN) { - progress = new (phase->C) DecodeNNode(new_phi, bottom_type()); + progress = new DecodeNNode(new_phi, bottom_type()); } else { - progress = new (phase->C) DecodeNKlassNode(new_phi, bottom_type()); + progress = new DecodeNKlassNode(new_phi, bottom_type()); } } }
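The rewritten PhiNode::Ideal hunk above emits a CmpLTMask/AndI/AddI sequence for a "conditionally add y" diamond. Assuming CmpLTMask yields all ones when p < q and zero otherwise, the branch-free arithmetic it stands for checks out standalone (illustration only; add_if_less is a name invented for this sketch):

#include <cassert>
#include <cstdint>

// Branch-free form of "p < q ? x + y : x".
int32_t add_if_less(int32_t p, int32_t q, int32_t x, int32_t y) {
  int32_t mask = -int32_t(p < q);   // all ones when p < q, else zero
  return x + (mask & y);
}

int main() {
  assert(add_if_less(1, 2, 10, 5) == 15);    // p < q: x + y
  assert(add_if_less(3, 2, 10, 5) == 10);    // p >= q: x unchanged
  assert(add_if_less(-4, -1, 7, 1) == 8);
  return 0;
}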
--- a/hotspot/src/share/vm/opto/chaitin.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/chaitin.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1730,7 +1730,7 @@ // Now we see we need a base-Phi here to merge the bases const Type *t = base->bottom_type(); - base = new (C) PhiNode( derived->in(0), t ); + base = new PhiNode( derived->in(0), t ); for( i = 1; i < derived->req(); i++ ) { base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg)); t = t->meet(base->in(i)->bottom_type()); @@ -1800,7 +1800,7 @@ Block *phi_block = _cfg.get_block_for_node(phi); if (_cfg.get_block_for_node(phi_block->pred(2)) == block) { const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI]; - Node *spill = new (C) MachSpillCopyNode(MachSpillCopyNode::LoopPhiInput, phi, *mask, *mask); + Node *spill = new MachSpillCopyNode(MachSpillCopyNode::LoopPhiInput, phi, *mask, *mask); insert_proj( phi_block, 1, spill, maxlrg++ ); n->set_req(1,spill); must_recompute_live = true;
--- a/hotspot/src/share/vm/opto/coalesce.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/coalesce.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -291,7 +291,7 @@ _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map); } else { const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()]; - copy = new (C) MachSpillCopyNode(MachSpillCopyNode::PhiInput, m, *rm, *rm); + copy = new MachSpillCopyNode(MachSpillCopyNode::PhiInput, m, *rm, *rm); // Find a good place to insert. Kinda tricky, use a subroutine insert_copy_with_overlap(pred,copy,phi_name,src_name); } @@ -325,7 +325,7 @@ l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map); } else { const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()]; - copy = new (C) MachSpillCopyNode(MachSpillCopyNode::TwoAddress, m, *rm, *rm); + copy = new MachSpillCopyNode(MachSpillCopyNode::TwoAddress, m, *rm, *rm); // Insert the copy in the basic block, just before us b->insert_node(copy, l++); } @@ -372,7 +372,7 @@ continue; // Live out; do not pre-split // Split the lrg at this use const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()]; - Node* copy = new (C) MachSpillCopyNode(MachSpillCopyNode::DebugUse, inp, *rm, *rm); + Node* copy = new MachSpillCopyNode(MachSpillCopyNode::DebugUse, inp, *rm, *rm); // Insert the copy in the use-def chain n->set_req(inpidx, copy ); // Insert the copy in the basic block, just before us
--- a/hotspot/src/share/vm/opto/compile.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/compile.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -95,7 +95,7 @@ // Constant table base node singleton. MachConstantBaseNode* Compile::mach_constant_base_node() { if (_mach_constant_base_node == NULL) { - _mach_constant_base_node = new (C) MachConstantBaseNode(); + _mach_constant_base_node = new MachConstantBaseNode(); _mach_constant_base_node->add_req(C->root()); } return _mach_constant_base_node; @@ -392,6 +392,11 @@ uint next = 0; while (next < useful.size()) { Node *n = useful.at(next++); + if (n->is_SafePoint()) { + // We're done with a parsing phase. Replaced nodes are not valid + // beyond that point. + n->as_SafePoint()->delete_replaced_nodes(); + } // Use raw traversal of out edges since this code removes out edges int max = n->outcnt(); for (int j = 0; j < max; ++j) { @@ -673,7 +678,6 @@ _print_inlining_stream(NULL), _print_inlining_idx(0), _print_inlining_output(NULL), - _preserve_jvm_state(0), _interpreter_frame_size(0) { C = this; @@ -748,14 +752,14 @@ const TypeTuple *domain = StartOSRNode::osr_domain(); const TypeTuple *range = TypeTuple::make_range(method()->signature()); init_tf(TypeFunc::make(domain, range)); - StartNode* s = new (this) StartOSRNode(root(), domain); + StartNode* s = new StartOSRNode(root(), domain); initial_gvn()->set_type_bottom(s); init_start(s); cg = CallGenerator::for_osr(method(), entry_bci()); } else { // Normal case. init_tf(TypeFunc::make(method())); - StartNode* s = new (this) StartNode(root(), tf()->domain()); + StartNode* s = new StartNode(root(), tf()->domain()); initial_gvn()->set_type_bottom(s); init_start(s); if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) { @@ -783,7 +787,7 @@ return; } JVMState* jvms = build_start_state(start(), tf()); - if ((jvms = cg->generate(jvms, NULL)) == NULL) { + if ((jvms = cg->generate(jvms)) == NULL) { record_method_not_compilable("method parse failed"); return; } @@ -980,7 +984,6 @@ _print_inlining_stream(NULL), _print_inlining_idx(0), _print_inlining_output(NULL), - _preserve_jvm_state(0), _allowed_reasons(0), _interpreter_frame_size(0) { C = this; @@ -1061,9 +1064,9 @@ // Globally visible Nodes // First set TOP to NULL to give safe behavior during creation of RootNode set_cached_top_node(NULL); - set_root(new (this) RootNode()); + set_root(new RootNode()); // Now that you have a Root to point to, create the real TOP - set_cached_top_node( new (this) ConNode(Type::TOP) ); + set_cached_top_node( new ConNode(Type::TOP) ); set_recent_alloc(NULL, NULL); // Create Debug Information Recorder to record scopes, oopmaps, etc. @@ -1914,6 +1917,8 @@ for_igvn()->clear(); gvn->replace_with(&igvn); + _late_inlines_pos = _late_inlines.length(); + while (_boxing_late_inlines.length() > 0) { CallGenerator* cg = _boxing_late_inlines.pop(); cg->do_late_inline(); @@ -1977,8 +1982,8 @@ if (live_nodes() > (uint)LiveNodeCountInliningCutoff) { if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) { // PhaseIdealLoop is expensive so we only try it once we are - // out of loop and we only try it again if the previous helped - // got the number of nodes down significantly + // out of live nodes and we only try it again if the previous + // helped got the number of nodes down significantly PhaseIdealLoop ideal_loop( igvn, false, true ); if (failing()) return; low_live_nodes = live_nodes(); @@ -2072,6 +2077,10 @@ // Inline valueOf() methods now. 
inline_boxing_calls(igvn); + if (AlwaysIncrementalInline) { + inline_incrementally(igvn); + } + print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2); if (failing()) return; @@ -2757,9 +2766,9 @@ // Decode a narrow oop to match address // [R12 + narrow_oop_reg<<3 + offset] if (t->isa_oopptr()) { - nn = new (this) DecodeNNode(nn, t); + nn = new DecodeNNode(nn, t); } else { - nn = new (this) DecodeNKlassNode(nn, t); + nn = new DecodeNKlassNode(nn, t); } n->set_req(AddPNode::Base, nn); n->set_req(AddPNode::Address, nn); @@ -2880,7 +2889,7 @@ } } if (new_in2 != NULL) { - Node* cmpN = new (this) CmpNNode(in1->in(1), new_in2); + Node* cmpN = new CmpNNode(in1->in(1), new_in2); n->subsume_by(cmpN, this); if (in1->outcnt() == 0) { in1->disconnect_inputs(NULL, this); @@ -2979,8 +2988,8 @@ n->subsume_by(divmod->mod_proj(), this); } else { // replace a%b with a-((a/b)*b) - Node* mult = new (this) MulINode(d, d->in(2)); - Node* sub = new (this) SubINode(d->in(1), mult); + Node* mult = new MulINode(d, d->in(2)); + Node* sub = new SubINode(d->in(1), mult); n->subsume_by(sub, this); } } @@ -2999,8 +3008,8 @@ n->subsume_by(divmod->mod_proj(), this); } else { // replace a%b with a-((a/b)*b) - Node* mult = new (this) MulLNode(d, d->in(2)); - Node* sub = new (this) SubLNode(d->in(1), mult); + Node* mult = new MulLNode(d, d->in(2)); + Node* sub = new SubLNode(d->in(1), mult); n->subsume_by(sub, this); } } @@ -3049,7 +3058,7 @@ } } else { if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) { - Node* shift = new (this) AndINode(in2, ConNode::make(this, TypeInt::make(mask))); + Node* shift = new AndINode(in2, ConNode::make(this, TypeInt::make(mask))); n->set_req(2, shift); } }
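One hunk above keeps the "replace a%b with a-((a/b)*b)" fallback used when the platform has no paired DivModI/DivModL match rule. The identity relies only on truncating integer division and is easy to verify standalone (illustration only; mod_via_div is a name invented for this sketch):

#include <cassert>
#include <cstdio>

// For truncating division (C++ and Java alike): a % b == a - (a / b) * b.
int mod_via_div(int a, int b) {
  int q = a / b;        // truncated quotient, as DivINode produces
  return a - q * b;     // same value the ModI node would produce
}

int main() {
  const int as[] = { 17, -17, 0, 123456789, -1 };
  const int bs[] = { 5, -5, 7, 3 };
  for (int a : as) {
    for (int b : bs) {
      assert(mod_via_div(a, b) == a % b);
    }
  }
  std::puts("ok");
  return 0;
}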
--- a/hotspot/src/share/vm/opto/compile.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/compile.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -431,9 +431,6 @@ // Remove the speculative part of types and clean up the graph void remove_speculative_types(PhaseIterGVN &igvn); - // Are we within a PreserveJVMState block? - int _preserve_jvm_state; - void* _replay_inline_data; // Pointer to data loaded from file void print_inlining_init(); @@ -1198,21 +1195,6 @@ // Auxiliary method for randomized fuzzing/stressing static bool randomized_select(int count); - - // enter a PreserveJVMState block - void inc_preserve_jvm_state() { - _preserve_jvm_state++; - } - - // exit a PreserveJVMState block - void dec_preserve_jvm_state() { - _preserve_jvm_state--; - assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative"); - } - - bool has_preserve_jvm_state() const { - return _preserve_jvm_state > 0; - } }; #endif // SHARE_VM_OPTO_COMPILE_HPP
--- a/hotspot/src/share/vm/opto/connode.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/connode.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -45,17 +45,17 @@ //------------------------------make------------------------------------------- ConNode *ConNode::make( Compile* C, const Type *t ) { switch( t->basic_type() ) { - case T_INT: return new (C) ConINode( t->is_int() ); - case T_LONG: return new (C) ConLNode( t->is_long() ); - case T_FLOAT: return new (C) ConFNode( t->is_float_constant() ); - case T_DOUBLE: return new (C) ConDNode( t->is_double_constant() ); - case T_VOID: return new (C) ConNode ( Type::TOP ); - case T_OBJECT: return new (C) ConPNode( t->is_ptr() ); - case T_ARRAY: return new (C) ConPNode( t->is_aryptr() ); - case T_ADDRESS: return new (C) ConPNode( t->is_ptr() ); - case T_NARROWOOP: return new (C) ConNNode( t->is_narrowoop() ); - case T_NARROWKLASS: return new (C) ConNKlassNode( t->is_narrowklass() ); - case T_METADATA: return new (C) ConPNode( t->is_ptr() ); + case T_INT: return new ConINode( t->is_int() ); + case T_LONG: return new ConLNode( t->is_long() ); + case T_FLOAT: return new ConFNode( t->is_float_constant() ); + case T_DOUBLE: return new ConDNode( t->is_double_constant() ); + case T_VOID: return new ConNode ( Type::TOP ); + case T_OBJECT: return new ConPNode( t->is_ptr() ); + case T_ARRAY: return new ConPNode( t->is_aryptr() ); + case T_ADDRESS: return new ConPNode( t->is_ptr() ); + case T_NARROWOOP: return new ConNNode( t->is_narrowoop() ); + case T_NARROWKLASS: return new ConNKlassNode( t->is_narrowklass() ); + case T_METADATA: return new ConPNode( t->is_ptr() ); // Expected cases: TypePtr::NULL_PTR, any is_rawptr() // Also seen: AnyPtr(TopPTR *+top); from command line: // r -XX:+PrintOpto -XX:CIStart=285 -XX:+CompileTheWorld -XX:CompileTheWorldStartAt=660
--- a/hotspot/src/share/vm/opto/connode.hpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/connode.hpp Wed Jul 05 19:46:17 2017 +0200 @@ -58,7 +58,7 @@ // Factory method: static ConINode* make( Compile* C, int con ) { - return new (C) ConINode( TypeInt::make(con) ); + return new ConINode( TypeInt::make(con) ); } }; @@ -73,9 +73,9 @@ // Factory methods: static ConPNode* make( Compile *C ,address con ) { if (con == NULL) - return new (C) ConPNode( TypePtr::NULL_PTR ) ; + return new ConPNode( TypePtr::NULL_PTR ) ; else - return new (C) ConPNode( TypeRawPtr::make(con) ); + return new ConPNode( TypeRawPtr::make(con) ); } }; @@ -106,7 +106,7 @@ // Factory method: static ConLNode* make( Compile *C ,jlong con ) { - return new (C) ConLNode( TypeLong::make(con) ); + return new ConLNode( TypeLong::make(con) ); } }; @@ -120,7 +120,7 @@ // Factory method: static ConFNode* make( Compile *C, float con ) { - return new (C) ConFNode( TypeF::make(con) ); + return new ConFNode( TypeF::make(con) ); } }; @@ -134,7 +134,7 @@ // Factory method: static ConDNode* make( Compile *C, double con ) { - return new (C) ConDNode( TypeD::make(con) ); + return new ConDNode( TypeD::make(con) ); } };
--- a/hotspot/src/share/vm/opto/convertnode.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/convertnode.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -374,11 +374,11 @@ ryhi = -rylo0; } - Node* cx = phase->transform( new (phase->C) ConvI2LNode(x, TypeLong::make(rxlo, rxhi, widen)) ); - Node* cy = phase->transform( new (phase->C) ConvI2LNode(y, TypeLong::make(rylo, ryhi, widen)) ); + Node* cx = phase->transform( new ConvI2LNode(x, TypeLong::make(rxlo, rxhi, widen)) ); + Node* cy = phase->transform( new ConvI2LNode(y, TypeLong::make(rylo, ryhi, widen)) ); switch (op) { - case Op_AddI: return new (phase->C) AddLNode(cx, cy); - case Op_SubI: return new (phase->C) SubLNode(cx, cy); + case Op_AddI: return new AddLNode(cx, cy); + case Op_SubI: return new SubLNode(cx, cy); default: ShouldNotReachHere(); } } @@ -452,9 +452,9 @@ assert( x != andl && y != andl, "dead loop in ConvL2INode::Ideal" ); if (phase->type(x) == Type::TOP) return NULL; if (phase->type(y) == Type::TOP) return NULL; - Node *add1 = phase->transform(new (phase->C) ConvL2INode(x)); - Node *add2 = phase->transform(new (phase->C) ConvL2INode(y)); - return new (phase->C) AddINode(add1,add2); + Node *add1 = phase->transform(new ConvL2INode(x)); + Node *add2 = phase->transform(new ConvL2INode(y)); + return new AddINode(add1,add2); } // Disable optimization: LoadL->ConvL2I ==> LoadI.
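The ConvL2INode::Ideal hunk above pushes the long-to-int narrowing through AddL. That is sound because narrowing keeps only the low 32 bits, so it distributes over addition modulo 2^32; a standalone check of the identity (illustration only; low32 is a helper invented for this sketch, and the test values are kept small enough to avoid signed overflow):

#include <cassert>
#include <cstdint>

// "ConvL2I" modeled as keeping the low 32 bits of a 64-bit value.
uint32_t low32(int64_t v) { return uint32_t(uint64_t(v)); }

int main() {
  const int64_t vals[] = { 0, 1, -1, 0x123456789aLL, -0x123456789aLL,
                           0x7fffffffLL, -0x80000000LL };
  for (int64_t x : vals) {
    for (int64_t y : vals) {
      // low32(x + y) == low32(x) + low32(y)  (32-bit wraparound addition)
      assert(low32(x + y) == uint32_t(low32(x) + low32(y)));
    }
  }
  return 0;
}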
--- a/hotspot/src/share/vm/opto/divnode.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/divnode.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -106,7 +106,7 @@ // division by +/- 1 if (!d_pos) { // Just negate the value - q = new (phase->C) SubINode(phase->intcon(0), dividend); + q = new SubINode(phase->intcon(0), dividend); } } else if ( is_power_of_2(d) ) { // division by +/- a power of 2 @@ -143,18 +143,18 @@ // (-2+3)>>2 becomes 0, etc. // Compute 0 or -1, based on sign bit - Node *sign = phase->transform(new (phase->C) RShiftINode(dividend, phase->intcon(N - 1))); + Node *sign = phase->transform(new RShiftINode(dividend, phase->intcon(N - 1))); // Mask sign bit to the low sign bits - Node *round = phase->transform(new (phase->C) URShiftINode(sign, phase->intcon(N - l))); + Node *round = phase->transform(new URShiftINode(sign, phase->intcon(N - l))); // Round up before shifting - dividend = phase->transform(new (phase->C) AddINode(dividend, round)); + dividend = phase->transform(new AddINode(dividend, round)); } // Shift for division - q = new (phase->C) RShiftINode(dividend, phase->intcon(l)); + q = new RShiftINode(dividend, phase->intcon(l)); if (!d_pos) { - q = new (phase->C) SubINode(phase->intcon(0), phase->transform(q)); + q = new SubINode(phase->intcon(0), phase->transform(q)); } } else { // Attempt the jint constant divide -> multiply transform found in @@ -166,33 +166,33 @@ jint shift_const; if (magic_int_divide_constants(d, magic_const, shift_const)) { Node *magic = phase->longcon(magic_const); - Node *dividend_long = phase->transform(new (phase->C) ConvI2LNode(dividend)); + Node *dividend_long = phase->transform(new ConvI2LNode(dividend)); // Compute the high half of the dividend x magic multiplication - Node *mul_hi = phase->transform(new (phase->C) MulLNode(dividend_long, magic)); + Node *mul_hi = phase->transform(new MulLNode(dividend_long, magic)); if (magic_const < 0) { - mul_hi = phase->transform(new (phase->C) RShiftLNode(mul_hi, phase->intcon(N))); - mul_hi = phase->transform(new (phase->C) ConvL2INode(mul_hi)); + mul_hi = phase->transform(new RShiftLNode(mul_hi, phase->intcon(N))); + mul_hi = phase->transform(new ConvL2INode(mul_hi)); // The magic multiplier is too large for a 32 bit constant. We've adjusted // it down by 2^32, but have to add 1 dividend back in after the multiplication. // This handles the "overflow" case described by Granlund and Montgomery. - mul_hi = phase->transform(new (phase->C) AddINode(dividend, mul_hi)); + mul_hi = phase->transform(new AddINode(dividend, mul_hi)); // Shift over the (adjusted) mulhi if (shift_const != 0) { - mul_hi = phase->transform(new (phase->C) RShiftINode(mul_hi, phase->intcon(shift_const))); + mul_hi = phase->transform(new RShiftINode(mul_hi, phase->intcon(shift_const))); } } else { // No add is required, we can merge the shifts together. - mul_hi = phase->transform(new (phase->C) RShiftLNode(mul_hi, phase->intcon(N + shift_const))); - mul_hi = phase->transform(new (phase->C) ConvL2INode(mul_hi)); + mul_hi = phase->transform(new RShiftLNode(mul_hi, phase->intcon(N + shift_const))); + mul_hi = phase->transform(new ConvL2INode(mul_hi)); } // Get a 0 or -1 from the sign of the dividend. Node *addend0 = mul_hi; - Node *addend1 = phase->transform(new (phase->C) RShiftINode(dividend, phase->intcon(N-1))); + Node *addend1 = phase->transform(new RShiftINode(dividend, phase->intcon(N-1))); // If the divisor is negative, swap the order of the input addends; // this has the effect of negating the quotient. 
@@ -202,7 +202,7 @@ // Adjust the final quotient by subtracting -1 (adding 1) // from the mul_hi. - q = new (phase->C) SubINode(addend0, addend1); + q = new SubINode(addend0, addend1); } } @@ -261,7 +261,7 @@ // no need to synthesize it in ideal nodes. if (Matcher::has_match_rule(Op_MulHiL)) { Node* v = phase->longcon(magic_const); - return new (phase->C) MulHiLNode(dividend, v); + return new MulHiLNode(dividend, v); } // Taken from Hacker's Delight, Fig. 8-2. Multiply high signed. @@ -287,11 +287,11 @@ const int N = 64; // Dummy node to keep intermediate nodes alive during construction - Node* hook = new (phase->C) Node(4); + Node* hook = new Node(4); // u0 = u & 0xFFFFFFFF; u1 = u >> 32; - Node* u0 = phase->transform(new (phase->C) AndLNode(dividend, phase->longcon(0xFFFFFFFF))); - Node* u1 = phase->transform(new (phase->C) RShiftLNode(dividend, phase->intcon(N / 2))); + Node* u0 = phase->transform(new AndLNode(dividend, phase->longcon(0xFFFFFFFF))); + Node* u1 = phase->transform(new RShiftLNode(dividend, phase->intcon(N / 2))); hook->init_req(0, u0); hook->init_req(1, u1); @@ -300,29 +300,29 @@ Node* v1 = phase->longcon(magic_const >> (N / 2)); // w0 = u0*v0; - Node* w0 = phase->transform(new (phase->C) MulLNode(u0, v0)); + Node* w0 = phase->transform(new MulLNode(u0, v0)); // t = u1*v0 + (w0 >> 32); - Node* u1v0 = phase->transform(new (phase->C) MulLNode(u1, v0)); - Node* temp = phase->transform(new (phase->C) URShiftLNode(w0, phase->intcon(N / 2))); - Node* t = phase->transform(new (phase->C) AddLNode(u1v0, temp)); + Node* u1v0 = phase->transform(new MulLNode(u1, v0)); + Node* temp = phase->transform(new URShiftLNode(w0, phase->intcon(N / 2))); + Node* t = phase->transform(new AddLNode(u1v0, temp)); hook->init_req(2, t); // w1 = t & 0xFFFFFFFF; - Node* w1 = phase->transform(new (phase->C) AndLNode(t, phase->longcon(0xFFFFFFFF))); + Node* w1 = phase->transform(new AndLNode(t, phase->longcon(0xFFFFFFFF))); hook->init_req(3, w1); // w2 = t >> 32; - Node* w2 = phase->transform(new (phase->C) RShiftLNode(t, phase->intcon(N / 2))); + Node* w2 = phase->transform(new RShiftLNode(t, phase->intcon(N / 2))); // w1 = u0*v1 + w1; - Node* u0v1 = phase->transform(new (phase->C) MulLNode(u0, v1)); - w1 = phase->transform(new (phase->C) AddLNode(u0v1, w1)); + Node* u0v1 = phase->transform(new MulLNode(u0, v1)); + w1 = phase->transform(new AddLNode(u0v1, w1)); // return u1*v1 + w2 + (w1 >> 32); - Node* u1v1 = phase->transform(new (phase->C) MulLNode(u1, v1)); - Node* temp1 = phase->transform(new (phase->C) AddLNode(u1v1, w2)); - Node* temp2 = phase->transform(new (phase->C) RShiftLNode(w1, phase->intcon(N / 2))); + Node* u1v1 = phase->transform(new MulLNode(u1, v1)); + Node* temp1 = phase->transform(new AddLNode(u1v1, w2)); + Node* temp2 = phase->transform(new RShiftLNode(w1, phase->intcon(N / 2))); // Remove the bogus extra edges used to keep things alive PhaseIterGVN* igvn = phase->is_IterGVN(); @@ -334,7 +334,7 @@ } } - return new (phase->C) AddLNode(temp1, temp2); + return new AddLNode(temp1, temp2); } @@ -357,7 +357,7 @@ // division by +/- 1 if (!d_pos) { // Just negate the value - q = new (phase->C) SubLNode(phase->longcon(0), dividend); + q = new SubLNode(phase->longcon(0), dividend); } } else if ( is_power_of_2_long(d) ) { @@ -396,18 +396,18 @@ // (-2+3)>>2 becomes 0, etc. 
// Compute 0 or -1, based on sign bit - Node *sign = phase->transform(new (phase->C) RShiftLNode(dividend, phase->intcon(N - 1))); + Node *sign = phase->transform(new RShiftLNode(dividend, phase->intcon(N - 1))); // Mask sign bit to the low sign bits - Node *round = phase->transform(new (phase->C) URShiftLNode(sign, phase->intcon(N - l))); + Node *round = phase->transform(new URShiftLNode(sign, phase->intcon(N - l))); // Round up before shifting - dividend = phase->transform(new (phase->C) AddLNode(dividend, round)); + dividend = phase->transform(new AddLNode(dividend, round)); } // Shift for division - q = new (phase->C) RShiftLNode(dividend, phase->intcon(l)); + q = new RShiftLNode(dividend, phase->intcon(l)); if (!d_pos) { - q = new (phase->C) SubLNode(phase->longcon(0), phase->transform(q)); + q = new SubLNode(phase->longcon(0), phase->transform(q)); } } else if ( !Matcher::use_asm_for_ldiv_by_con(d) ) { // Use hardware DIV instruction when // it is faster than code generated below. @@ -427,17 +427,17 @@ // The magic multiplier is too large for a 64 bit constant. We've adjusted // it down by 2^64, but have to add 1 dividend back in after the multiplication. // This handles the "overflow" case described by Granlund and Montgomery. - mul_hi = phase->transform(new (phase->C) AddLNode(dividend, mul_hi)); + mul_hi = phase->transform(new AddLNode(dividend, mul_hi)); } // Shift over the (adjusted) mulhi if (shift_const != 0) { - mul_hi = phase->transform(new (phase->C) RShiftLNode(mul_hi, phase->intcon(shift_const))); + mul_hi = phase->transform(new RShiftLNode(mul_hi, phase->intcon(shift_const))); } // Get a 0 or -1 from the sign of the dividend. Node *addend0 = mul_hi; - Node *addend1 = phase->transform(new (phase->C) RShiftLNode(dividend, phase->intcon(N-1))); + Node *addend1 = phase->transform(new RShiftLNode(dividend, phase->intcon(N-1))); // If the divisor is negative, swap the order of the input addends; // this has the effect of negating the quotient. @@ -447,7 +447,7 @@ // Adjust the final quotient by subtracting -1 (adding 1) // from the mul_hi. - q = new (phase->C) SubLNode(addend0, addend1); + q = new SubLNode(addend0, addend1); } } @@ -737,7 +737,7 @@ assert( frexp((double)reciprocal, &exp) == 0.5, "reciprocal should be power of 2" ); // return multiplication by the reciprocal - return (new (phase->C) MulFNode(in(1), phase->makecon(TypeF::make(reciprocal)))); + return (new MulFNode(in(1), phase->makecon(TypeF::make(reciprocal)))); } //============================================================================= @@ -831,7 +831,7 @@ assert( frexp(reciprocal, &exp) == 0.5, "reciprocal should be power of 2" ); // return multiplication by the reciprocal - return (new (phase->C) MulDNode(in(1), phase->makecon(TypeD::make(reciprocal)))); + return (new MulDNode(in(1), phase->makecon(TypeD::make(reciprocal)))); } //============================================================================= @@ -858,7 +858,7 @@ if( !ti->is_con() ) return NULL; jint con = ti->get_con(); - Node *hook = new (phase->C) Node(1); + Node *hook = new Node(1); // First, special check for modulo 2^k-1 if( con >= 0 && con < max_jint && is_power_of_2(con+1) ) { @@ -878,24 +878,24 @@ hook->init_req(0, x); // Add a use to x to prevent him from dying // Generate code to reduce X rapidly to nearly 2^k-1. 
for( int i = 0; i < trip_count; i++ ) { - Node *xl = phase->transform( new (phase->C) AndINode(x,divisor) ); - Node *xh = phase->transform( new (phase->C) RShiftINode(x,phase->intcon(k)) ); // Must be signed - x = phase->transform( new (phase->C) AddINode(xh,xl) ); + Node *xl = phase->transform( new AndINode(x,divisor) ); + Node *xh = phase->transform( new RShiftINode(x,phase->intcon(k)) ); // Must be signed + x = phase->transform( new AddINode(xh,xl) ); hook->set_req(0, x); } // Generate sign-fixup code. Was original value positive? // int hack_res = (i >= 0) ? divisor : 1; - Node *cmp1 = phase->transform( new (phase->C) CmpINode( in(1), phase->intcon(0) ) ); - Node *bol1 = phase->transform( new (phase->C) BoolNode( cmp1, BoolTest::ge ) ); - Node *cmov1= phase->transform( new (phase->C) CMoveINode(bol1, phase->intcon(1), divisor, TypeInt::POS) ); + Node *cmp1 = phase->transform( new CmpINode( in(1), phase->intcon(0) ) ); + Node *bol1 = phase->transform( new BoolNode( cmp1, BoolTest::ge ) ); + Node *cmov1= phase->transform( new CMoveINode(bol1, phase->intcon(1), divisor, TypeInt::POS) ); // if( x >= hack_res ) x -= divisor; - Node *sub = phase->transform( new (phase->C) SubINode( x, divisor ) ); - Node *cmp2 = phase->transform( new (phase->C) CmpINode( x, cmov1 ) ); - Node *bol2 = phase->transform( new (phase->C) BoolNode( cmp2, BoolTest::ge ) ); + Node *sub = phase->transform( new SubINode( x, divisor ) ); + Node *cmp2 = phase->transform( new CmpINode( x, cmov1 ) ); + Node *bol2 = phase->transform( new BoolNode( cmp2, BoolTest::ge ) ); // Convention is to not transform the return value of an Ideal // since Ideal is expected to return a modified 'this' or a new node. - Node *cmov2= new (phase->C) CMoveINode(bol2, x, sub, TypeInt::INT); + Node *cmov2= new CMoveINode(bol2, x, sub, TypeInt::INT); // cmov2 is now the mod // Now remove the bogus extra edges used to keep things alive @@ -918,7 +918,7 @@ jint pos_con = (con >= 0) ? con : -con; // integer Mod 1 is always 0 - if( pos_con == 1 ) return new (phase->C) ConINode(TypeInt::ZERO); + if( pos_con == 1 ) return new ConINode(TypeInt::ZERO); int log2_con = -1; @@ -931,7 +931,7 @@ // See if this can be masked, if the dividend is non-negative if( dti && dti->_lo >= 0 ) - return ( new (phase->C) AndINode( in(1), phase->intcon( pos_con-1 ) ) ); + return ( new AndINode( in(1), phase->intcon( pos_con-1 ) ) ); } // Save in(1) so that it cannot be changed or deleted @@ -946,12 +946,12 @@ Node *mult = NULL; if( log2_con >= 0 ) - mult = phase->transform( new (phase->C) LShiftINode( divide, phase->intcon( log2_con ) ) ); + mult = phase->transform( new LShiftINode( divide, phase->intcon( log2_con ) ) ); else - mult = phase->transform( new (phase->C) MulINode( divide, phase->intcon( pos_con ) ) ); + mult = phase->transform( new MulINode( divide, phase->intcon( pos_con ) ) ); // Finally, subtract the multiplied divided value from the original - result = new (phase->C) SubINode( in(1), mult ); + result = new SubINode( in(1), mult ); } // Now remove the bogus extra edges used to keep things alive @@ -1029,7 +1029,7 @@ if( !tl->is_con() ) return NULL; jlong con = tl->get_con(); - Node *hook = new (phase->C) Node(1); + Node *hook = new Node(1); // Expand mod if( con >= 0 && con < max_jlong && is_power_of_2_long(con+1) ) { @@ -1051,24 +1051,24 @@ hook->init_req(0, x); // Add a use to x to prevent him from dying // Generate code to reduce X rapidly to nearly 2^k-1. 
for( int i = 0; i < trip_count; i++ ) { - Node *xl = phase->transform( new (phase->C) AndLNode(x,divisor) ); - Node *xh = phase->transform( new (phase->C) RShiftLNode(x,phase->intcon(k)) ); // Must be signed - x = phase->transform( new (phase->C) AddLNode(xh,xl) ); + Node *xl = phase->transform( new AndLNode(x,divisor) ); + Node *xh = phase->transform( new RShiftLNode(x,phase->intcon(k)) ); // Must be signed + x = phase->transform( new AddLNode(xh,xl) ); hook->set_req(0, x); // Add a use to x to prevent him from dying } // Generate sign-fixup code. Was original value positive? // long hack_res = (i >= 0) ? divisor : CONST64(1); - Node *cmp1 = phase->transform( new (phase->C) CmpLNode( in(1), phase->longcon(0) ) ); - Node *bol1 = phase->transform( new (phase->C) BoolNode( cmp1, BoolTest::ge ) ); - Node *cmov1= phase->transform( new (phase->C) CMoveLNode(bol1, phase->longcon(1), divisor, TypeLong::LONG) ); + Node *cmp1 = phase->transform( new CmpLNode( in(1), phase->longcon(0) ) ); + Node *bol1 = phase->transform( new BoolNode( cmp1, BoolTest::ge ) ); + Node *cmov1= phase->transform( new CMoveLNode(bol1, phase->longcon(1), divisor, TypeLong::LONG) ); // if( x >= hack_res ) x -= divisor; - Node *sub = phase->transform( new (phase->C) SubLNode( x, divisor ) ); - Node *cmp2 = phase->transform( new (phase->C) CmpLNode( x, cmov1 ) ); - Node *bol2 = phase->transform( new (phase->C) BoolNode( cmp2, BoolTest::ge ) ); + Node *sub = phase->transform( new SubLNode( x, divisor ) ); + Node *cmp2 = phase->transform( new CmpLNode( x, cmov1 ) ); + Node *bol2 = phase->transform( new BoolNode( cmp2, BoolTest::ge ) ); // Convention is to not transform the return value of an Ideal // since Ideal is expected to return a modified 'this' or a new node. - Node *cmov2= new (phase->C) CMoveLNode(bol2, x, sub, TypeLong::LONG); + Node *cmov2= new CMoveLNode(bol2, x, sub, TypeLong::LONG); // cmov2 is now the mod // Now remove the bogus extra edges used to keep things alive @@ -1091,7 +1091,7 @@ jlong pos_con = (con >= 0) ? 
con : -con; // integer Mod 1 is always 0 - if( pos_con == 1 ) return new (phase->C) ConLNode(TypeLong::ZERO); + if( pos_con == 1 ) return new ConLNode(TypeLong::ZERO); int log2_con = -1; @@ -1104,7 +1104,7 @@ // See if this can be masked, if the dividend is non-negative if( dtl && dtl->_lo >= 0 ) - return ( new (phase->C) AndLNode( in(1), phase->longcon( pos_con-1 ) ) ); + return ( new AndLNode( in(1), phase->longcon( pos_con-1 ) ) ); } // Save in(1) so that it cannot be changed or deleted @@ -1119,12 +1119,12 @@ Node *mult = NULL; if( log2_con >= 0 ) - mult = phase->transform( new (phase->C) LShiftLNode( divide, phase->intcon( log2_con ) ) ); + mult = phase->transform( new LShiftLNode( divide, phase->intcon( log2_con ) ) ); else - mult = phase->transform( new (phase->C) MulLNode( divide, phase->longcon( pos_con ) ) ); + mult = phase->transform( new MulLNode( divide, phase->longcon( pos_con ) ) ); // Finally, subtract the multiplied divided value from the original - result = new (phase->C) SubLNode( in(1), mult ); + result = new SubLNode( in(1), mult ); } // Now remove the bogus extra edges used to keep things alive @@ -1279,9 +1279,9 @@ assert(n->Opcode() == Op_DivI || n->Opcode() == Op_ModI, "only div or mod input pattern accepted"); - DivModINode* divmod = new (C) DivModINode(n->in(0), n->in(1), n->in(2)); - Node* dproj = new (C) ProjNode(divmod, DivModNode::div_proj_num); - Node* mproj = new (C) ProjNode(divmod, DivModNode::mod_proj_num); + DivModINode* divmod = new DivModINode(n->in(0), n->in(1), n->in(2)); + Node* dproj = new ProjNode(divmod, DivModNode::div_proj_num); + Node* mproj = new ProjNode(divmod, DivModNode::mod_proj_num); return divmod; } @@ -1291,9 +1291,9 @@ assert(n->Opcode() == Op_DivL || n->Opcode() == Op_ModL, "only div or mod input pattern accepted"); - DivModLNode* divmod = new (C) DivModLNode(n->in(0), n->in(1), n->in(2)); - Node* dproj = new (C) ProjNode(divmod, DivModNode::div_proj_num); - Node* mproj = new (C) ProjNode(divmod, DivModNode::mod_proj_num); + DivModLNode* divmod = new DivModLNode(n->in(0), n->in(1), n->in(2)); + Node* dproj = new ProjNode(divmod, DivModNode::div_proj_num); + Node* mproj = new ProjNode(divmod, DivModNode::mod_proj_num); return divmod; } @@ -1308,7 +1308,7 @@ assert(proj->_con == mod_proj_num, "must be div or mod projection"); rm = match->modI_proj_mask(); } - return new (match->C)MachProjNode(this, proj->_con, rm, ideal_reg); + return new MachProjNode(this, proj->_con, rm, ideal_reg); } @@ -1323,5 +1323,5 @@ assert(proj->_con == mod_proj_num, "must be div or mod projection"); rm = match->modL_proj_mask(); } - return new (match->C)MachProjNode(this, proj->_con, rm, ideal_reg); + return new MachProjNode(this, proj->_con, rm, ideal_reg); }
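The transform_int_divide logic above is unchanged except for the allocation syntax; it builds the Granlund-Montgomery multiply-high sequence for constant divisors. A standalone version for a single divisor shows the same mul_hi / add-back / shift / sign-fixup steps (illustration only: 0x92492493 and shift 2 are the published magic pair for 7 from Hacker's Delight, div7 is a name invented for this sketch, and arithmetic right shift of negative values is assumed, as it is on HotSpot's platforms):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Signed 32-bit division by the constant 7 via multiply-high.
int32_t div7(int32_t n) {
  const int32_t magic = int32_t(0x92492493);   // magic constant for d = 7 (negative)
  const int     shift = 2;                     // shift constant for d = 7
  int32_t mul_hi = int32_t((int64_t(n) * magic) >> 32);  // high half of n * magic
  mul_hi += n;                                 // add-back because magic < 0 ("overflow" case)
  mul_hi >>= shift;                            // shift over the adjusted mul_hi
  int32_t sign = int32_t(uint32_t(n) >> 31);   // 0 or 1 from the dividend's sign bit
  return mul_hi + sign;                        // final quotient adjustment
}

int main() {
  for (int32_t n = -1000; n <= 1000; n++) {
    assert(div7(n) == n / 7);
  }
  assert(div7(INT32_MIN) == INT32_MIN / 7);
  assert(div7(INT32_MAX) == INT32_MAX / 7);
  std::printf("div7 matches n / 7\n");
  return 0;
}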
--- a/hotspot/src/share/vm/opto/doCall.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/doCall.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -119,12 +119,12 @@ if (allow_inline && allow_intrinsics) { CallGenerator* cg = find_intrinsic(callee, call_does_dispatch); if (cg != NULL) { - if (cg->is_predicted()) { + if (cg->is_predicated()) { // Code without intrinsic but, hopefully, inlined. CallGenerator* inline_cg = this->call_generator(callee, vtable_index, call_does_dispatch, jvms, allow_inline, prof_factor, speculative_receiver_type, false); if (inline_cg != NULL) { - cg = CallGenerator::for_predicted_intrinsic(cg, inline_cg); + cg = CallGenerator::for_predicated_intrinsic(cg, inline_cg); } } @@ -525,7 +525,7 @@ // because exceptions don't return to the call site.) profile_call(receiver); - JVMState* new_jvms = cg->generate(jvms, this); + JVMState* new_jvms = cg->generate(jvms); if (new_jvms == NULL) { // When inlining attempt fails (e.g., too many arguments), // it may contaminate the current compile state, making it @@ -539,7 +539,7 @@ // intrinsic was expecting to optimize. Should always be possible to // get a normal java call that may inline in that case cg = C->call_generator(cg->method(), vtable_index, call_does_dispatch, jvms, try_inline, prof_factor(), speculative_receiver_type, /* allow_intrinsics= */ false); - new_jvms = cg->generate(jvms, this); + new_jvms = cg->generate(jvms); if (new_jvms == NULL) { guarantee(failing(), "call failed to generate: calls should work"); return; @@ -596,7 +596,7 @@ const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); if (arg_type != NULL && !arg_type->higher_equal(sig_type)) { Node* retnode = pop(); - Node* cast_obj = _gvn.transform(new (C) CheckCastPPNode(control(), retnode, sig_type)); + Node* cast_obj = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type)); push(cast_obj); } } @@ -689,7 +689,7 @@ } int len = bcis->length(); - CatchNode *cn = new (C) CatchNode(control(), i_o, len+1); + CatchNode *cn = new CatchNode(control(), i_o, len+1); Node *catch_ = _gvn.transform(cn); // now branch with the exception state to each of the (potential) @@ -700,14 +700,14 @@ // Locals are just copied from before the call. // Get control from the CatchNode. int handler_bci = bcis->at(i); - Node* ctrl = _gvn.transform( new (C) CatchProjNode(catch_, i+1,handler_bci)); + Node* ctrl = _gvn.transform( new CatchProjNode(catch_, i+1,handler_bci)); // This handler cannot happen? if (ctrl == top()) continue; set_control(ctrl); // Create exception oop const TypeInstPtr* extype = extypes->at(i)->is_instptr(); - Node *ex_oop = _gvn.transform(new (C) CreateExNode(extypes->at(i), ctrl, i_o)); + Node *ex_oop = _gvn.transform(new CreateExNode(extypes->at(i), ctrl, i_o)); // Handle unloaded exception classes. if (saw_unloaded->contains(handler_bci)) { @@ -746,7 +746,7 @@ // The first CatchProj is for the normal return. // (Note: If this is a call to rethrow_Java, this node goes dead.) - set_control(_gvn.transform( new (C) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci))); + set_control(_gvn.transform( new CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci))); } @@ -797,7 +797,7 @@ // I'm loading the class from, I can replace the LoadKlass with the // klass constant for the exception oop. 
if( ex_node->is_Phi() ) { - ex_klass_node = new (C) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT ); + ex_klass_node = new PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT ); for( uint i = 1; i < ex_node->req(); i++ ) { Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() ); Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) ); @@ -863,7 +863,7 @@ PreserveJVMState pjvms(this); const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr(); assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness"); - Node* ex_oop = _gvn.transform(new (C) CheckCastPPNode(control(), ex_node, tinst)); + Node* ex_oop = _gvn.transform(new CheckCastPPNode(control(), ex_node, tinst)); push_ex_oop(ex_oop); // Push exception oop for handler #ifndef PRODUCT if (PrintOpto && WizardMode) {
--- a/hotspot/src/share/vm/opto/escape.cpp Thu Jun 19 11:22:18 2014 -0700 +++ b/hotspot/src/share/vm/opto/escape.cpp Wed Jul 05 19:46:17 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -939,7 +939,13 @@ strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 || - strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0) + strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 || + strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0) ))) { call->dump(); fatal(err_msg_res("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name));
--- a/hotspot/src/share/vm/opto/generateOptoStub.cpp Thu Jun 19 11:22:18 2014 -0700
+++ b/hotspot/src/share/vm/opto/generateOptoStub.cpp Wed Jul 05 19:46:17 2017 +0200
@@ -50,7 +50,7 @@
const TypeTuple *jrange = C->tf()->range();
// The procedure start
- StartNode* start = new (C) StartNode(root(), jdomain);
+ StartNode* start = new StartNode(root(), jdomain);
_gvn.set_type_bottom(start);
// Make a map, with JVM state
@@ -64,7 +64,7 @@
jvms->set_scloff(max_map);
jvms->set_endoff(max_map);
{
- SafePointNode *map = new (C) SafePointNode( max_map, jvms );
+ SafePointNode *map = new SafePointNode( max_map, jvms );
jvms->set_map(map);
set_jvms(jvms);
assert(map == this->map(), "kit.map is set");
@@ -73,7 +73,7 @@
// Make up the parameters
uint i;
for( i = 0; i < parm_cnt; i++ )
- map()->init_req(i, _gvn.transform(new (C) ParmNode(start, i)));
+ map()->init_req(i, _gvn.transform(new ParmNode(start, i)));
for( ; i<map()->req(); i++ )
map()->init_req(i, top()); // For nicer debugging
@@ -81,7 +81,7 @@
set_all_memory(map()->memory());
// Get base of thread-local storage area
- Node* thread = _gvn.transform( new (C) ThreadLocalNode() );
+ Node* thread = _gvn.transform( new ThreadLocalNode() );
const int NoAlias = Compile::AliasIdxBot;
@@ -166,8 +166,7 @@
//-----------------------------
// Make the call node
- CallRuntimeNode *call = new (C)
- CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
+ CallRuntimeNode *call = new CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
//-----------------------------
// Fix-up the debug info for the call
@@ -184,7 +183,7 @@
for (; i < parm_cnt; i++) { // Regular input arguments
// Convert ints to longs if required.
if (CCallingConventionRequiresIntsAsLongs && jdomain->field_at(i)->isa_int()) {
- Node* int_as_long = _gvn.transform(new (C) ConvI2LNode(map()->in(i)));
+ Node* int_as_long = _gvn.transform(new ConvI2LNode(map()->in(i)));
call->init_req(cnt++, int_as_long); // long
call->init_req(cnt++, top()); // half
} else {
@@ -200,23 +199,23 @@
//-----------------------------
// Now set up the return results
- set_control( _gvn.transform( new (C) ProjNode(call,TypeFunc::Control)) );
- set_i_o( _gvn.transform( new (C) ProjNode(call,TypeFunc::I_O )) );
+ set_control( _gvn.transform( new ProjNode(call,TypeFunc::Control)) );
+ set_i_o( _gvn.transform( new ProjNode(call,TypeFunc::I_O )) );
set_all_memory_call(call);
if (range->cnt() > TypeFunc::Parms) {
- Node* retnode = _gvn.transform( new (C) ProjNode(call,TypeFunc::Parms) );
+ Node* retnode = _gvn.transform( new ProjNode(call,TypeFunc::Parms) );
// C-land is allowed to return sub-word values. Convert to integer type.
assert( retval != Type::TOP, "" );
if (retval == TypeInt::BOOL) {
- retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFF)) );
+ retnode = _gvn.transform( new AndINode(retnode, intcon(0xFF)) );
} else if (retval == TypeInt::CHAR) {
- retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFFFF)) );
+ retnode = _gvn.transform( new AndINode(retnode, intcon(0xFFFF)) );
} else if (retval == TypeInt::BYTE) {
- retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(24)) );
- retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(24)) );
+ retnode = _gvn.transform( new LShiftINode(retnode, intcon(24)) );
+ retnode = _gvn.transform( new RShiftINode(retnode, intcon(24)) );
} else if (retval == TypeInt::SHORT) {
- retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(16)) );
- retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(16)) );
+ retnode = _gvn.transform( new LShiftINode(retnode, intcon(16)) );
+ retnode = _gvn.transform( new RShiftINode(retnode, intcon(16)) );
}
map()->set_req( TypeFunc::Parms, retnode );
}
@@ -253,21 +252,21 @@
Node* exit_memory = reset_memory();
- Node* cmp = _gvn.transform( new (C) CmpPNode(pending, null()) );
- Node* bo = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) );
+ Node* cmp = _gvn.transform( new CmpPNode(pending, null()) );
+ Node* bo = _gvn.transform( new BoolNode(cmp, BoolTest::ne) );
IfNode *iff = create_and_map_if(control(), bo, PROB_MIN, COUNT_UNKNOWN);
- Node* if_null = _gvn.transform( new (C) IfFalseNode(iff) );
- Node* if_not_null = _gvn.transform( new (C) IfTrueNode(iff) );
+ Node* if_null = _gvn.transform( new IfFalseNode(iff) );
+ Node* if_not_null = _gvn.transform( new IfTrueNode(iff) );
assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() ));
- Node *to_exc = new (C) TailCallNode(if_not_null,
- i_o(),
- exit_memory,
- frameptr(),
- returnadr(),
- exc_target, null());
+ Node *to_exc = new TailCallNode(if_not_null,
+ i_o(),
+ exit_memory,
+ frameptr(),
+ returnadr(),
+ exc_target, null());
root()->add_req(_gvn.transform(to_exc)); // bind to root to keep live
C->init_start(start);
@@ -277,27 +276,27 @@
switch( is_fancy_jump ) {
case 0: // Make a return instruction
// Return to caller, free any space for return address
- ret = new (C) ReturnNode(TypeFunc::Parms, if_null,
- i_o(),
- exit_memory,
- frameptr(),
- returnadr());
+ ret = new ReturnNode(TypeFunc::Parms, if_null,
+ i_o(),
+ exit_memory,
+ frameptr(),
+ returnadr());
if (C->tf()->range()->cnt() > TypeFunc::Parms)
ret->add_req( map()->in(TypeFunc::Parms) );
break;
case 1: // This is a fancy tail-call jump. Jump to computed address.
// Jump to new callee; leave old return address alone.
- ret = new (C) TailCallNode(if_null,
- i_o(),
- exit_memory,
- frameptr(),
- returnadr(),
- target, map()->in(TypeFunc::Parms));
+ ret = new TailCallNode(if_null,
+ i_o(),
+ exit_memory,
+ frameptr(),
+ returnadr(),
+ target, map()->in(TypeFunc::Parms));
break;
case 2: // Pop return address & jump
// Throw away old return address; jump to new computed address
//assert(C_function == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C), "fancy_jump==2 only for rethrow");
- ret = new (C) TailJumpNode(if_null,
+ ret = new TailJumpNode(if_null,
i_o(),
exit_memory,
frameptr(),
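
Note on the generateOptoStub.cpp hunks: besides dropping the explicit "(C)" placement argument from node allocation, the unchanged logic shows how the stub normalizes sub-word values returned from C land: booleans are masked with 0xFF, chars with 0xFFFF, and bytes/shorts are sign-extended with a left/right shift pair (24 or 16 bits). The following standalone sketch reproduces that arithmetic on plain 32-bit integers; the enum and function names are invented for the example.

// Standalone sketch (not HotSpot code) of the sub-word normalization shown
// in the hunk above, performed on plain 32-bit ints instead of ideal nodes.
#include <cassert>
#include <cstdint>

enum class RetKind { Bool, Char, Byte, Short };

static int32_t normalize_subword(int32_t raw, RetKind kind) {
  switch (kind) {
    case RetKind::Bool:  return raw & 0xFF;                 // AndI(retnode, 0xFF)
    case RetKind::Char:  return raw & 0xFFFF;               // AndI(retnode, 0xFFFF)
    case RetKind::Byte:  return static_cast<int8_t>(raw);   // same effect as the LShiftI/RShiftI-by-24 pair
    case RetKind::Short: return static_cast<int16_t>(raw);  // same effect as the LShiftI/RShiftI-by-16 pair
  }
  return raw;
}

int main() {
  assert(normalize_subword(0x1FF,   RetKind::Bool)  == 0xFF);    // zero-extended
  assert(normalize_subword(0x12345, RetKind::Char)  == 0x2345);  // zero-extended
  assert(normalize_subword(0x80,    RetKind::Byte)  == -128);    // sign-extended
  assert(normalize_subword(0x8000,  RetKind::Short) == -32768);  // sign-extended
  return 0;
}
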
--- a/hotspot/src/share/vm/opto/graphKit.cpp Thu Jun 19 11:22:18 2014 -0700
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Wed Jul 05 19:46:17 2017 +0200
@@ -294,7 +294,7 @@
JVMState* jvms = new (C) JVMState(_method, NULL);
jvms->set_bci(_bci);
jvms->set_sp(_sp);
- jvms->set_map(new (C) SafePointNode(TypeFunc::Parms, jvms));
+ jvms->set_map(new SafePointNode(TypeFunc::Parms, jvms));
set_jvms(jvms);
for (uint i = 0; i < map()->req(); i++) map()->init_req(i, top());
set_all_memory(top());
@@ -347,7 +347,7 @@
if (region->in(0) != hidden_merge_mark) {
// The control input is not (yet) a specially-marked region in phi_map.
// Make it so, and build some phis.
- region = new (C) RegionNode(2);
+ region = new RegionNode(2);
_gvn.set_type(region, Type::CONTROL);
region->set_req(0, hidden_merge_mark); // marks an internal ex-state
region->init_req(1, phi_map->control());
@@ -432,6 +432,7 @@
}
}
}
+ phi_map->merge_replaced_nodes_with(ex_map);
}
//--------------------------use_exception_state--------------------------------
@@ -496,13 +497,13 @@
// take the uncommon_trap in the BuildCutout below.
// first must access the should_post_on_exceptions_flag in this thread's JavaThread
- Node* jthread = _gvn.transform(new (C) ThreadLocalNode());
+ Node* jthread = _gvn.transform(new ThreadLocalNode());
Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
// Test the should_post_on_exceptions_flag vs. 0
- Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) );
- Node* tst = _gvn.transform( new (C) BoolNode(chk, BoolTest::eq) );
+ Node* chk = _gvn.transform( new CmpINode(should_post_flag, intcon(0)) );
+ Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
// Branch to slow_path if should_post_on_exceptions_flag was true
{ BuildCutout unless(this, tst, PROB_MAX);
@@ -645,7 +646,6 @@
_map = kit->map(); // preserve the map
_sp = kit->sp();
kit->set_map(clone_map ? kit->clone_map() : NULL);
- Compile::current()->inc_preserve_jvm_state();
#ifdef ASSERT
_bci = kit->bci();
Parse* parser = kit->is_Parse();
@@ -663,7 +663,6 @@
#endif
kit->set_map(_map);
kit->set_sp(_sp);
- Compile::current()->dec_preserve_jvm_state();
}
@@ -675,8 +674,8 @@
SafePointNode* outer_map = _map; // preserved map is caller's
SafePointNode* inner_map = kit->map();
IfNode* iff = kit->create_and_map_if(outer_map->control(), p, prob, cnt);
- outer_map->set_control(kit->gvn().transform( new (kit->C) IfTrueNode(iff) ));
- inner_map->set_control(kit->gvn().transform( new (kit->C) IfFalseNode(iff) ));
+ outer_map->set_control(kit->gvn().transform( new IfTrueNode(iff) ));
+ inner_map->set_control(kit->gvn().transform( new IfFalseNode(iff) ));
}
BuildCutout::~BuildCutout() {
GraphKit* kit = _kit;
@@ -1118,7 +1117,7 @@
Node* GraphKit::basic_plus_adr(Node* base, Node* ptr, Node* offset) {
// short-circuit a common case
if (offset == intcon(0)) return ptr;
- return _gvn.transform( new (C) AddPNode(base, ptr, offset) );
+ return _gvn.transform( new AddPNode(base, ptr, offset) );
}
Node* GraphKit::ConvI2L(Node* offset) {
@@ -1127,7 +1126,7 @@
if (offset_con != Type::OffsetBot) {
return longcon((jlong) offset_con);
}
- return _gvn.transform( new (C) ConvI2LNode(offset));
+ return _gvn.transform( new ConvI2LNode(offset));
}
Node* GraphKit::ConvI2UL(Node* offset) {
@@ -1135,9 +1134,9 @@
if (offset_con != (juint) Type::OffsetBot) {
return longcon((julong) offset_con);
}
- Node* conv = _gvn.transform( new (C) ConvI2LNode(offset));
+ Node* conv = _gvn.transform( new ConvI2LNode(offset));
Node* mask = _gvn.transform( ConLNode::make(C, (julong) max_juint) );
- return _gvn.transform( new (C) AndLNode(conv, mask) );
+ return _gvn.transform( new AndLNode(conv, mask) );
}
Node* GraphKit::ConvL2I(Node* offset) {
@@ -1146,7 +1145,7 @@
if (offset_con != (jlong)Type::OffsetBot) {
return intcon((int) offset_con);
}
- return _gvn.transform( new (C) ConvL2INode(offset));
+ return _gvn.transform( new ConvL2INode(offset));
}
//-------------------------load_object_klass-----------------------------------
@@ -1165,7 +1164,7 @@
Node *alen;
if (alloc == NULL) {
Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
- alen = _gvn.transform( new (C) LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
+ alen = _gvn.transform( new LoadRangeNode(0, immutable_memory(), r_adr, TypeInt::POS));
} else {
alen = alloc->Ideal_length();
Node* ccast = alloc->make_ideal_length(_gvn.type(array)->is_oopptr(), &_gvn);
@@ -1199,8 +1198,8 @@
// Construct NULL check
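
Note on the pattern running through the generateOptoStub.cpp and graphKit.cpp hunks: every ideal-node allocation loses its explicit Compile* placement argument, so "new (C) FooNode(...)" becomes plain "new FooNode(...)". That call-site change is possible when the node class's own operator new obtains the allocation arena from the current compilation context instead of taking it as a placement parameter; the hunks do not show that operator, so the sketch below only illustrates the general idiom with made-up Arena, CurrentContext and Node types.

// Minimal sketch of the idiom behind "new (C) FooNode(...)" -> "new FooNode(...)".
// Arena, CurrentContext and Node are hypothetical stand-ins, not HotSpot types.
#include <cstddef>
#include <vector>

struct Arena {
  std::vector<char*> blocks;
  void* alloc(std::size_t n) { char* p = new char[n]; blocks.push_back(p); return p; }
  ~Arena() { for (char* p : blocks) delete[] p; }
};

struct CurrentContext {                      // stand-in for "the current compilation"
  static Arena* arena() { static Arena a; return &a; }
};

struct Node {
  // Old call-site shape: "new (some_context_arena) Node(...)".
  void* operator new(std::size_t size, Arena* arena) { return arena->alloc(size); }
  void  operator delete(void*, Arena*) {}    // matching placement delete (used only if a ctor throws)
  // New call-site shape: "new Node(...)"; the arena is looked up internally.
  void* operator new(std::size_t size) { return CurrentContext::arena()->alloc(size); }
  void  operator delete(void*) {}            // storage is owned by the arena
  int _idx = 0;
};

int main() {
  Node* a = new (CurrentContext::arena()) Node();  // old style still works
  Node* b = new Node();                            // new style, as in the hunks above
  return (a->_idx == 0 && b->_idx == 0) ? 0 : 1;
}
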