OpenJDK / jdk / jdk
changeset 57773:eb42609c4249
Merge
| | |
| --- | --- |
| author | prr |
date | Thu, 19 Dec 2019 12:28:56 -0800 |
parents | 7d329b38512f ce6662089667 |
children | 747c05277fd7 |
files | src/java.base/share/classes/sun/security/util/math/intpoly/FieldGen.jsh src/java.base/share/classes/sun/security/util/math/intpoly/IntegerPolynomialP256.java src/java.base/share/classes/sun/security/util/math/intpoly/IntegerPolynomialP384.java src/java.base/share/classes/sun/security/util/math/intpoly/IntegerPolynomialP521.java src/java.base/share/classes/sun/security/util/math/intpoly/P256OrderField.java src/java.base/share/classes/sun/security/util/math/intpoly/P384OrderField.java src/java.base/share/classes/sun/security/util/math/intpoly/P521OrderField.java src/java.base/share/classes/sun/security/util/math/intpoly/header.txt src/jdk.incubator.jpackage/macosx/classes/jdk/incubator/jpackage/internal/resources/background_dmg.png src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/ObjectSubstitutions.java test/hotspot/jtreg/gc/startup_warnings/TestParallelScavengeSerialOld.java test/hotspot/jtreg/runtime/handshake/HandshakeWalkStackFallbackTest.java test/jdk/java/net/NetworkInterface/NoSetNetworkInterface.java test/jdk/javax/net/ssl/compatibility/JdkRelease.java test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithPSMarkSweep.java test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithPSMarkSweep.java test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventPSSerial.java test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithPSMarkSweep.java test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithPSMarkSweep.java test/jdk/jdk/jfr/event/oldobject/TestParallelOld.java test/jdk/tools/jpackage/apps/com.other/com/other/Other.java test/jdk/tools/jpackage/apps/com.other/module-info.java test/jdk/tools/jpackage/manage_packages.sh test/jdk/tools/jpackage/share/AddLauncherBase.java test/jdk/tools/jpackage/share/AddLauncherModuleTest.java test/jdk/tools/jpackage/share/AddLauncherTest.java test/jdk/tools/jpackage/share/AddLaunchersTest.java |
| diffstat | 1006 files changed, 25833 insertions(+), 17425 deletions(-) |
--- a/.hgtags Thu Dec 19 11:57:58 2019 +0530
+++ b/.hgtags Thu Dec 19 12:28:56 2019 -0800
@@ -599,3 +599,7 @@
 438337c846fb071900ddb6922bddf8b3e895a514 jdk-14+24
 17d242844fc9e7d18b3eac97426490a9c246119e jdk-14+25
 288777cf0702914e5266bc1e5d380eed9032ca41 jdk-14+26
+2c724dba4c3cf9516b2152e151c9aea66b21b30b jdk-15+0
+91a3f092682fc715d991a87eb6ec6f28886d2035 jdk-14+27
+63e17cf29bed191ea21020b4648c9cdf893f80f5 jdk-15+1
+f33197adda9ad82fdef46ac0f7dc0126204f35b2 jdk-15+2
--- a/make/Bundles.gmk Thu Dec 19 11:57:58 2019 +0530
+++ b/make/Bundles.gmk Thu Dec 19 12:28:56 2019 -0800
@@ -156,6 +156,12 @@
 JRE_IMAGE_HOMEDIR := $(JRE_MACOSX_CONTENTS_DIR)/Home
 JDK_BUNDLE_SUBDIR :=
 JRE_BUNDLE_SUBDIR :=
+ # In certain situations, the JDK_IMAGE_DIR points to an image without the
+ # the symbols and demos. If so, the symobls and demos can be found in a
+ # separate image. These variables allow for overriding from a custom makefile.
+ JDK_SYMBOLS_IMAGE_DIR ?= $(JDK_IMAGE_DIR)
+ JDK_DEMOS_IMAGE_DIR ?= $(JDK_IMAGE_DIR)
+ JDK_DEMOS_IMAGE_HOMEDIR ?= $(JDK_DEMOS_IMAGE_DIR)/$(JDK_MACOSX_CONTENTS_SUBDIR)/Home
 else
 JDK_IMAGE_HOMEDIR := $(JDK_IMAGE_DIR)
 JRE_IMAGE_HOMEDIR := $(JRE_IMAGE_DIR)
@@ -165,6 +171,12 @@
 JDK_BUNDLE_SUBDIR := $(JDK_BUNDLE_SUBDIR)/$(DEBUG_LEVEL)
 JRE_BUNDLE_SUBDIR := $(JRE_BUNDLE_SUBDIR)/$(DEBUG_LEVEL)
 endif
+ # In certain situations, the JDK_IMAGE_DIR points to an image without the
+ # the symbols and demos. If so, the symobls and demos can be found in a
+ # separate image. These variables allow for overriding from a custom makefile.
+ JDK_SYMBOLS_IMAGE_DIR ?= $(JDK_IMAGE_DIR)
+ JDK_DEMOS_IMAGE_DIR ?= $(JDK_IMAGE_DIR)
+ JDK_DEMOS_IMAGE_HOMEDIR ?= $(JDK_DEMOS_IMAGE_DIR)
 endif
 ################################################################################
@@ -176,13 +188,24 @@
 # There may be files with spaces in the names, so use ShellFindFiles
 # explicitly.
 ALL_JDK_FILES := $(call ShellFindFiles, $(JDK_IMAGE_DIR))
+ ifneq ($(JDK_IMAGE_DIR), $(JDK_SYMBOLS_IMAGE_DIR))
+ ALL_JDK_SYMBOLS_FILES := $(call ShellFindFiles, $(JDK_SYMBOLS_IMAGE_DIR))
+ else
+ ALL_JDK_SYMBOLS_FILES := $(ALL_JDK_FILES)
+ endif
+ ifneq ($(JDK_IMAGE_DIR), $(JDK_DEMOS_IMAGE_DIR))
+ ALL_JDK_DEMOS_FILES := $(call ShellFindFiles, $(JDK_DEMOS_IMAGE_DIR))
+ else
+ ALL_JDK_DEMOS_FILES := $(ALL_JDK_FILES)
+ endif
 # Create special filter rules when dealing with unzipped .dSYM directories on
 # macosx
 ifeq ($(call isTargetOs, macosx), true)
 ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
 JDK_SYMBOLS_EXCLUDE_PATTERN := $(addprefix %, \
- $(call containing, .dSYM/, $(patsubst $(JDK_IMAGE_DIR)/%, %, $(ALL_JDK_FILES))))
+ $(call containing, .dSYM/, $(patsubst $(JDK_IMAGE_DIR)/%, %, \
+ $(ALL_JDK_SYMBOLS_FILES))))
 endif
 endif
@@ -203,12 +226,13 @@
 $(filter-out \
 $(JDK_IMAGE_HOMEDIR)/demo/% \
 , \
- $(ALL_JDK_FILES) \
+ $(ALL_JDK_SYMBOLS_FILES) \
 ) \
 ) \
 $(call FindFiles, $(SYMBOLS_IMAGE_DIR))
- TEST_DEMOS_BUNDLE_FILES := $(filter $(JDK_IMAGE_HOMEDIR)/demo/%, $(ALL_JDK_FILES))
+ TEST_DEMOS_BUNDLE_FILES := $(filter $(JDK_DEMOS_IMAGE_HOMEDIR)/demo/%, \
+ $(ALL_JDK_DEMOS_FILES))
 ALL_JRE_FILES := $(call ShellFindFiles, $(JRE_IMAGE_DIR))
@@ -245,15 +269,17 @@
 LEGACY_TARGETS += $(BUILD_JRE_BUNDLE)
- $(eval $(call SetupBundleFile, BUILD_JDK_SYMBOLS_BUNDLE, \
- BUNDLE_NAME := $(JDK_SYMBOLS_BUNDLE_NAME), \
- FILES := $(JDK_SYMBOLS_BUNDLE_FILES), \
- BASE_DIRS := $(JDK_IMAGE_DIR) $(wildcard $(SYMBOLS_IMAGE_DIR)), \
- SUBDIR := $(JDK_BUNDLE_SUBDIR), \
- UNZIP_DEBUGINFO := true, \
- ))
+ ifeq ($(COPY_DEBUG_SYMBOLS), true)
+ $(eval $(call SetupBundleFile, BUILD_JDK_SYMBOLS_BUNDLE, \
+ BUNDLE_NAME := $(JDK_SYMBOLS_BUNDLE_NAME), \
+ FILES := $(JDK_SYMBOLS_BUNDLE_FILES), \
+ BASE_DIRS := $(JDK_SYMBOLS_IMAGE_DIR) $(wildcard $(SYMBOLS_IMAGE_DIR)), \
+ SUBDIR := $(JDK_BUNDLE_SUBDIR), \
+ UNZIP_DEBUGINFO := true, \
+ ))
- PRODUCT_TARGETS += $(BUILD_JDK_SYMBOLS_BUNDLE)
+ PRODUCT_TARGETS += $(BUILD_JDK_SYMBOLS_BUNDLE)
+ endif
 # The demo bundle is only created to support client tests. Ideally it should
 # be built with the main test bundle, but since the prerequisites match
@@ -261,7 +287,7 @@
 $(eval $(call SetupBundleFile, BUILD_TEST_DEMOS_BUNDLE, \
 BUNDLE_NAME := $(TEST_DEMOS_BUNDLE_NAME), \
 FILES := $(TEST_DEMOS_BUNDLE_FILES), \
- BASE_DIRS := $(JDK_IMAGE_DIR), \
+ BASE_DIRS := $(JDK_DEMOS_IMAGE_DIR), \
 SUBDIR := $(JDK_BUNDLE_SUBDIR), \
 ))
--- a/make/CompileJavaModules.gmk Thu Dec 19 11:57:58 2019 +0530
+++ b/make/CompileJavaModules.gmk Thu Dec 19 12:28:56 2019 -0800
@@ -381,7 +381,7 @@
 ################################################################################
 jdk.incubator.jpackage_COPY += .gif .png .txt .spec .script .prerm .preinst .postrm .postinst .list .sh \
- .desktop .copyright .control .plist .template .icns .scpt .entitlements .wxs .wxl .wxi .ico .bmp
+ .desktop .copyright .control .plist .template .icns .scpt .entitlements .wxs .wxl .wxi .ico .bmp .tiff
 jdk.incubator.jpackage_CLEAN += .properties
--- a/make/MacBundles.gmk Thu Dec 19 11:57:58 2019 +0530
+++ b/make/MacBundles.gmk Thu Dec 19 12:28:56 2019 -0800
@@ -61,17 +61,15 @@
 FILES := $(call FindFiles, $(JRE_IMAGE_DIR)), \
 ))
- $(JDK_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib:
- $(call LogInfo, Creating link $(patsubst $(OUTPUTDIR)/%,%,$@))
- $(call MakeTargetDir)
- $(RM) $@
- $(LN) -s ../Home/lib/libjli.dylib $@
+ $(eval $(call SetupCopyFiles, COPY_LIBJLI_JDK, \
+ FILES := $(JDK_IMAGE_DIR)/lib/libjli.dylib, \
+ DEST := $(JDK_MACOSX_CONTENTS_DIR)/MacOS, \
+ ))
- $(JRE_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib:
- $(call LogInfo, Creating link $(patsubst $(OUTPUTDIR)/%,%,$@))
- $(call MakeTargetDir)
- $(RM) $@
- $(LN) -s ../Home/lib/libjli.dylib $@
+ $(eval $(call SetupCopyFiles, COPY_LIBJLI_JRE, \
+ FILES := $(JRE_IMAGE_DIR)/lib/libjli.dylib, \
+ DEST := $(JRE_MACOSX_CONTENTS_DIR)/MacOS, \
+ ))
 $(eval $(call SetupTextFileProcessing, BUILD_JDK_PLIST, \
 SOURCE_FILES := $(MACOSX_PLIST_SRC)/JDK-Info.plist, \
@@ -97,13 +95,19 @@
 @@VENDOR@@ => $(BUNDLE_VENDOR) , \
 ))
- jdk-bundle: $(COPY_JDK_IMAGE) $(JDK_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
- $(BUILD_JDK_PLIST)
+ $(SUPPORT_OUTPUTDIR)/images/_jdk_bundle_attribute_set: $(COPY_JDK_IMAGE)
 $(SETFILE) -a B $(dir $(JDK_MACOSX_CONTENTS_DIR))
+ $(TOUCH) $@
- jre-bundle: $(COPY_JRE_IMAGE) $(JRE_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
- $(BUILD_JRE_PLIST)
+ $(SUPPORT_OUTPUTDIR)/images/_jre_bundle_attribute_set: $(COPY_JRE_IMAGE)
 $(SETFILE) -a B $(dir $(JRE_MACOSX_CONTENTS_DIR))
+ $(TOUCH) $@
+
+ jdk-bundle: $(COPY_JDK_IMAGE) $(COPY_LIBJLI_JDK) \
+ $(BUILD_JDK_PLIST) $(SUPPORT_OUTPUTDIR)/images/_jdk_bundle_attribute_set
+
+ jre-bundle: $(COPY_JRE_IMAGE) $(COPY_LIBJLI_JRE) \
+ $(BUILD_JRE_PLIST) $(SUPPORT_OUTPUTDIR)/images/_jre_bundle_attribute_set
 else # Not macosx
--- a/make/RunTests.gmk Thu Dec 19 11:57:58 2019 +0530
+++ b/make/RunTests.gmk Thu Dec 19 12:28:56 2019 -0800
@@ -701,7 +701,8 @@
 endif
 ifneq ($$(MICRO_VM_OPTIONS)$$(MICRO_JAVA_OPTIONS), )
- $1_MICRO_VM_OPTIONS := -jvmArgs $$(MICRO_VM_OPTIONS) $$(MICRO_JAVA_OPTIONS)
+ JMH_JVM_ARGS := $$(MICRO_VM_OPTIONS) $$(MICRO_JAVA_OPTIONS)
+ $1_MICRO_VM_OPTIONS := -jvmArgs $(call ShellQuote,$$(JMH_JVM_ARGS))
 endif
 ifneq ($$(MICRO_ITER), )
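The RunTests.gmk change above quotes the combined JMH VM and Java options before passing them to the harness, so a value containing spaces survives as a single -jvmArgs argument. A hedged usage sketch, assuming the MICRO keyword variable described in doc/testing.md; the benchmark name and option values are placeholders:

```
# Several JMH VM options in one keyword value; the ShellQuote call above keeps
# them together when RunTests.gmk assembles the -jvmArgs argument.
make test TEST="micro:java.lang.ArrayCopy" MICRO="VM_OPTIONS=-Xmx256m -XX:+UseParallelGC"
```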
--- a/make/ToolsJdk.gmk Thu Dec 19 11:57:58 2019 +0530
+++ b/make/ToolsJdk.gmk Thu Dec 19 12:28:56 2019 -0800
@@ -106,6 +106,9 @@
 TOOL_CLDRCONVERTER = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
 build.tools.cldrconverter.CLDRConverter
+TOOL_INTPOLY = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
+ build.tools.intpoly.FieldGen
+
 TOOL_GENERATELSREQUIVMAPS = $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/jdk_tools_classes \
 build.tools.generatelsrequivmaps.EquivMapsGenerator
--- a/make/autoconf/basics.m4 Thu Dec 19 11:57:58 2019 +0530
+++ b/make/autoconf/basics.m4 Thu Dec 19 12:28:56 2019 -0800
@@ -1287,12 +1287,23 @@
 BASIC_REQUIRE_PROGS(MIG, mig)
 BASIC_REQUIRE_PROGS(XATTR, xattr)
 BASIC_PATH_PROGS(CODESIGN, codesign)
+
 if test "x$CODESIGN" != "x"; then
- # Verify that the openjdk_codesign certificate is present
- AC_MSG_CHECKING([if openjdk_codesign certificate is present])
+ # Check for user provided code signing identity.
+ # If no identity was provided, fall back to "openjdk_codesign".
+ AC_ARG_WITH([macosx-codesign-identity], [AS_HELP_STRING([--with-macosx-codesign-identity],
+ [specify the code signing identity])],
+ [MACOSX_CODESIGN_IDENTITY=$with_macosx_codesign_identity],
+ [MACOSX_CODESIGN_IDENTITY=openjdk_codesign]
+ )
+
+ AC_SUBST(MACOSX_CODESIGN_IDENTITY)
+
+ # Verify that the codesign certificate is present
+ AC_MSG_CHECKING([if codesign certificate is present])
 $RM codesign-testfile
 $TOUCH codesign-testfile
- $CODESIGN -s openjdk_codesign codesign-testfile 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD || CODESIGN=
+ $CODESIGN -s "$MACOSX_CODESIGN_IDENTITY" codesign-testfile 2>&AS_MESSAGE_LOG_FD >&AS_MESSAGE_LOG_FD || CODESIGN=
 $RM codesign-testfile
 if test "x$CODESIGN" = x; then
 AC_MSG_RESULT([no])
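The new configure option in the basics.m4 diff above lets a build select its own macOS signing identity instead of the hard-coded openjdk_codesign certificate. A hedged invocation sketch; the identity string is a placeholder:

```
# Configure with a custom macOS code signing identity; when the option is
# omitted, the check above falls back to "openjdk_codesign".
bash configure --with-macosx-codesign-identity="Developer ID Application: Example Corp"
```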
--- a/make/autoconf/platform.m4 Thu Dec 19 11:57:58 2019 +0530
+++ b/make/autoconf/platform.m4 Thu Dec 19 12:28:56 2019 -0800
@@ -561,13 +561,12 @@
 PLATFORM_CHECK_DEPRECATION
 ])
-AC_DEFUN_ONCE([PLATFORM_CHECK_DEPRECATION],
+AC_DEFUN([PLATFORM_CHECK_DEPRECATION],
 [
 AC_ARG_ENABLE(deprecated-ports, [AS_HELP_STRING([--enable-deprecated-ports@<:@=yes/no@:>@],
 [Suppress the error when configuring for a deprecated port @<:@no@:>@])])
- AC_REQUIRE([PLATFORM_EXTRACT_TARGET_AND_BUILD])
- if test "x$OPENJDK_TARGET_OS" = xsolaris || test "x$OPENJDK_TARGET_CPU_ARCH" = xsparc; then
+ if test "x$OPENJDK_TARGET_OS" = xsolaris || (test "x$OPENJDK_TARGET_CPU_ARCH" = xsparc && test "x$with_jvm_variants" != xzero); then
 if test "x$enable_deprecated_ports" = "xyes"; then
 AC_MSG_WARN([The Solaris and SPARC ports are deprecated and may be removed in a future release.])
 else
--- a/make/autoconf/spec.gmk.in Thu Dec 19 11:57:58 2019 +0530
+++ b/make/autoconf/spec.gmk.in Thu Dec 19 12:28:56 2019 -0800
@@ -415,6 +415,9 @@
 # The highest allowed version of macosx
 MACOSX_VERSION_MAX=@MACOSX_VERSION_MAX@
+# The macosx code signing identity to use
+MACOSX_CODESIGN_IDENTITY=@MACOSX_CODESIGN_IDENTITY@
+
 # Toolchain type: gcc, clang, solstudio, lxc, microsoft...
 TOOLCHAIN_TYPE:=@TOOLCHAIN_TYPE@
 TOOLCHAIN_VERSION := @TOOLCHAIN_VERSION@
--- a/make/autoconf/version-numbers Thu Dec 19 11:57:58 2019 +0530
+++ b/make/autoconf/version-numbers Thu Dec 19 12:28:56 2019 -0800
@@ -26,18 +26,18 @@
 # Default version, product, and vendor information to use,
 # unless overridden by configure
-DEFAULT_VERSION_FEATURE=14
+DEFAULT_VERSION_FEATURE=15
 DEFAULT_VERSION_INTERIM=0
 DEFAULT_VERSION_UPDATE=0
 DEFAULT_VERSION_PATCH=0
 DEFAULT_VERSION_EXTRA1=0
 DEFAULT_VERSION_EXTRA2=0
 DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2020-03-17
-DEFAULT_VERSION_CLASSFILE_MAJOR=58 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
+DEFAULT_VERSION_DATE=2020-09-15
+DEFAULT_VERSION_CLASSFILE_MAJOR=59 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
 DEFAULT_VERSION_CLASSFILE_MINOR=0
-DEFAULT_ACCEPTABLE_BOOT_VERSIONS="13 14"
-DEFAULT_JDK_SOURCE_TARGET_VERSION=14
+DEFAULT_ACCEPTABLE_BOOT_VERSIONS="13 14 15"
+DEFAULT_JDK_SOURCE_TARGET_VERSION=15
 DEFAULT_PROMOTED_VERSION_PRE=ea
 LAUNCHER_NAME=openjdk
--- a/make/common/Modules.gmk Thu Dec 19 11:57:58 2019 +0530
+++ b/make/common/Modules.gmk Thu Dec 19 12:28:56 2019 -0800
@@ -58,6 +58,7 @@
 java.rmi \
 java.security.sasl \
 java.xml \
+ jdk.incubator.foreign \
 jdk.internal.vm.ci \
 jdk.jfr \
 jdk.management \
@@ -162,6 +163,7 @@
 jdk.jsobject \
 jdk.jshell \
 jdk.jstatd \
+ jdk.incubator.foreign \
 jdk.localedata \
 jdk.management \
 jdk.management.agent \
--- a/make/common/NativeCompilation.gmk Thu Dec 19 11:57:58 2019 +0530
+++ b/make/common/NativeCompilation.gmk Thu Dec 19 12:28:56 2019 -0800
@@ -1048,7 +1048,7 @@
 $1_VARDEPS := $$($1_LD) $$($1_SYSROOT_LDFLAGS) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) \
 $$(GLOBAL_LIBS) $$($1_LIBS) $$($1_EXTRA_LIBS) $$($1_MT) \
- $$($1_CODESIGN) $$($1_CREATE_DEBUGINFO_CMDS) $$($1_MANIFEST_VERSION) \
+ $$($1_CREATE_DEBUGINFO_CMDS) $$($1_MANIFEST_VERSION) \
 $$($1_STRIP_CMD)
 $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
 $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
@@ -1129,9 +1129,8 @@
 # This only works if the openjdk_codesign identity is present on the system. Let
 # silently fail otherwise.
 ifneq ($(CODESIGN), )
- ifneq ($$($1_CODESIGN), )
- $(CODESIGN) -s openjdk_codesign $$@
- endif
+ $(CODESIGN) -s "$(MACOSX_CODESIGN_IDENTITY)" --timestamp --options runtime \
+ --entitlements $(TOPDIR)/make/data/macosxsigning/entitlements.plist $$@
 endif
 endif
--- a/make/conf/jib-profiles.js Thu Dec 19 11:57:58 2019 +0530
+++ b/make/conf/jib-profiles.js Thu Dec 19 12:28:56 2019 -0800
@@ -434,7 +434,7 @@
 target_cpu: "x64",
 dependencies: ["devkit", "cups"],
 configure_args: concat(common.configure_args_64bit,
- "--with-zlib=system", "--enable-dtrace"),
+ "--with-zlib=system", "--enable-dtrace", "--enable-deprecated-ports=yes"),
 },
 "solaris-sparcv9": {
@@ -442,7 +442,7 @@
 target_cpu: "sparcv9",
 dependencies: ["devkit", "cups"],
 configure_args: concat(common.configure_args_64bit,
- "--with-zlib=system", "--enable-dtrace"),
+ "--with-zlib=system", "--enable-dtrace", "--enable-deprecated-ports=yes"),
 },
 "windows-x64": {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/make/data/macosxsigning/entitlements.plist Thu Dec 19 12:28:56 2019 -0800
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>com.apple.security.cs.allow-jit</key>
+ <true/>
+ <key>com.apple.security.cs.allow-unsigned-executable-memory</key>
+ <true/>
+ <key>com.apple.security.cs.disable-library-validation</key>
+ <true/>
+ <key>com.apple.security.cs.allow-dyld-environment-variables</key>
+ <true/>
+ <key>com.apple.security.cs.debugger</key>
+ <true/>
+</dict>
+</plist>
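The entitlements file added above is what NativeCompilation.gmk now passes to codesign together with the hardened-runtime options. A hedged way to confirm a signed binary actually carries these entitlements, using standard macOS tooling; the path is a placeholder:

```
# Print the entitlements embedded in a signed binary from the build output.
codesign -d --entitlements - /path/to/jdk/image/bin/java
```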
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/java.base-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,169 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +module name java.base +header exports java/io,java/lang,java/lang/annotation,java/lang/constant,java/lang/invoke,java/lang/module,java/lang/ref,java/lang/reflect,java/math,java/net,java/net/spi,java/nio,java/nio/channels,java/nio/channels/spi,java/nio/charset,java/nio/charset/spi,java/nio/file,java/nio/file/attribute,java/nio/file/spi,java/security,java/security/cert,java/security/interfaces,java/security/spec,java/text,java/text/spi,java/time,java/time/chrono,java/time/format,java/time/temporal,java/time/zone,java/util,java/util/concurrent,java/util/concurrent/atomic,java/util/concurrent/locks,java/util/function,java/util/jar,java/util/regex,java/util/spi,java/util/stream,java/util/zip,javax/crypto,javax/crypto/interfaces,javax/crypto/spec,javax/net,javax/net/ssl,javax/security/auth,javax/security/auth/callback,javax/security/auth/login,javax/security/auth/spi,javax/security/auth/x500,javax/security/cert uses 
java/lang/System$LoggerFinder,java/net/ContentHandlerFactory,java/net/spi/URLStreamHandlerProvider,java/nio/channels/spi/AsynchronousChannelProvider,java/nio/channels/spi/SelectorProvider,java/nio/charset/spi/CharsetProvider,java/nio/file/spi/FileSystemProvider,java/nio/file/spi/FileTypeDetector,java/security/Provider,java/text/spi/BreakIteratorProvider,java/text/spi/CollatorProvider,java/text/spi/DateFormatProvider,java/text/spi/DateFormatSymbolsProvider,java/text/spi/DecimalFormatSymbolsProvider,java/text/spi/NumberFormatProvider,java/time/chrono/AbstractChronology,java/time/chrono/Chronology,java/time/zone/ZoneRulesProvider,java/util/spi/CalendarDataProvider,java/util/spi/CalendarNameProvider,java/util/spi/CurrencyNameProvider,java/util/spi/LocaleNameProvider,java/util/spi/ResourceBundleControlProvider,java/util/spi/ResourceBundleProvider,java/util/spi/TimeZoneNameProvider,java/util/spi/ToolProvider,javax/security/auth/spi/LoginModule,jdk/internal/logger/DefaultLoggerFinder,sun/text/spi/JavaTimeDateTimePatternProvider,sun/util/locale/provider/LocaleDataMetaInfo,sun/util/resources/LocaleData$CommonResourceBundleProvider,sun/util/resources/LocaleData$SupplementaryResourceBundleProvider,sun/util/spi/CalendarProvider provides interface\u0020;java/nio/file/spi/FileSystemProvider\u0020;impls\u0020;jdk/internal/jrtfs/JrtFileSystemProvider target linux-amd64 flags 8000 + +class name java/io/PrintStream +method name write descriptor ([B)V thrownTypes java/io/IOException flags 1 +method name writeBytes descriptor ([B)V flags 1 + +class name java/io/Serial +header extends java/lang/Object implements java/lang/annotation/Annotation flags 2601 runtimeAnnotations @Ljava/lang/annotation/Target;(value={eLjava/lang/annotation/ElementType;METHOD;eLjava/lang/annotation/ElementType;FIELD;})@Ljava/lang/annotation/Retention;(value=eLjava/lang/annotation/RetentionPolicy;SOURCE;) + +class name java/lang/Math +-method name ceil descriptor (D)D +-method name floor descriptor (D)D +-method name rint descriptor (D)D +method name ceil descriptor (D)D flags 9 runtimeAnnotations @Ljdk/internal/HotSpotIntrinsicCandidate; +method name floor descriptor (D)D flags 9 runtimeAnnotations @Ljdk/internal/HotSpotIntrinsicCandidate; +method name rint descriptor (D)D flags 9 runtimeAnnotations @Ljdk/internal/HotSpotIntrinsicCandidate; + +class name java/lang/NullPointerException +method name getMessage descriptor ()Ljava/lang/String; flags 1 + +class name java/lang/StrictMath +method name incrementExact descriptor (I)I flags 9 +method name incrementExact descriptor (J)J flags 9 +method name decrementExact descriptor (I)I flags 9 +method name decrementExact descriptor (J)J flags 9 +method name negateExact descriptor (I)I flags 9 +method name negateExact descriptor (J)J flags 9 + +class name java/lang/String +-method name stripIndent descriptor ()Ljava/lang/String; +-method name translateEscapes descriptor ()Ljava/lang/String; +-method name formatted descriptor ([Ljava/lang/Object;)Ljava/lang/String; +method name stripIndent descriptor ()Ljava/lang/String; flags 1 classAnnotations @Ljdk/internal/PreviewFeature;(feature=eLjdk/internal/PreviewFeature$Feature;TEXT_BLOCKS;,essentialAPI=Ztrue) +method name translateEscapes descriptor ()Ljava/lang/String; flags 1 classAnnotations @Ljdk/internal/PreviewFeature;(feature=eLjdk/internal/PreviewFeature$Feature;TEXT_BLOCKS;,essentialAPI=Ztrue) +method name formatted descriptor ([Ljava/lang/Object;)Ljava/lang/String; flags 81 classAnnotations 
@Ljdk/internal/PreviewFeature;(feature=eLjdk/internal/PreviewFeature$Feature;TEXT_BLOCKS;,essentialAPI=Ztrue) + +class name java/lang/Thread +-method name suspend descriptor ()V +-method name resume descriptor ()V +-method name countStackFrames descriptor ()I +method name suspend descriptor ()V flags 11 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="1.2") +method name resume descriptor ()V flags 11 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="1.2") +method name countStackFrames descriptor ()I flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="1.2") + +class name java/lang/ThreadGroup +-method name suspend descriptor ()V +-method name resume descriptor ()V +-method name allowThreadSuspension descriptor (Z)Z +method name suspend descriptor ()V flags 11 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="1.2") +method name resume descriptor ()V flags 11 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="1.2") +method name allowThreadSuspension descriptor (Z)Z flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="1.2") + +class name java/lang/invoke/ConstantBootstraps +-method name <init> descriptor ()V +method name <init> descriptor ()V flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="14") + +class name java/lang/invoke/MethodHandles$Lookup +-method name hasPrivateAccess descriptor ()Z +method name previousLookupClass descriptor ()Ljava/lang/Class; flags 1 signature ()Ljava/lang/Class<*>; +method name hasPrivateAccess descriptor ()Z flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(since="14") +method name hasFullPrivilegeAccess descriptor ()Z flags 1 + +class name java/lang/reflect/Modifier +-method name <init> descriptor ()V +method name <init> descriptor ()V flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="14") + +class name java/nio/channels/SelectionKey +header extends java/lang/Object flags 421 +innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19 + +class name java/nio/channels/spi/AbstractSelectableChannel +header extends java/nio/channels/SelectableChannel flags 421 +innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19 + +class name java/nio/channels/spi/AbstractSelectionKey +header extends java/nio/channels/SelectionKey flags 421 +innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19 + +class name java/nio/channels/spi/AbstractSelector +header extends java/nio/channels/Selector flags 421 +innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19 + +-class name java/security/acl/Acl + +-class name java/security/acl/AclEntry + +-class name java/security/acl/AclNotFoundException + +-class name java/security/acl/Group + +-class name java/security/acl/LastOwnerException + +-class name java/security/acl/NotOwnerException + +-class name java/security/acl/Owner + +-class name java/security/acl/Permission + +class name java/util/HashSet +method name toArray descriptor ()[Ljava/lang/Object; flags 1 +method name toArray descriptor ([Ljava/lang/Object;)[Ljava/lang/Object; flags 
1 signature <T:Ljava/lang/Object;>([TT;)[TT; + +class name java/util/concurrent/locks/AbstractQueuedLongSynchronizer +header extends java/util/concurrent/locks/AbstractOwnableSynchronizer implements java/io/Serializable nestMembers java/util/concurrent/locks/AbstractQueuedLongSynchronizer$ConditionObject flags 421 +innerclass innerClass java/util/concurrent/locks/AbstractQueuedLongSynchronizer$ConditionObject outerClass java/util/concurrent/locks/AbstractQueuedLongSynchronizer innerClassName ConditionObject flags 1 +-method name <init> descriptor ()V +method name <init> descriptor ()V flags 1 + +class name java/util/concurrent/locks/AbstractQueuedLongSynchronizer$ConditionObject +header extends java/lang/Object implements java/util/concurrent/locks/Condition,java/io/Serializable nestHost java/util/concurrent/locks/AbstractQueuedLongSynchronizer flags 21 +innerclass innerClass java/util/concurrent/locks/AbstractQueuedLongSynchronizer$ConditionObject outerClass java/util/concurrent/locks/AbstractQueuedLongSynchronizer innerClassName ConditionObject flags 1 +innerclass innerClass java/util/concurrent/ForkJoinPool$ManagedBlocker outerClass java/util/concurrent/ForkJoinPool innerClassName ManagedBlocker flags 609 + +class name java/util/concurrent/locks/AbstractQueuedSynchronizer +header extends java/util/concurrent/locks/AbstractOwnableSynchronizer implements java/io/Serializable nestMembers java/util/concurrent/locks/AbstractQueuedSynchronizer$ConditionObject flags 421 +innerclass innerClass java/util/concurrent/locks/AbstractQueuedSynchronizer$ConditionObject outerClass java/util/concurrent/locks/AbstractQueuedSynchronizer innerClassName ConditionObject flags 1 + +class name java/util/concurrent/locks/AbstractQueuedSynchronizer$ConditionObject +header extends java/lang/Object implements java/util/concurrent/locks/Condition,java/io/Serializable nestHost java/util/concurrent/locks/AbstractQueuedSynchronizer flags 21 +innerclass innerClass java/util/concurrent/locks/AbstractQueuedSynchronizer$ConditionObject outerClass java/util/concurrent/locks/AbstractQueuedSynchronizer innerClassName ConditionObject flags 1 +innerclass innerClass java/util/concurrent/ForkJoinPool$ManagedBlocker outerClass java/util/concurrent/ForkJoinPool innerClassName ManagedBlocker flags 609 + +class name java/util/concurrent/locks/LockSupport +method name setCurrentBlocker descriptor (Ljava/lang/Object;)V flags 9 + +class name java/util/concurrent/locks/StampedLock +header extends java/lang/Object implements java/io/Serializable flags 21 classAnnotations @Ljdk/Profile+Annotation;(value=I1) +method name tryWriteLock descriptor ()J flags 1 +method name writeLockInterruptibly descriptor ()J thrownTypes java/lang/InterruptedException flags 1 +method name tryReadLock descriptor ()J flags 1 +method name tryReadLock descriptor (JLjava/util/concurrent/TimeUnit;)J thrownTypes java/lang/InterruptedException flags 1 +method name readLockInterruptibly descriptor ()J thrownTypes java/lang/InterruptedException flags 1 +method name unlock descriptor (J)V flags 1 +-method name tryWriteLock descriptor ()J +-method name writeLockInterruptibly descriptor ()J +-method name tryReadLock descriptor ()J +-method name tryReadLock descriptor (JLjava/util/concurrent/TimeUnit;)J +-method name readLockInterruptibly descriptor ()J +-method name unlock descriptor (J)V +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/java.compiler-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,62 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +class name javax/lang/model/SourceVersion +field name RELEASE_14 descriptor Ljavax/lang/model/SourceVersion; flags 4019 + +class name javax/lang/model/util/AbstractAnnotationValueVisitor9 +header extends javax/lang/model/util/AbstractAnnotationValueVisitor8 flags 421 signature <R:Ljava/lang/Object;P:Ljava/lang/Object;>Ljavax/lang/model/util/AbstractAnnotationValueVisitor8<TR;TP;>; runtimeAnnotations @Ljavax/annotation/processing/SupportedSourceVersion;(value=eLjavax/lang/model/SourceVersion;RELEASE_14;) + +class name javax/lang/model/util/AbstractElementVisitor9 +header extends javax/lang/model/util/AbstractElementVisitor8 flags 421 signature <R:Ljava/lang/Object;P:Ljava/lang/Object;>Ljavax/lang/model/util/AbstractElementVisitor8<TR;TP;>; runtimeAnnotations @Ljavax/annotation/processing/SupportedSourceVersion;(value=eLjavax/lang/model/SourceVersion;RELEASE_14;) + +class name javax/lang/model/util/AbstractTypeVisitor9 +header extends javax/lang/model/util/AbstractTypeVisitor8 flags 421 signature <R:Ljava/lang/Object;P:Ljava/lang/Object;>Ljavax/lang/model/util/AbstractTypeVisitor8<TR;TP;>; runtimeAnnotations @Ljavax/annotation/processing/SupportedSourceVersion;(value=eLjavax/lang/model/SourceVersion;RELEASE_14;) + +class name javax/lang/model/util/ElementKindVisitor9 +header extends javax/lang/model/util/ElementKindVisitor8 flags 21 signature <R:Ljava/lang/Object;P:Ljava/lang/Object;>Ljavax/lang/model/util/ElementKindVisitor8<TR;TP;>; runtimeAnnotations @Ljavax/annotation/processing/SupportedSourceVersion;(value=eLjavax/lang/model/SourceVersion;RELEASE_14;) + +class name javax/lang/model/util/ElementScanner9 +header extends javax/lang/model/util/ElementScanner8 flags 21 signature <R:Ljava/lang/Object;P:Ljava/lang/Object;>Ljavax/lang/model/util/ElementScanner8<TR;TP;>; runtimeAnnotations @Ljavax/annotation/processing/SupportedSourceVersion;(value=eLjavax/lang/model/SourceVersion;RELEASE_14;) + +class name javax/lang/model/util/SimpleAnnotationValueVisitor9 +header extends 
javax/lang/model/util/SimpleAnnotationValueVisitor8 flags 21 signature <R:Ljava/lang/Object;P:Ljava/lang/Object;>Ljavax/lang/model/util/SimpleAnnotationValueVisitor8<TR;TP;>; runtimeAnnotations @Ljavax/annotation/processing/SupportedSourceVersion;(value=eLjavax/lang/model/SourceVersion;RELEASE_14;) + +class name javax/lang/model/util/SimpleElementVisitor9 +header extends javax/lang/model/util/SimpleElementVisitor8 flags 21 signature <R:Ljava/lang/Object;P:Ljava/lang/Object;>Ljavax/lang/model/util/SimpleElementVisitor8<TR;TP;>; runtimeAnnotations @Ljavax/annotation/processing/SupportedSourceVersion;(value=eLjavax/lang/model/SourceVersion;RELEASE_14;) + +class name javax/lang/model/util/SimpleTypeVisitor9 +header extends javax/lang/model/util/SimpleTypeVisitor8 flags 21 signature <R:Ljava/lang/Object;P:Ljava/lang/Object;>Ljavax/lang/model/util/SimpleTypeVisitor8<TR;TP;>; runtimeAnnotations @Ljavax/annotation/processing/SupportedSourceVersion;(value=eLjavax/lang/model/SourceVersion;RELEASE_14;) + +class name javax/lang/model/util/TypeKindVisitor9 +header extends javax/lang/model/util/TypeKindVisitor8 flags 21 signature <R:Ljava/lang/Object;P:Ljava/lang/Object;>Ljavax/lang/model/util/TypeKindVisitor8<TR;TP;>; runtimeAnnotations @Ljavax/annotation/processing/SupportedSourceVersion;(value=eLjavax/lang/model/SourceVersion;RELEASE_14;) + +class name javax/tools/ToolProvider +-method name <init> descriptor ()V +method name <init> descriptor ()V flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="14") +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/java.desktop-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,31 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +class name javax/accessibility/AccessibleBundle +header extends java/lang/Object flags 421 classAnnotations @Ljdk/Profile+Annotation;(value=I4) +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/java.xml-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,35 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +class name javax/xml/stream/XMLInputFactory +method name newFactory descriptor ()Ljavax/xml/stream/XMLInputFactory; thrownTypes javax/xml/stream/FactoryConfigurationError flags 9 +-method name newFactory descriptor ()Ljavax/xml/stream/XMLInputFactory; + +class name org/xml/sax/ContentHandler +method name declaration descriptor (Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V thrownTypes org/xml/sax/SAXException flags 1 +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/jdk.compiler-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,87 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +class name com/sun/source/tree/BindingPatternTree +header extends java/lang/Object implements com/sun/source/tree/PatternTree flags 601 +method name getType descriptor ()Lcom/sun/source/tree/Tree; flags 401 +method name getBinding descriptor ()Ljavax/lang/model/element/Name; flags 401 + +class name com/sun/source/tree/CaseTree +-method name getExpression descriptor ()Lcom/sun/source/tree/ExpressionTree; +-method name getExpressions descriptor ()Ljava/util/List; +-method name getBody descriptor ()Lcom/sun/source/tree/Tree; +-method name getCaseKind descriptor ()Lcom/sun/source/tree/CaseTree$CaseKind; +method name getExpression descriptor ()Lcom/sun/source/tree/ExpressionTree; flags 401 deprecated true runtimeAnnotations @Ljava/lang/Deprecated; +method name getExpressions descriptor ()Ljava/util/List; flags 401 signature ()Ljava/util/List<+Lcom/sun/source/tree/ExpressionTree;>; +method name getBody descriptor ()Lcom/sun/source/tree/Tree; flags 1 +method name getCaseKind descriptor ()Lcom/sun/source/tree/CaseTree$CaseKind; flags 1 + +class name com/sun/source/tree/CaseTree$CaseKind +header extends java/lang/Enum nestHost com/sun/source/tree/CaseTree flags 4031 signature Ljava/lang/Enum<Lcom/sun/source/tree/CaseTree$CaseKind;>; +innerclass innerClass com/sun/source/tree/CaseTree$CaseKind outerClass com/sun/source/tree/CaseTree innerClassName CaseKind flags 4019 + +class name com/sun/source/tree/InstanceOfTree +method name getPattern descriptor ()Lcom/sun/source/tree/PatternTree; flags 401 + +class name com/sun/source/tree/PatternTree +header extends java/lang/Object implements com/sun/source/tree/Tree flags 601 + +class name com/sun/source/tree/SwitchExpressionTree +header extends java/lang/Object implements com/sun/source/tree/ExpressionTree flags 601 + +class name com/sun/source/tree/Tree$Kind +-field name SWITCH_EXPRESSION descriptor Lcom/sun/source/tree/Tree$Kind; +-field name YIELD descriptor Lcom/sun/source/tree/Tree$Kind; +field name BINDING_PATTERN descriptor 
Lcom/sun/source/tree/Tree$Kind; flags 4019 +field name SWITCH_EXPRESSION descriptor Lcom/sun/source/tree/Tree$Kind; flags 4019 +field name YIELD descriptor Lcom/sun/source/tree/Tree$Kind; flags 4019 + +class name com/sun/source/tree/TreeVisitor +-method name visitSwitchExpression descriptor (Lcom/sun/source/tree/SwitchExpressionTree;Ljava/lang/Object;)Ljava/lang/Object; +-method name visitYield descriptor (Lcom/sun/source/tree/YieldTree;Ljava/lang/Object;)Ljava/lang/Object; +method name visitBindingPattern descriptor (Lcom/sun/source/tree/BindingPatternTree;Ljava/lang/Object;)Ljava/lang/Object; flags 401 signature (Lcom/sun/source/tree/BindingPatternTree;TP;)TR; +method name visitSwitchExpression descriptor (Lcom/sun/source/tree/SwitchExpressionTree;Ljava/lang/Object;)Ljava/lang/Object; flags 401 signature (Lcom/sun/source/tree/SwitchExpressionTree;TP;)TR; +method name visitYield descriptor (Lcom/sun/source/tree/YieldTree;Ljava/lang/Object;)Ljava/lang/Object; flags 401 signature (Lcom/sun/source/tree/YieldTree;TP;)TR; + +class name com/sun/source/tree/YieldTree +header extends java/lang/Object implements com/sun/source/tree/StatementTree flags 601 + +class name com/sun/source/util/SimpleTreeVisitor +-method name visitSwitchExpression descriptor (Lcom/sun/source/tree/SwitchExpressionTree;Ljava/lang/Object;)Ljava/lang/Object; +-method name visitYield descriptor (Lcom/sun/source/tree/YieldTree;Ljava/lang/Object;)Ljava/lang/Object; +method name visitSwitchExpression descriptor (Lcom/sun/source/tree/SwitchExpressionTree;Ljava/lang/Object;)Ljava/lang/Object; flags 1 signature (Lcom/sun/source/tree/SwitchExpressionTree;TP;)TR; +method name visitBindingPattern descriptor (Lcom/sun/source/tree/BindingPatternTree;Ljava/lang/Object;)Ljava/lang/Object; flags 1 signature (Lcom/sun/source/tree/BindingPatternTree;TP;)TR; +method name visitYield descriptor (Lcom/sun/source/tree/YieldTree;Ljava/lang/Object;)Ljava/lang/Object; flags 1 signature (Lcom/sun/source/tree/YieldTree;TP;)TR; + +class name com/sun/source/util/TreeScanner +-method name visitSwitchExpression descriptor (Lcom/sun/source/tree/SwitchExpressionTree;Ljava/lang/Object;)Ljava/lang/Object; +-method name visitYield descriptor (Lcom/sun/source/tree/YieldTree;Ljava/lang/Object;)Ljava/lang/Object; +method name visitSwitchExpression descriptor (Lcom/sun/source/tree/SwitchExpressionTree;Ljava/lang/Object;)Ljava/lang/Object; flags 1 signature (Lcom/sun/source/tree/SwitchExpressionTree;TP;)TR; +method name visitBindingPattern descriptor (Lcom/sun/source/tree/BindingPatternTree;Ljava/lang/Object;)Ljava/lang/Object; flags 1 signature (Lcom/sun/source/tree/BindingPatternTree;TP;)TR; +method name visitYield descriptor (Lcom/sun/source/tree/YieldTree;Ljava/lang/Object;)Ljava/lang/Object; flags 1 signature (Lcom/sun/source/tree/YieldTree;TP;)TR; +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/jdk.httpserver-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,37 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +class name com/sun/net/httpserver/BasicAuthenticator +-field name realm descriptor Ljava/lang/String; +field name realm descriptor Ljava/lang/String; flags 14 +field name charset descriptor Ljava/nio/charset/Charset; flags 14 +method name <init> descriptor (Ljava/lang/String;Ljava/nio/charset/Charset;)V flags 1 + +class name com/sun/net/httpserver/HttpExchange +header extends java/lang/Object implements java/lang/AutoCloseable flags 421 +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/jdk.jfr-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,87 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +class name jdk/jfr/Recording +method name setFlushInterval descriptor (Ljava/time/Duration;)V flags 1 +method name getFlushInterval descriptor ()Ljava/time/Duration; flags 1 + +class name jdk/jfr/consumer/EventStream +header extends java/lang/Object implements java/lang/AutoCloseable flags 601 +method name openRepository descriptor ()Ljdk/jfr/consumer/EventStream; thrownTypes java/io/IOException flags 9 +method name openRepository descriptor (Ljava/nio/file/Path;)Ljdk/jfr/consumer/EventStream; thrownTypes java/io/IOException flags 9 +method name openFile descriptor (Ljava/nio/file/Path;)Ljdk/jfr/consumer/EventStream; thrownTypes java/io/IOException flags 9 +method name onEvent descriptor (Ljava/util/function/Consumer;)V flags 401 signature (Ljava/util/function/Consumer<Ljdk/jfr/consumer/RecordedEvent;>;)V +method name onEvent descriptor (Ljava/lang/String;Ljava/util/function/Consumer;)V flags 401 signature (Ljava/lang/String;Ljava/util/function/Consumer<Ljdk/jfr/consumer/RecordedEvent;>;)V +method name onFlush descriptor (Ljava/lang/Runnable;)V flags 401 +method name onError descriptor (Ljava/util/function/Consumer;)V flags 401 signature (Ljava/util/function/Consumer<Ljava/lang/Throwable;>;)V +method name onClose descriptor (Ljava/lang/Runnable;)V flags 401 +method name close descriptor ()V flags 401 +method name remove descriptor (Ljava/lang/Object;)Z flags 401 +method name setReuse descriptor (Z)V flags 401 +method name setOrdered descriptor (Z)V flags 401 +method name setStartTime descriptor (Ljava/time/Instant;)V flags 401 +method name setEndTime descriptor (Ljava/time/Instant;)V flags 401 +method name start descriptor ()V flags 401 +method name startAsync descriptor ()V flags 401 +method name awaitTermination descriptor (Ljava/time/Duration;)V thrownTypes java/lang/InterruptedException flags 401 +method name awaitTermination descriptor ()V thrownTypes java/lang/InterruptedException flags 401 + +class name jdk/jfr/consumer/RecordedEvent +method name objectAt descriptor 
(I)Ljava/lang/Object; flags 14 + +class name jdk/jfr/consumer/RecordedObject +method name objectAt descriptor (I)Ljava/lang/Object; flags 4 + +class name jdk/jfr/consumer/RecordingStream +header extends java/lang/Object implements java/lang/AutoCloseable,jdk/jfr/consumer/EventStream flags 31 +method name <init> descriptor ()V flags 1 +method name <init> descriptor (Ljdk/jfr/Configuration;)V flags 1 +method name enable descriptor (Ljava/lang/String;)Ljdk/jfr/EventSettings; flags 1 +method name setSettings descriptor (Ljava/util/Map;)V flags 1 signature (Ljava/util/Map<Ljava/lang/String;Ljava/lang/String;>;)V +method name enable descriptor (Ljava/lang/Class;)Ljdk/jfr/EventSettings; flags 1 signature (Ljava/lang/Class<+Ljdk/jfr/Event;>;)Ljdk/jfr/EventSettings; +method name disable descriptor (Ljava/lang/String;)Ljdk/jfr/EventSettings; flags 1 +method name disable descriptor (Ljava/lang/Class;)Ljdk/jfr/EventSettings; flags 1 signature (Ljava/lang/Class<+Ljdk/jfr/Event;>;)Ljdk/jfr/EventSettings; +method name setMaxAge descriptor (Ljava/time/Duration;)V flags 1 +method name setMaxSize descriptor (J)V flags 1 +method name setFlushInterval descriptor (Ljava/time/Duration;)V flags 1 +method name setReuse descriptor (Z)V flags 1 +method name setOrdered descriptor (Z)V flags 1 +method name setStartTime descriptor (Ljava/time/Instant;)V flags 1 +method name setEndTime descriptor (Ljava/time/Instant;)V flags 1 +method name onEvent descriptor (Ljava/lang/String;Ljava/util/function/Consumer;)V flags 1 signature (Ljava/lang/String;Ljava/util/function/Consumer<Ljdk/jfr/consumer/RecordedEvent;>;)V +method name onEvent descriptor (Ljava/util/function/Consumer;)V flags 1 signature (Ljava/util/function/Consumer<Ljdk/jfr/consumer/RecordedEvent;>;)V +method name onFlush descriptor (Ljava/lang/Runnable;)V flags 1 +method name onClose descriptor (Ljava/lang/Runnable;)V flags 1 +method name onError descriptor (Ljava/util/function/Consumer;)V flags 1 signature (Ljava/util/function/Consumer<Ljava/lang/Throwable;>;)V +method name close descriptor ()V flags 1 +method name remove descriptor (Ljava/lang/Object;)Z flags 1 +method name start descriptor ()V flags 1 +method name startAsync descriptor ()V flags 1 +method name awaitTermination descriptor (Ljava/time/Duration;)V thrownTypes java/lang/InterruptedException flags 1 +method name awaitTermination descriptor ()V thrownTypes java/lang/InterruptedException flags 1 +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/jdk.jlink-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,31 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +module name jdk.jlink +header requires name\u0020;java.base\u0020;flags\u0020;8000,name\u0020;jdk.internal.opt\u0020;flags\u0020;0,name\u0020;jdk.jdeps\u0020;flags\u0020;0 uses jdk/tools/jlink/plugin/Plugin provides interface\u0020;java/util/spi/ToolProvider\u0020;impls\u0020;jdk/tools/jmod/Main$JmodToolProvider\u005C;u002C;jdk/tools/jlink/internal/Main$JlinkToolProvider,interface\u0020;jdk/tools/jlink/plugin/Plugin\u0020;impls\u0020;jdk/tools/jlink/internal/plugins/DefaultStripDebugPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/StripJavaDebugAttributesPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/ExcludePlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/ExcludeFilesPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/ExcludeJmodSectionPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/LegalNoticeFilePlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/SystemModulesPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/StripNativeCommandsPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/OrderResourcesPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/DefaultCompressPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/ExcludeVMPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/IncludeLocalesPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/GenerateJLIClassesPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/ReleaseInfoPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/AddOptionsPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/VendorBugURLPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/VendorVMBugURLPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/VendorVersionPlugin\u005C;u002C;jdk/tools/jlink/internal/plugins/StripNativeDebugSymbolsPlugin target linux-amd64 flags 8000 +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/jdk.jsobject-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,34 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +module name jdk.jsobject +header exports netscape/javascript requires name\u0020;java.base\u0020;flags\u0020;8000 target linux-amd64 flags 8000 + +class name netscape/javascript/JSObject +-method name getWindow descriptor (Ljava/applet/Applet;)Lnetscape/javascript/JSObject; +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/jdk.management-E.sym.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,31 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +class name com/sun/management/ThreadMXBean +method name getCurrentThreadAllocatedBytes descriptor ()J flags 1 +
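For reference, the entry above records the new com.sun.management.ThreadMXBean method getCurrentThreadAllocatedBytes with descriptor ()J, i.e. a no-argument method returning long. A minimal usage sketch, assuming a JDK whose platform ThreadMXBean implements the com.sun.management extension (the class name below is illustrative):

import java.lang.management.ManagementFactory;

public class CurrentThreadAllocationProbe {
    public static void main(String[] args) {
        // On HotSpot the platform ThreadMXBean also implements the com.sun.management extension.
        com.sun.management.ThreadMXBean bean =
                (com.sun.management.ThreadMXBean) ManagementFactory.getThreadMXBean();
        long before = bean.getCurrentThreadAllocatedBytes();
        byte[] scratch = new byte[1 << 20];   // allocate roughly 1 MB on this thread
        long after = bean.getCurrentThreadAllocatedBytes();
        System.out.println("allocated about " + (after - before)
                + " bytes (scratch.length = " + scratch.length + ")");
    }
}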
--- a/make/data/symbols/symbols Thu Dec 19 11:57:58 2019 +0530 +++ b/make/data/symbols/symbols Thu Dec 19 12:28:56 2019 -0800 @@ -29,7 +29,7 @@ #command used to generate this file: #build.tools.symbolgenerator.CreateSymbols build-description-incremental symbols include.list # -generate platforms 7:8:9:A:B:C:D +generate platforms 7:8:9:A:B:C:D:E platform version 8 files java.activation-8.sym.txt:java.base-8.sym.txt:java.compiler-8.sym.txt:java.corba-8.sym.txt:java.datatransfer-8.sym.txt:java.desktop-8.sym.txt:java.instrument-8.sym.txt:java.logging-8.sym.txt:java.management-8.sym.txt:java.management.rmi-8.sym.txt:java.naming-8.sym.txt:java.prefs-8.sym.txt:java.rmi-8.sym.txt:java.scripting-8.sym.txt:java.security.jgss-8.sym.txt:java.security.sasl-8.sym.txt:java.sql-8.sym.txt:java.sql.rowset-8.sym.txt:java.transaction-8.sym.txt:java.xml-8.sym.txt:java.xml.bind-8.sym.txt:java.xml.crypto-8.sym.txt:java.xml.ws-8.sym.txt:java.xml.ws.annotation-8.sym.txt:jdk.httpserver-8.sym.txt:jdk.management-8.sym.txt:jdk.scripting.nashorn-8.sym.txt:jdk.sctp-8.sym.txt:jdk.security.auth-8.sym.txt:jdk.security.jgss-8.sym.txt platform version 7 base 8 files java.base-7.sym.txt:java.compiler-7.sym.txt:java.datatransfer-7.sym.txt:java.desktop-7.sym.txt:java.logging-7.sym.txt:java.management-7.sym.txt:java.naming-7.sym.txt:java.prefs-7.sym.txt:java.rmi-7.sym.txt:java.scripting-7.sym.txt:java.security.jgss-7.sym.txt:java.security.sasl-7.sym.txt:java.sql-7.sym.txt:java.sql.rowset-7.sym.txt:java.xml-7.sym.txt:java.xml.bind-7.sym.txt:java.xml.ws.annotation-7.sym.txt:jdk.httpserver-7.sym.txt:jdk.management-7.sym.txt:jdk.scripting.nashorn-7.sym.txt:jdk.sctp-7.sym.txt:jdk.security.auth-7.sym.txt:jdk.security.jgss-7.sym.txt platform version 9 base 8 files java.activation-9.sym.txt:java.base-9.sym.txt:java.compiler-9.sym.txt:java.corba-9.sym.txt:java.datatransfer-9.sym.txt:java.desktop-9.sym.txt:java.instrument-9.sym.txt:java.logging-9.sym.txt:java.management-9.sym.txt:java.management.rmi-9.sym.txt:java.naming-9.sym.txt:java.prefs-9.sym.txt:java.rmi-9.sym.txt:java.scripting-9.sym.txt:java.se-9.sym.txt:java.se.ee-9.sym.txt:java.security.jgss-9.sym.txt:java.security.sasl-9.sym.txt:java.smartcardio-9.sym.txt:java.sql-9.sym.txt:java.sql.rowset-9.sym.txt:java.transaction-9.sym.txt:java.xml-9.sym.txt:java.xml.bind-9.sym.txt:java.xml.crypto-9.sym.txt:java.xml.ws-9.sym.txt:java.xml.ws.annotation-9.sym.txt:jdk.accessibility-9.sym.txt:jdk.attach-9.sym.txt:jdk.charsets-9.sym.txt:jdk.compiler-9.sym.txt:jdk.crypto.cryptoki-9.sym.txt:jdk.crypto.ec-9.sym.txt:jdk.dynalink-9.sym.txt:jdk.editpad-9.sym.txt:jdk.hotspot.agent-9.sym.txt:jdk.httpserver-9.sym.txt:jdk.incubator.httpclient-9.sym.txt:jdk.jartool-9.sym.txt:jdk.javadoc-9.sym.txt:jdk.jcmd-9.sym.txt:jdk.jconsole-9.sym.txt:jdk.jdeps-9.sym.txt:jdk.jdi-9.sym.txt:jdk.jdwp.agent-9.sym.txt:jdk.jlink-9.sym.txt:jdk.jshell-9.sym.txt:jdk.jsobject-9.sym.txt:jdk.jstatd-9.sym.txt:jdk.localedata-9.sym.txt:jdk.management-9.sym.txt:jdk.management.agent-9.sym.txt:jdk.naming.dns-9.sym.txt:jdk.naming.rmi-9.sym.txt:jdk.net-9.sym.txt:jdk.pack-9.sym.txt:jdk.policytool-9.sym.txt:jdk.rmic-9.sym.txt:jdk.scripting.nashorn-9.sym.txt:jdk.sctp-9.sym.txt:jdk.security.auth-9.sym.txt:jdk.security.jgss-9.sym.txt:jdk.unsupported-9.sym.txt:jdk.xml.dom-9.sym.txt:jdk.zipfs-9.sym.txt @@ -37,3 +37,4 @@ platform version B base A files 
java.activation-B.sym.txt:java.base-B.sym.txt:java.compiler-B.sym.txt:java.corba-B.sym.txt:java.datatransfer-B.sym.txt:java.desktop-B.sym.txt:java.instrument-B.sym.txt:java.logging-B.sym.txt:java.management-B.sym.txt:java.management.rmi-B.sym.txt:java.naming-B.sym.txt:java.net.http-B.sym.txt:java.prefs-B.sym.txt:java.rmi-B.sym.txt:java.scripting-B.sym.txt:java.se-B.sym.txt:java.se.ee-B.sym.txt:java.security.jgss-B.sym.txt:java.security.sasl-B.sym.txt:java.smartcardio-B.sym.txt:java.sql-B.sym.txt:java.sql.rowset-B.sym.txt:java.transaction-B.sym.txt:java.transaction.xa-B.sym.txt:java.xml-B.sym.txt:java.xml.bind-B.sym.txt:java.xml.crypto-B.sym.txt:java.xml.ws-B.sym.txt:java.xml.ws.annotation-B.sym.txt:jdk.accessibility-B.sym.txt:jdk.attach-B.sym.txt:jdk.charsets-B.sym.txt:jdk.compiler-B.sym.txt:jdk.crypto.cryptoki-B.sym.txt:jdk.crypto.ec-B.sym.txt:jdk.dynalink-B.sym.txt:jdk.editpad-B.sym.txt:jdk.hotspot.agent-B.sym.txt:jdk.httpserver-B.sym.txt:jdk.incubator.httpclient-B.sym.txt:jdk.jartool-B.sym.txt:jdk.javadoc-B.sym.txt:jdk.jcmd-B.sym.txt:jdk.jconsole-B.sym.txt:jdk.jdeps-B.sym.txt:jdk.jdi-B.sym.txt:jdk.jdwp.agent-B.sym.txt:jdk.jfr-B.sym.txt:jdk.jlink-B.sym.txt:jdk.jshell-B.sym.txt:jdk.jsobject-B.sym.txt:jdk.jstatd-B.sym.txt:jdk.localedata-B.sym.txt:jdk.management-B.sym.txt:jdk.management.agent-B.sym.txt:jdk.management.jfr-B.sym.txt:jdk.naming.dns-B.sym.txt:jdk.naming.rmi-B.sym.txt:jdk.net-B.sym.txt:jdk.pack-B.sym.txt:jdk.rmic-B.sym.txt:jdk.scripting.nashorn-B.sym.txt:jdk.sctp-B.sym.txt:jdk.security.auth-B.sym.txt:jdk.security.jgss-B.sym.txt:jdk.unsupported-B.sym.txt:jdk.xml.dom-B.sym.txt:jdk.zipfs-B.sym.txt platform version C base B files java.base-C.sym.txt:java.compiler-C.sym.txt:java.desktop-C.sym.txt:java.naming-C.sym.txt:java.rmi-C.sym.txt:java.xml-C.sym.txt:jdk.compiler-C.sym.txt:jdk.jfr-C.sym.txt:jdk.jsobject-C.sym.txt:jdk.unsupported-C.sym.txt platform version D base C files java.base-D.sym.txt:java.compiler-D.sym.txt:java.desktop-D.sym.txt:java.management-D.sym.txt:java.management.rmi-D.sym.txt:java.net.http-D.sym.txt:java.security.jgss-D.sym.txt:java.xml-D.sym.txt:java.xml.crypto-D.sym.txt:jdk.compiler-D.sym.txt:jdk.httpserver-D.sym.txt:jdk.jartool-D.sym.txt:jdk.javadoc-D.sym.txt:jdk.jlink-D.sym.txt:jdk.jshell-D.sym.txt +platform version E base D files java.base-E.sym.txt:java.compiler-E.sym.txt:java.desktop-E.sym.txt:java.xml-E.sym.txt:jdk.compiler-E.sym.txt:jdk.httpserver-E.sym.txt:jdk.jfr-E.sym.txt:jdk.jlink-E.sym.txt:jdk.jsobject-E.sym.txt:jdk.management-E.sym.txt
--- a/make/gensrc/Gensrc-java.base.gmk Thu Dec 19 11:57:58 2019 +0530 +++ b/make/gensrc/Gensrc-java.base.gmk Thu Dec 19 12:28:56 2019 -0800 @@ -100,6 +100,21 @@ ################################################################################ +INTPOLY_GEN_DONE := $(GENSRC_DIR)/_intpoly-gensrc.marker +INTPOLY_HEADER := $(TOPDIR)/make/jdk/src/classes/build/tools/intpoly/header.txt +$(INTPOLY_GEN_DONE): $(INTPOLY_HEADER) $(BUILD_TOOLS_JDK) + $(call MakeDir, $(GENSRC_DIR)) + $(call LogInfo, Generating fixed-field math classes for java.base) + $(call ExecuteWithLog, $@, \ + $(TOOL_INTPOLY) \ + $(INTPOLY_HEADER) \ + $(GENSRC_DIR)) $(LOG_DEBUG) + $(TOUCH) $@ + +GENSRC_JAVA_BASE += $(INTPOLY_GEN_DONE) + +################################################################################ + java.base: $(GENSRC_JAVA_BASE) all: java.base
--- a/make/gensrc/GensrcVarHandles.gmk Thu Dec 19 11:57:58 2019 +0530 +++ b/make/gensrc/GensrcVarHandles.gmk Thu Dec 19 12:28:56 2019 -0800 @@ -159,6 +159,108 @@ ################################################################################ +################################################################################ +# Setup a rule for generating a VarHandleMemoryAddress java class +# Param 1 - Variable declaration prefix +# Param 2 - Type with first letter capitalized +define GenerateVarHandleMemoryAddress + + $1_Type := $2 + + $1_FILENAME := $(VARHANDLES_GENSRC_DIR)/VarHandleMemoryAddressAs$$($1_Type)s.java + + ifeq ($$($1_Type), Byte) + $1_type := byte + $1_BoxType := $$($1_Type) + + $1_rawType := $$($1_type) + $1_RawType := $$($1_Type) + $1_RawBoxType := $$($1_BoxType) + + $1_ARGS += -Kbyte + endif + + ifeq ($$($1_Type), Short) + $1_type := short + $1_BoxType := $$($1_Type) + + $1_rawType := $$($1_type) + $1_RawType := $$($1_Type) + $1_RawBoxType := $$($1_BoxType) + endif + + ifeq ($$($1_Type), Char) + $1_type := char + $1_BoxType := Character + + $1_rawType := $$($1_type) + $1_RawType := $$($1_Type) + $1_RawBoxType := $$($1_BoxType) + endif + + ifeq ($$($1_Type), Int) + $1_type := int + $1_BoxType := Integer + + $1_rawType := $$($1_type) + $1_RawType := $$($1_Type) + $1_RawBoxType := $$($1_BoxType) + + $1_ARGS += -KCAS + $1_ARGS += -KAtomicAdd + $1_ARGS += -KBitwise + endif + + ifeq ($$($1_Type), Long) + $1_type := long + $1_BoxType := $$($1_Type) + + $1_rawType := $$($1_type) + $1_RawType := $$($1_Type) + $1_RawBoxType := $$($1_BoxType) + + $1_ARGS += -KCAS + $1_ARGS += -KAtomicAdd + $1_ARGS += -KBitwise + endif + + ifeq ($$($1_Type), Float) + $1_type := float + $1_BoxType := $$($1_Type) + + $1_rawType := int + $1_RawType := Int + $1_RawBoxType := Integer + + $1_ARGS += -KCAS + $1_ARGS += -KfloatingPoint + endif + + ifeq ($$($1_Type), Double) + $1_type := double + $1_BoxType := $$($1_Type) + + $1_rawType := long + $1_RawType := Long + $1_RawBoxType := Long + + $1_ARGS += -KCAS + $1_ARGS += -KfloatingPoint + endif + + $$($1_FILENAME): $(VARHANDLES_SRC_DIR)/X-VarHandleMemoryAddressView.java.template $(BUILD_TOOLS_JDK) + $$(call MakeDir, $$(@D)) + $(RM) $$@ + $(TOOL_SPP) -nel -K$$($1_type) \ + -Dtype=$$($1_type) -DType=$$($1_Type) -DBoxType=$$($1_BoxType) \ + -DrawType=$$($1_rawType) -DRawType=$$($1_RawType) -DRawBoxType=$$($1_RawBoxType) \ + $$($1_ARGS) -i$$< -o$$@ + + GENSRC_VARHANDLES += $$($1_FILENAME) +endef + +################################################################################ + # List the types to generate source for, with capitalized first letter VARHANDLES_TYPES := Boolean Byte Short Char Int Long Float Double Reference $(foreach t, $(VARHANDLES_TYPES), \ @@ -169,6 +271,11 @@ $(foreach t, $(VARHANDLES_BYTE_ARRAY_TYPES), \ $(eval $(call GenerateVarHandleByteArray,VAR_HANDLE_BYTE_ARRAY_$t,$t))) +# List the types to generate source for, with capitalized first letter +VARHANDLES_MEMORY_ADDRESS_TYPES := Byte Short Char Int Long Float Double +$(foreach t, $(VARHANDLES_MEMORY_ADDRESS_TYPES), \ + $(eval $(call GenerateVarHandleMemoryAddress,VAR_HANDLE_MEMORY_ADDRESS_$t,$t))) + GENSRC_JAVA_BASE += $(GENSRC_VARHANDLES) # Include custom extension post hook
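The GenerateVarHandleMemoryAddress macro above pairs each Java carrier type with the raw type that the generated VarHandleMemoryAddressAs<Type>s class actually loads and stores (Float is accessed as int bits, Double as long bits), and enables the CAS/AtomicAdd/Bitwise template sections only where they apply. A rough Java restatement of that mapping, purely for illustration; the real expansion is done by the SPP tool over X-VarHandleMemoryAddressView.java.template, and the enum below is hypothetical:

// Hypothetical summary of the per-type parameters fed to the template.
enum MemoryAddressCarrier {
    BYTE  ("byte",   "Byte",      "byte",  false),
    SHORT ("short",  "Short",     "short", false),
    CHAR  ("char",   "Character", "char",  false),
    INT   ("int",    "Integer",   "int",   true),   // -KCAS -KAtomicAdd -KBitwise
    LONG  ("long",   "Long",      "long",  true),   // -KCAS -KAtomicAdd -KBitwise
    FLOAT ("float",  "Float",     "int",   true),   // raw access via int bits, -KCAS -KfloatingPoint
    DOUBLE("double", "Double",    "long",  true);   // raw access via long bits, -KCAS -KfloatingPoint

    final String type;     // -Dtype
    final String boxType;  // -DBoxType
    final String rawType;  // -DrawType: the primitive the VarHandle reads and writes
    final boolean supportsCas;

    MemoryAddressCarrier(String type, String boxType, String rawType, boolean supportsCas) {
        this.type = type;
        this.boxType = boxType;
        this.rawType = rawType;
        this.supportsCas = supportsCas;
    }
}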
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/jdk/src/classes/build/tools/intpoly/FieldGen.java Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,921 @@ +/* + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + + +/* + * This file is used to generated optimized finite field implementations. + */ +package build.tools.intpoly; + +import java.io.*; +import java.math.BigInteger; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.*; + +public class FieldGen { + + static FieldParams Curve25519 = new FieldParams( + "IntegerPolynomial25519", 26, 10, 1, 255, + Arrays.asList( + new Term(0, -19) + ), + Curve25519CrSequence(), simpleSmallCrSequence(10) + ); + + private static List<CarryReduce> Curve25519CrSequence() { + List<CarryReduce> result = new ArrayList<CarryReduce>(); + + // reduce(7,2) + result.add(new Reduce(17)); + result.add(new Reduce(18)); + + // carry(8,2) + result.add(new Carry(8)); + result.add(new Carry(9)); + + // reduce(0,7) + for (int i = 10; i < 17; i++) { + result.add(new Reduce(i)); + } + + // carry(0,9) + result.addAll(fullCarry(10)); + + return result; + } + + static FieldParams Curve448 = new FieldParams( + "IntegerPolynomial448", 28, 16, 1, 448, + Arrays.asList( + new Term(224, -1), + new Term(0, -1) + ), + Curve448CrSequence(), simpleSmallCrSequence(16) + ); + + private static List<CarryReduce> Curve448CrSequence() { + List<CarryReduce> result = new ArrayList<CarryReduce>(); + + // reduce(8, 7) + for (int i = 24; i < 31; i++) { + result.add(new Reduce(i)); + } + // reduce(4, 4) + for (int i = 20; i < 24; i++) { + result.add(new Reduce(i)); + } + + //carry(14, 2) + result.add(new Carry(14)); + result.add(new Carry(15)); + + // reduce(0, 4) + for (int i = 16; i < 20; i++) { + result.add(new Reduce(i)); + } + + // carry(0, 15) + result.addAll(fullCarry(16)); + + return result; + } + + static FieldParams P256 = new FieldParams( + "IntegerPolynomialP256", 26, 10, 2, 256, + Arrays.asList( + new Term(224, -1), + new Term(192, 1), + new Term(96, 1), + new Term(0, -1) + ), + P256CrSequence(), simpleSmallCrSequence(10) + ); + + private static List<CarryReduce> P256CrSequence() { + List<CarryReduce> result = new ArrayList<CarryReduce>(); + result.addAll(fullReduce(10)); + result.addAll(simpleSmallCrSequence(10)); + return result; + } + + static FieldParams P384 = new FieldParams( + "IntegerPolynomialP384", 28, 14, 2, 384, + Arrays.asList( + new Term(128, -1), + new Term(96, -1), + new Term(32, 1), + new Term(0, -1) + ), + P384CrSequence(), 
simpleSmallCrSequence(14) + ); + + private static List<CarryReduce> P384CrSequence() { + List<CarryReduce> result = new ArrayList<CarryReduce>(); + result.addAll(fullReduce(14)); + result.addAll(simpleSmallCrSequence(14)); + return result; + } + + static FieldParams P521 = new FieldParams( + "IntegerPolynomialP521", 28, 19, 2, 521, + Arrays.asList( + new Term(0, -1) + ), + P521CrSequence(), simpleSmallCrSequence(19) + ); + + private static List<CarryReduce> P521CrSequence() { + List<CarryReduce> result = new ArrayList<CarryReduce>(); + result.addAll(fullReduce(19)); + result.addAll(simpleSmallCrSequence(19)); + return result; + } + + static FieldParams O256 = new FieldParams( + "P256OrderField", 26, 10, 1, 256, + "FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551", + orderFieldCrSequence(10), orderFieldSmallCrSequence(10) + ); + + static FieldParams O384 = new FieldParams( + "P384OrderField", 28, 14, 1, 384, + "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC7634D81F4372DDF581A0DB248B0A77AECEC196ACCC52973", + orderFieldCrSequence(14), orderFieldSmallCrSequence(14) + ); + + static FieldParams O521 = new FieldParams( + "P521OrderField", 28, 19, 1, 521, + "01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA51868783BF2F966B7FCC0148F709A5D03BB5C9B8899C47AEBB6FB71E91386409", + o521crSequence(19), orderFieldSmallCrSequence(19) + ); + + private static List<CarryReduce> o521crSequence(int numLimbs) { + + // split the full reduce in half, with a carry in between + List<CarryReduce> result = new ArrayList<CarryReduce>(); + result.addAll(fullCarry(2 * numLimbs)); + for (int i = 2 * numLimbs - 1; i >= numLimbs + numLimbs / 2; i--) { + result.add(new Reduce(i)); + } + // carry + for (int i = numLimbs; i < numLimbs + numLimbs / 2 - 1; i++) { + result.add(new Carry(i)); + } + // rest of reduce + for (int i = numLimbs + numLimbs / 2 - 1; i >= numLimbs; i--) { + result.add(new Reduce(i)); + } + result.addAll(orderFieldSmallCrSequence(numLimbs)); + + return result; + } + + private static List<CarryReduce> orderFieldCrSequence(int numLimbs) { + List<CarryReduce> result = new ArrayList<CarryReduce>(); + result.addAll(fullCarry(2 * numLimbs)); + result.add(new Reduce(2 * numLimbs - 1)); + result.addAll(fullReduce(numLimbs)); + result.addAll(fullCarry(numLimbs + 1)); + result.add(new Reduce(numLimbs)); + result.addAll(fullCarry(numLimbs)); + + return result; + } + + private static List<CarryReduce> orderFieldSmallCrSequence(int numLimbs) { + List<CarryReduce> result = new ArrayList<CarryReduce>(); + result.addAll(fullCarry(numLimbs + 1)); + result.add(new Reduce(numLimbs)); + result.addAll(fullCarry(numLimbs)); + return result; + } + + static final FieldParams[] ALL_FIELDS = { + P256, P384, P521, O256, O384, O521, + }; + + public static class Term { + private final int power; + private final int coefficient; + + public Term(int power, int coefficient) { + this.power = power; + this.coefficient = coefficient; + } + + public int getPower() { + return power; + } + + public int getCoefficient() { + return coefficient; + } + + public BigInteger getValue() { + return BigInteger.valueOf(2).pow(power) + .multiply(BigInteger.valueOf(coefficient)); + } + + public String toString() { + return "2^" + power + " * " + coefficient; + } + } + + static abstract class CarryReduce { + private final int index; + + protected CarryReduce(int index) { + this.index = index; + } + + public int getIndex() { + return index; + } + + public abstract void write(CodeBuffer out, FieldParams params, + String 
prefix, Iterable<CarryReduce> remaining); + } + + static class Carry extends CarryReduce { + public Carry(int index) { + super(index); + } + + public void write(CodeBuffer out, FieldParams params, String prefix, + Iterable<CarryReduce> remaining) { + carry(out, params, prefix, getIndex()); + } + } + + static class Reduce extends CarryReduce { + public Reduce(int index) { + super(index); + } + + public void write(CodeBuffer out, FieldParams params, String prefix, + Iterable<CarryReduce> remaining) { + reduce(out, params, prefix, getIndex(), remaining); + } + } + + static class FieldParams { + private final String className; + private final int bitsPerLimb; + private final int numLimbs; + private final int maxAdds; + private final int power; + private final Iterable<Term> terms; + private final List<CarryReduce> crSequence; + private final List<CarryReduce> smallCrSequence; + + public FieldParams(String className, int bitsPerLimb, int numLimbs, + int maxAdds, int power, + Iterable<Term> terms, List<CarryReduce> crSequence, + List<CarryReduce> smallCrSequence) { + this.className = className; + this.bitsPerLimb = bitsPerLimb; + this.numLimbs = numLimbs; + this.maxAdds = maxAdds; + this.power = power; + this.terms = terms; + this.crSequence = crSequence; + this.smallCrSequence = smallCrSequence; + } + + public FieldParams(String className, int bitsPerLimb, int numLimbs, + int maxAdds, int power, + String term, List<CarryReduce> crSequence, + List<CarryReduce> smallCrSequence) { + this.className = className; + this.bitsPerLimb = bitsPerLimb; + this.numLimbs = numLimbs; + this.maxAdds = maxAdds; + this.power = power; + this.crSequence = crSequence; + this.smallCrSequence = smallCrSequence; + + terms = buildTerms(BigInteger.ONE.shiftLeft(power) + .subtract(new BigInteger(term, 16))); + } + + private Iterable<Term> buildTerms(BigInteger sub) { + // split a large subtrahend into smaller terms + // that are aligned with limbs + List<Term> result = new ArrayList<Term>(); + BigInteger mod = BigInteger.valueOf(1 << bitsPerLimb); + int termIndex = 0; + while (!sub.equals(BigInteger.ZERO)) { + int coef = sub.mod(mod).intValue(); + boolean plusOne = false; + if (coef > (1 << (bitsPerLimb - 1))) { + coef = coef - (1 << bitsPerLimb); + plusOne = true; + } + if (coef != 0) { + int pow = termIndex * bitsPerLimb; + result.add(new Term(pow, -coef)); + } + sub = sub.shiftRight(bitsPerLimb); + if (plusOne) { + sub = sub.add(BigInteger.ONE); + } + ++termIndex; + } + return result; + } + + public String getClassName() { + return className; + } + + public int getBitsPerLimb() { + return bitsPerLimb; + } + + public int getNumLimbs() { + return numLimbs; + } + + public int getMaxAdds() { + return maxAdds; + } + + public int getPower() { + return power; + } + + public Iterable<Term> getTerms() { + return terms; + } + + public List<CarryReduce> getCrSequence() { + return crSequence; + } + + public List<CarryReduce> getSmallCrSequence() { + return smallCrSequence; + } + } + + static Collection<Carry> fullCarry(int numLimbs) { + List<Carry> result = new ArrayList<Carry>(); + for (int i = 0; i < numLimbs - 1; i++) { + result.add(new Carry(i)); + } + return result; + } + + static Collection<Reduce> fullReduce(int numLimbs) { + List<Reduce> result = new ArrayList<Reduce>(); + for (int i = numLimbs - 2; i >= 0; i--) { + result.add(new Reduce(i + numLimbs)); + } + return result; + } + + static List<CarryReduce> simpleCrSequence(int numLimbs) { + List<CarryReduce> result = new ArrayList<CarryReduce>(); + for (int i = 0; i < 4; 
i++) { + result.addAll(fullCarry(2 * numLimbs - 1)); + result.addAll(fullReduce(numLimbs)); + } + + return result; + } + + static List<CarryReduce> simpleSmallCrSequence(int numLimbs) { + List<CarryReduce> result = new ArrayList<CarryReduce>(); + // carry a few positions at the end + for (int i = numLimbs - 2; i < numLimbs; i++) { + result.add(new Carry(i)); + } + // this carries out a single value that must be reduced back in + result.add(new Reduce(numLimbs)); + // finish with a full carry + result.addAll(fullCarry(numLimbs)); + return result; + } + + private final String packageName; + private final String parentName; + + private final Path headerPath; + private final Path destPath; + + public FieldGen(String packageName, String parentName, + Path headerPath, Path destRoot) throws IOException { + this.packageName = packageName; + this.parentName = parentName; + this.headerPath = headerPath; + this.destPath = destRoot.resolve(packageName.replace(".", "/")); + Files.createDirectories(destPath); + } + + // args: header.txt destpath + public static void main(String[] args) throws Exception { + + FieldGen gen = new FieldGen( + "sun.security.util.math.intpoly", + "IntegerPolynomial", + Path.of(args[0]), + Path.of(args[1])); + for (FieldParams p : ALL_FIELDS) { + System.out.println(p.className); + System.out.println(p.terms); + System.out.println(); + gen.generateFile(p); + } + } + + private void generateFile(FieldParams params) throws IOException { + String text = generate(params); + String fileName = params.getClassName() + ".java"; + PrintWriter out = new PrintWriter(Files.newBufferedWriter( + destPath.resolve(fileName))); + out.println(text); + out.close(); + } + + static class CodeBuffer { + + private int nextTemporary = 0; + private Set<String> temporaries = new HashSet<String>(); + private StringBuffer buffer = new StringBuffer(); + private int indent = 0; + private Class<?> lastCR; + private int lastCrCount = 0; + private int crMethodBreakCount = 0; + private int crNumLimbs = 0; + + public void incrIndent() { + indent++; + } + + public void decrIndent() { + indent--; + } + + public void newTempScope() { + nextTemporary = 0; + temporaries.clear(); + } + + public void appendLine(String s) { + appendIndent(); + buffer.append(s + "\n"); + } + + public void appendLine() { + buffer.append("\n"); + } + + public String toString() { + return buffer.toString(); + } + + public void startCrSequence(int numLimbs) { + this.crNumLimbs = numLimbs; + lastCrCount = 0; + crMethodBreakCount = 0; + lastCR = null; + } + + /* + * Record a carry/reduce of the specified type. 
This method is used to + * break up large carry/reduce sequences into multiple methods to make + * JIT/optimization easier + */ + public void record(Class<?> type) { + if (type == lastCR) { + lastCrCount++; + } else { + + if (lastCrCount >= 8) { + insertCrMethodBreak(); + } + + lastCR = type; + lastCrCount = 0; + } + } + + private void insertCrMethodBreak() { + + appendLine(); + + // call the new method + appendIndent(); + append("carryReduce" + crMethodBreakCount + "(r"); + for (int i = 0; i < crNumLimbs; i++) { + append(", c" + i); + } + // temporaries are not live between operations, no need to send + append(");\n"); + + decrIndent(); + appendLine("}"); + + // make the method + appendIndent(); + append("void carryReduce" + crMethodBreakCount + "(long[] r"); + for (int i = 0; i < crNumLimbs; i++) { + append(", long c" + i); + } + append(") {\n"); + incrIndent(); + // declare temporaries + for (String temp : temporaries) { + appendLine("long " + temp + ";"); + } + append("\n"); + + crMethodBreakCount++; + } + + public String getTemporary(String type, String value) { + Iterator<String> iter = temporaries.iterator(); + if (iter.hasNext()) { + String result = iter.next(); + iter.remove(); + appendLine(result + " = " + value + ";"); + return result; + } else { + String result = "t" + (nextTemporary++); + appendLine(type + " " + result + " = " + value + ";"); + return result; + } + } + + public void freeTemporary(String temp) { + temporaries.add(temp); + } + + public void appendIndent() { + for (int i = 0; i < indent; i++) { + buffer.append(" "); + } + } + + public void append(String s) { + buffer.append(s); + } + } + + private String generate(FieldParams params) throws IOException { + CodeBuffer result = new CodeBuffer(); + String header = readHeader(); + result.appendLine(header); + + if (packageName != null) { + result.appendLine("package " + packageName + ";"); + result.appendLine(); + } + result.appendLine("import java.math.BigInteger;"); + + result.appendLine("public class " + params.getClassName() + + " extends " + this.parentName + " {"); + result.incrIndent(); + + result.appendLine("private static final int BITS_PER_LIMB = " + + params.getBitsPerLimb() + ";"); + result.appendLine("private static final int NUM_LIMBS = " + + params.getNumLimbs() + ";"); + result.appendLine("private static final int MAX_ADDS = " + + params.getMaxAdds() + ";"); + result.appendLine( + "public static final BigInteger MODULUS = evaluateModulus();"); + result.appendLine("private static final long CARRY_ADD = 1 << " + + (params.getBitsPerLimb() - 1) + ";"); + if (params.getBitsPerLimb() * params.getNumLimbs() != params.getPower()) { + result.appendLine("private static final int LIMB_MASK = -1 " + + ">>> (64 - BITS_PER_LIMB);"); + } + int termIndex = 0; + + result.appendLine("public " + params.getClassName() + "() {"); + result.appendLine(); + result.appendLine(" super(BITS_PER_LIMB, NUM_LIMBS, MAX_ADDS, MODULUS);"); + result.appendLine(); + result.appendLine("}"); + + result.appendLine("private static BigInteger evaluateModulus() {"); + result.incrIndent(); + result.appendLine("BigInteger result = BigInteger.valueOf(2).pow(" + + params.getPower() + ");"); + for (Term t : params.getTerms()) { + boolean subtract = false; + int coefValue = t.getCoefficient(); + if (coefValue < 0) { + coefValue = 0 - coefValue; + subtract = true; + } + String coefExpr = "BigInteger.valueOf(" + coefValue + ")"; + String powExpr = "BigInteger.valueOf(2).pow(" + t.getPower() + ")"; + String termExpr = "ERROR"; + if (t.getPower() == 
0) { + termExpr = coefExpr; + } else if (coefValue == 1) { + termExpr = powExpr; + } else { + termExpr = powExpr + ".multiply(" + coefExpr + ")"; + } + if (subtract) { + result.appendLine("result = result.subtract(" + termExpr + ");"); + } else { + result.appendLine("result = result.add(" + termExpr + ");"); + } + } + result.appendLine("return result;"); + result.decrIndent(); + result.appendLine("}"); + + result.appendLine("@Override"); + result.appendLine("protected void finalCarryReduceLast(long[] limbs) {"); + result.incrIndent(); + int extraBits = params.getBitsPerLimb() * params.getNumLimbs() + - params.getPower(); + int highBits = params.getBitsPerLimb() - extraBits; + result.appendLine("long c = limbs[" + (params.getNumLimbs() - 1) + + "] >> " + highBits + ";"); + result.appendLine("limbs[" + (params.getNumLimbs() - 1) + "] -= c << " + + highBits + ";"); + for (Term t : params.getTerms()) { + int reduceBits = params.getPower() + extraBits - t.getPower(); + int negatedCoefficient = -1 * t.getCoefficient(); + modReduceInBits(result, params, true, "limbs", params.getNumLimbs(), + reduceBits, negatedCoefficient, "c"); + } + result.decrIndent(); + result.appendLine("}"); + + // full carry/reduce sequence + result.appendIndent(); + result.append("private void carryReduce(long[] r, "); + for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) { + result.append("long c" + i); + if (i < 2 * params.getNumLimbs() - 2) { + result.append(", "); + } + } + result.append(") {\n"); + result.newTempScope(); + result.incrIndent(); + result.appendLine("long c" + (2 * params.getNumLimbs() - 1) + " = 0;"); + write(result, params.getCrSequence(), params, "c", + 2 * params.getNumLimbs()); + result.appendLine(); + for (int i = 0; i < params.getNumLimbs(); i++) { + result.appendLine("r[" + i + "] = c" + i + ";"); + } + result.decrIndent(); + result.appendLine("}"); + + // small carry/reduce sequence + result.appendIndent(); + result.append("private void carryReduce(long[] r, "); + for (int i = 0; i < params.getNumLimbs(); i++) { + result.append("long c" + i); + if (i < params.getNumLimbs() - 1) { + result.append(", "); + } + } + result.append(") {\n"); + result.newTempScope(); + result.incrIndent(); + result.appendLine("long c" + params.getNumLimbs() + " = 0;"); + write(result, params.getSmallCrSequence(), params, + "c", params.getNumLimbs() + 1); + result.appendLine(); + for (int i = 0; i < params.getNumLimbs(); i++) { + result.appendLine("r[" + i + "] = c" + i + ";"); + } + result.decrIndent(); + result.appendLine("}"); + + result.appendLine("@Override"); + result.appendLine("protected void mult(long[] a, long[] b, long[] r) {"); + result.incrIndent(); + for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) { + result.appendIndent(); + result.append("long c" + i + " = "); + int startJ = Math.max(i + 1 - params.getNumLimbs(), 0); + int endJ = Math.min(params.getNumLimbs(), i + 1); + for (int j = startJ; j < endJ; j++) { + int bIndex = i - j; + result.append("(a[" + j + "] * b[" + bIndex + "])"); + if (j < endJ - 1) { + result.append(" + "); + } + } + result.append(";\n"); + } + result.appendLine(); + result.appendIndent(); + result.append("carryReduce(r, "); + for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) { + result.append("c" + i); + if (i < 2 * params.getNumLimbs() - 2) { + result.append(", "); + } + } + result.append(");\n"); + result.decrIndent(); + result.appendLine("}"); + + result.appendLine("@Override"); + result.appendLine("protected void reduce(long[] a) {"); + result.incrIndent(); + 
result.appendIndent(); + result.append("carryReduce(a, "); + for (int i = 0; i < params.getNumLimbs(); i++) { + result.append("a[" + i + "]"); + if (i < params.getNumLimbs() - 1) { + result.append(", "); + } + } + result.append(");\n"); + result.decrIndent(); + result.appendLine("}"); + + result.appendLine("@Override"); + result.appendLine("protected void square(long[] a, long[] r) {"); + result.incrIndent(); + for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) { + result.appendIndent(); + result.append("long c" + i + " = "); + int startJ = Math.max(i + 1 - params.getNumLimbs(), 0); + int endJ = Math.min(params.getNumLimbs(), i + 1); + int jDiff = endJ - startJ; + if (jDiff > 1) { + result.append("2 * ("); + } + for (int j = 0; j < jDiff / 2; j++) { + int aIndex = j + startJ; + int bIndex = i - aIndex; + result.append("(a[" + aIndex + "] * a[" + bIndex + "])"); + if (j < (jDiff / 2) - 1) { + result.append(" + "); + } + } + if (jDiff > 1) { + result.append(")"); + } + if (jDiff % 2 == 1) { + int aIndex = i / 2; + if (jDiff > 1) { + result.append(" + "); + } + result.append("(a[" + aIndex + "] * a[" + aIndex + "])"); + } + result.append(";\n"); + } + result.appendLine(); + result.appendIndent(); + result.append("carryReduce(r, "); + for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) { + result.append("c" + i); + if (i < 2 * params.getNumLimbs() - 2) { + result.append(", "); + } + } + result.append(");\n"); + result.decrIndent(); + result.appendLine("}"); + + result.decrIndent(); + result.appendLine("}"); // end class + + return result.toString(); + } + + private static void write(CodeBuffer out, List<CarryReduce> sequence, + FieldParams params, String prefix, int numLimbs) { + + out.startCrSequence(numLimbs); + for (int i = 0; i < sequence.size(); i++) { + CarryReduce cr = sequence.get(i); + Iterator<CarryReduce> remainingIter = sequence.listIterator(i + 1); + List<CarryReduce> remaining = new ArrayList<CarryReduce>(); + remainingIter.forEachRemaining(remaining::add); + cr.write(out, params, prefix, remaining); + } + } + + private static void reduce(CodeBuffer out, FieldParams params, + String prefix, int index, Iterable<CarryReduce> remaining) { + + out.record(Reduce.class); + + out.appendLine("//reduce from position " + index); + String reduceFrom = indexedExpr(false, prefix, index); + boolean referenced = false; + for (CarryReduce cr : remaining) { + if (cr.index == index) { + referenced = true; + } + } + for (Term t : params.getTerms()) { + int reduceBits = params.getPower() - t.getPower(); + int negatedCoefficient = -1 * t.getCoefficient(); + modReduceInBits(out, params, false, prefix, index, reduceBits, + negatedCoefficient, reduceFrom); + } + if (referenced) { + out.appendLine(reduceFrom + " = 0;"); + } + } + + private static void carry(CodeBuffer out, FieldParams params, + String prefix, int index) { + + out.record(Carry.class); + + out.appendLine("//carry from position " + index); + String carryFrom = prefix + index; + String carryTo = prefix + (index + 1); + String carry = "(" + carryFrom + " + CARRY_ADD) >> " + + params.getBitsPerLimb(); + String temp = out.getTemporary("long", carry); + out.appendLine(carryFrom + " -= (" + temp + " << " + + params.getBitsPerLimb() + ");"); + out.appendLine(carryTo + " += " + temp + ";"); + out.freeTemporary(temp); + } + + private static String indexedExpr( + boolean isArray, String prefix, int index) { + String result = prefix + index; + if (isArray) { + result = prefix + "[" + index + "]"; + } + return result; + } + + private static void 
modReduceInBits(CodeBuffer result, FieldParams params, + boolean isArray, String prefix, int index, int reduceBits, + int coefficient, String c) { + + String x = coefficient + " * " + c; + String accOp = "+="; + String temp = null; + if (coefficient == 1) { + x = c; + } else if (coefficient == -1) { + x = c; + accOp = "-="; + } else { + temp = result.getTemporary("long", x); + x = temp; + } + + if (reduceBits % params.getBitsPerLimb() == 0) { + int pos = reduceBits / params.getBitsPerLimb(); + result.appendLine(indexedExpr(isArray, prefix, (index - pos)) + + " " + accOp + " " + x + ";"); + } else { + int secondPos = reduceBits / params.getBitsPerLimb(); + int bitOffset = (secondPos + 1) * params.getBitsPerLimb() + - reduceBits; + int rightBitOffset = params.getBitsPerLimb() - bitOffset; + result.appendLine(indexedExpr(isArray, prefix, + (index - (secondPos + 1))) + " " + accOp + + " (" + x + " << " + bitOffset + ") & LIMB_MASK;"); + result.appendLine(indexedExpr(isArray, prefix, + (index - secondPos)) + " " + accOp + " " + x + + " >> " + rightBitOffset + ";"); + } + + if (temp != null) { + result.freeTemporary(temp); + } + } + + private String readHeader() throws IOException { + BufferedReader reader + = Files.newBufferedReader(headerPath); + StringBuffer result = new StringBuffer(); + reader.lines().forEach(s -> result.append(s + "\n")); + return result.toString(); + } +}
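FieldGen is a build-time generator: its main method takes the license header file and a destination source root, then writes one optimized IntegerPolynomial subclass per entry in ALL_FIELDS (IntegerPolynomialP256/P384/P521 and the P256/P384/P521 order fields) under sun/security/util/math/intpoly. A minimal sketch of driving it directly, assuming the build-tools classes are on the class path; in the real build this is what the $(TOOL_INTPOLY) rule in Gensrc-java.base.gmk invokes, and the output directory below is hypothetical:

import build.tools.intpoly.FieldGen;

public class RunFieldGen {
    public static void main(String[] args) throws Exception {
        // Arguments to FieldGen.main: [0] = header.txt used as the file banner,
        //                             [1] = root directory for the generated sources
        FieldGen.main(new String[] {
                "make/jdk/src/classes/build/tools/intpoly/header.txt",
                "build/gensrc"
        });
        // Prints each generated class name and its reduction terms, and writes the
        // .java files under build/gensrc/sun/security/util/math/intpoly/.
    }
}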
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/jdk/src/classes/build/tools/intpoly/header.txt Thu Dec 19 12:28:56 2019 -0800 @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * This file is generated by FieldGen.java. Do not modify it directly. + */
--- a/make/jdk/src/classes/build/tools/taglet/JSpec.java Thu Dec 19 11:57:58 2019 +0530 +++ b/make/jdk/src/classes/build/tools/taglet/JSpec.java Thu Dec 19 12:28:56 2019 -0800 @@ -35,6 +35,7 @@ import com.sun.source.doctree.DocTree; import com.sun.source.doctree.LiteralTree; import com.sun.source.doctree.UnknownBlockTagTree; +import com.sun.source.doctree.UnknownInlineTagTree; import com.sun.source.util.SimpleDocTreeVisitor; import jdk.javadoc.doclet.Taglet; @@ -96,16 +97,14 @@ private String baseURL; private String idPrefix; - protected JSpec(String tagName, String specTitle, String baseURL, String idPrefix) { + JSpec(String tagName, String specTitle, String baseURL, String idPrefix) { this.tagName = tagName; this.specTitle = specTitle; this.baseURL = baseURL; this.idPrefix = idPrefix; } - - static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<chapter>[1-9][0-9]*)(?<section>[0-9.]*)( .*)?$"); - + private static final Pattern TAG_PATTERN = Pattern.compile("(?s)(.+ )?(?<chapter>[1-9][0-9]*)(?<section>[0-9.]*)( .*)?$"); /** * Returns the set of locations in which the tag may be used. @@ -115,9 +114,14 @@ return EnumSet.allOf(jdk.javadoc.doclet.Taglet.Location.class); } + //@Override // uncomment when JDK 15 is the boot JDK + public boolean isBlockTag() { + return true; + } + @Override public boolean isInlineTag() { - return false; + return true; } @Override @@ -132,17 +136,28 @@ return ""; StringBuilder sb = new StringBuilder(); - sb.append("<dt>See <i>" + specTitle + "</i>:</dt>\n") - .append("<dd>\n"); + boolean in_dd = false; for (DocTree tag : tags) { - - if (tag.getKind() != UNKNOWN_BLOCK_TAG) { - continue; + if (sb.length() == 0 && tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) { + sb.append("<dt>See <i>").append(specTitle).append("</i>:</dt>\n") + .append("<dd>\n"); + in_dd = true; } - UnknownBlockTagTree blockTag = (UnknownBlockTagTree)tag; - String tagText = blockTag.getContent().toString().trim(); + List<? extends DocTree> contents; + switch (tag.getKind()) { + case UNKNOWN_BLOCK_TAG: + contents = ((UnknownBlockTagTree) tag).getContent(); + break; + case UNKNOWN_INLINE_TAG: + contents = ((UnknownInlineTagTree) tag).getContent(); + break; + default: + continue; + } + + String tagText = contents.toString().trim(); Matcher m = TAG_PATTERN.matcher(tagText); if (m.find()) { String chapter = m.group("chapter"); @@ -151,23 +166,27 @@ String url = String.format("%1$s/%2$s-%3$s.html#jls-%3$s%4$s", baseURL, idPrefix, chapter, section); - sb.append("<a href=\"") .append(url) .append("\">") - .append(expand(blockTag)) - .append("</a><br>"); + .append(expand(contents)) + .append("</a>"); + + if (tag.getKind() == DocTree.Kind.UNKNOWN_BLOCK_TAG) { + sb.append("<br>"); + } } - } - sb.append("</dd>"); + if (in_dd) { + sb.append("</dd>"); + } return sb.toString(); } - private String expand(UnknownBlockTagTree tree) { - StringBuilder sb = new StringBuilder(); + + private String expand(List<? extends DocTree> trees) { return (new SimpleDocTreeVisitor<StringBuilder, StringBuilder>() { public StringBuilder defaultAction(DocTree tree, StringBuilder sb) { return sb.append(tree.toString()); @@ -185,8 +204,10 @@ } private String escape(String s) { - return s.replace("&", "&").replace("<", "<").replace(">", ">"); + return s.replace("&", "&") + .replace("<", "<") + .replace(">", ">"); } - }).visit(tree.getContent(), new StringBuilder()).toString(); + }).visit(trees, new StringBuilder()).toString(); } }
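After this change the taglet accepts a spec reference either as a block tag or as an inline tag, and only the block form is wrapped in the <dt>/<dd> "See ..." definition-list entry. A hedged sketch of what a doc comment using the jls flavor of this taglet might look like (the jls tag name matches the jls-<chapter><section> anchors built by the URL template above; the surrounding class is illustrative):

/**
 * Inline references such as {@jls 5.1.7 Boxing Conversion} are now linked in place,
 * while the block tags below still produce the "See ..." definition-list entry.
 *
 * @jls 4.2.2 Integer Operations
 * @jls 5.1.7 Boxing Conversion
 */
public final class SpecReferenceExample { }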
--- a/make/launcher/Launcher-jdk.hotspot.agent.gmk Thu Dec 19 11:57:58 2019 +0530 +++ b/make/launcher/Launcher-jdk.hotspot.agent.gmk Thu Dec 19 12:28:56 2019 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -27,5 +27,5 @@ $(eval $(call SetupBuildLauncher, jhsdb, \ MAIN_CLASS := sun.jvm.hotspot.SALauncher, \ - MACOSX_SIGNED := true, \ + MACOSX_PRIVILEGED := true, \ ))
--- a/make/launcher/Launcher-jdk.jcmd.gmk Thu Dec 19 11:57:58 2019 +0530 +++ b/make/launcher/Launcher-jdk.jcmd.gmk Thu Dec 19 12:28:56 2019 -0800 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ JAVA_ARGS := \ -Dsun.jvm.hotspot.debugger.useProcDebugger \ -Dsun.jvm.hotspot.debugger.useWindbgDebugger, \ - MACOSX_SIGNED := true, \ + MACOSX_PRIVILEGED := true, \ )) $(eval $(call SetupBuildLauncher, jmap, \ @@ -38,7 +38,7 @@ JAVA_ARGS := \ -Dsun.jvm.hotspot.debugger.useProcDebugger \ -Dsun.jvm.hotspot.debugger.useWindbgDebugger, \ - MACOSX_SIGNED := true, \ + MACOSX_PRIVILEGED := true, \ )) $(eval $(call SetupBuildLauncher, jps, \ @@ -50,7 +50,7 @@ JAVA_ARGS := \ -Dsun.jvm.hotspot.debugger.useProcDebugger \ -Dsun.jvm.hotspot.debugger.useWindbgDebugger, \ - MACOSX_SIGNED := true, \ + MACOSX_PRIVILEGED := true, \ )) $(eval $(call SetupBuildLauncher, jstat, \
--- a/make/launcher/LauncherCommon.gmk Thu Dec 19 11:57:58 2019 +0530 +++ b/make/launcher/LauncherCommon.gmk Thu Dec 19 12:28:56 2019 -0800 @@ -74,7 +74,7 @@ # CFLAGS Additional CFLAGS # CFLAGS_windows Additional CFLAGS_windows # EXTRA_RC_FLAGS Additional EXTRA_RC_FLAGS -# MACOSX_SIGNED On macosx, sign this binary +# MACOSX_PRIVILEGED On macosx, allow to access other processes # OPTIMIZATION Override default optimization level (LOW) # OUTPUT_DIR Override default output directory # VERSION_INFO_RESOURCE Override default Windows resource file @@ -104,9 +104,8 @@ $1_CFLAGS += -DJAVA_ARGS=$$($1_JAVA_ARGS_STR) ifeq ($(call isTargetOs, macosx), true) - ifeq ($$($1_MACOSX_SIGNED), true) + ifeq ($$($1_MACOSX_PRIVILEGED), true) $1_PLIST_FILE := Info-privileged.plist - $1_CODESIGN := true else $1_PLIST_FILE := Info-cmdline.plist endif @@ -171,7 +170,6 @@ EXTRA_RC_FLAGS := $$($1_EXTRA_RC_FLAGS), \ MANIFEST := $(JAVA_MANIFEST), \ MANIFEST_VERSION := $(VERSION_NUMBER_FOUR_POSITIONS), \ - CODESIGN := $$($1_CODESIGN), \ )) $1 += $$(BUILD_LAUNCHER_$1)
--- a/make/lib/Lib-jdk.incubator.jpackage.gmk Thu Dec 19 11:57:58 2019 +0530 +++ b/make/lib/Lib-jdk.incubator.jpackage.gmk Thu Dec 19 12:28:56 2019 -0800 @@ -34,7 +34,7 @@ SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libapplauncher, \ TOOLCHAIN := TOOLCHAIN_LINK_CXX, \ OPTIMIZATION := LOW, \ - CFLAGS := $(CXXFLAGS_JDKLIB), \ + CFLAGS := $(CXXFLAGS_JDKLIB) $(X_CFLAGS), \ CFLAGS_windows := -EHsc -DUNICODE -D_UNICODE, \ LDFLAGS := $(LDFLAGS_JDKLIB) $(LDFLAGS_CXX_JDK) \ $(call SET_SHARED_LIBRARY_ORIGIN), \
--- a/make/scripts/compare.sh Thu Dec 19 11:57:58 2019 +0530 +++ b/make/scripts/compare.sh Thu Dec 19 12:28:56 2019 -0800 @@ -447,6 +447,16 @@ $CAT $OTHER_DIR/$f | eval "$HTML_FILTER" > $OTHER_FILE & $CAT $THIS_DIR/$f | eval "$HTML_FILTER" > $THIS_FILE & wait + elif [ "$SUFFIX" = "svg" ]; then + # GraphViz has non-determinism when generating svg files + OTHER_FILE=$WORK_DIR/$f.other + THIS_FILE=$WORK_DIR/$f.this + $MKDIR -p $(dirname $OTHER_FILE) $(dirname $THIS_FILE) + SVG_FILTER="$SED \ + -e 's/edge[0-9][0-9]*/edgeX/g' + " + $CAT $OTHER_DIR/$f | eval "$SVG_FILTER" > $OTHER_FILE + $CAT $THIS_DIR/$f | eval "$SVG_FILTER" > $THIS_FILE elif [[ "$f" = *"/lib/classlist" ]] || [ "$SUFFIX" = "jar_contents" ]; then # The classlist files may have some lines in random order OTHER_FILE=$WORK_DIR/$f.other @@ -566,8 +576,21 @@ | $CUT -f 2 -d ' ' | $SED "s|$OTHER_UNZIPDIR/||g") fi + # Separate executable/library files from other files in zip. + DIFFING_TEXT_FILES= + DIFFING_EXEC_FILES= + for file in $DIFFING_FILES; do + SUFFIX="${file##*.}" + if [ "$SUFFIX" = "exe" -o "$SUFFIX" = "dll" -o "$SUFFIX" = "so" \ + -o "$SUFFIX" = "dylib" ]; then + DIFFING_EXEC_FILES="$DIFFING_EXEC_FILES $file" + else + DIFFING_TEXT_FILES="$DIFFING_TEXT_FILES $file" + fi + done + $RM -f $WORK_DIR/$ZIP_FILE.diffs - for file in $DIFFING_FILES; do + for file in $DIFFING_TEXT_FILES; do if [[ "$ACCEPTED_JARZIP_CONTENTS $EXCEPTIONS" != *"$file"* ]]; then diff_text $OTHER_UNZIPDIR/$file $THIS_UNZIPDIR/$file >> $WORK_DIR/$ZIP_FILE.diffs fi @@ -592,6 +615,15 @@ done fi fi + + # Use the compare_bin_file function for comparing the executable files. + for file in $DIFFING_EXEC_FILES; do + compare_bin_file $THIS_UNZIPDIR $OTHER_UNZIPDIR $WORK_DIR/$ZIP_FILE.bin \ + $file + if [ "$?" != "0" ]; then + return_value=1 + fi + done fi return $return_value @@ -779,6 +811,7 @@ PDB_DIRS="$(ls -d \ {$OTHER,$THIS}/support/modules_{cmds,libs}/{*,*/*} \ {$OTHER,$THIS}/support/native/java.base/java_objs \ + {$OTHER,$THIS}/support/native/jdk.incubator.jpackage/* \ )" export _NT_SYMBOL_PATH="$(echo $PDB_DIRS | tr ' ' ';')" fi
--- a/src/hotspot/cpu/aarch64/aarch64.ad Thu Dec 19 11:57:58 2019 +0530 +++ b/src/hotspot/cpu/aarch64/aarch64.ad Thu Dec 19 12:28:56 2019 -0800 @@ -2177,7 +2177,7 @@ return ret_value; // Per default match rules are supported. } -const bool Matcher::match_rule_supported_vector(int opcode, int vlen) { +const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { // TODO // identify extra cases that we might want to provide match rules for
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Thu Dec 19 11:57:58 2019 +0530 +++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.cpp Thu Dec 19 12:28:56 2019 -0800 @@ -135,10 +135,6 @@ // * 63-48 Fixed (16-bits, always zero) // -uintptr_t ZPlatformAddressBase() { - return 0; -} - size_t ZPlatformAddressOffsetBits() { const size_t min_address_offset_bits = 42; // 4TB const size_t max_address_offset_bits = 44; // 16TB
--- a/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp Thu Dec 19 11:57:58 2019 +0530 +++ b/src/hotspot/cpu/aarch64/gc/z/zGlobals_aarch64.hpp Thu Dec 19 12:28:56 2019 -0800 @@ -29,7 +29,6 @@ const size_t ZPlatformNMethodDisarmedOffset = 4; const size_t ZPlatformCacheLineSize = 64; -uintptr_t ZPlatformAddressBase(); size_t ZPlatformAddressOffsetBits(); size_t ZPlatformAddressMetadataShift();
--- a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp Thu Dec 19 11:57:58 2019 +0530 +++ b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp Thu Dec 19 12:28:56 2019 -0800 @@ -59,4 +59,6 @@ #define THREAD_LOCAL_POLL +#define PREFERRED_METASPACE_ALIGNMENT + #endif // CPU_AARCH64_GLOBALDEFINITIONS_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp Thu Dec 19 11:57:58 2019 +0530 +++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp Thu Dec 19 12:28:56 2019 -0800 @@ -71,8 +71,6 @@ // Clear short arrays bigger than one word in an arch-specific way define_pd_global(intx, InitArrayShortSize, BytesPerLong); -define_pd_global(bool, ThreadLocalHandshakes, true); - #if defined(COMPILER1) || defined(COMPILER2) define_pd_global(intx, InlineSmallCode, 1000); #endif
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Thu Dec 19 11:57:58 2019 +0530 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Thu Dec 19 12:28:56 2019 -0800 @@ -3904,46 +3904,71 @@ } } -void MacroAssembler::encode_klass_not_null(Register dst, Register src) { +MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); + +MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { + assert(UseCompressedClassPointers, "not using compressed class pointers"); + assert(Metaspace::initialized(), "metaspace not initialized yet"); + + if (_klass_decode_mode != KlassDecodeNone) { + return _klass_decode_mode; + } + + assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift() + || 0 == CompressedKlassPointers::shift(), "decode alg wrong"); + if (CompressedKlassPointers::base() == NULL) { + return (_klass_decode_mode = KlassDecodeZero); + } + + if (operand_valid_for_logical_immediate( + /*is32*/false, (uint64_t)CompressedKlassPointers::base())) { + const uint64_t range_mask = + (1UL << log2_intptr(CompressedKlassPointers::range())) - 1; + if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) { + return (_klass_decode_mode = KlassDecodeXor); + } + } + + const uint64_t shifted_base = + (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); + guarantee((shifted_base & 0xffff0000ffffffff) == 0, + "compressed class base bad alignment"); + + return (_klass_decode_mode = KlassDecodeMovk); +} + +void MacroAssembler::encode_klass_not_null(Register dst, Register src) { + switch (klass_decode_mode()) { + case KlassDecodeZero: if (CompressedKlassPointers::shift() != 0) { - assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); lsr(dst, src, LogKlassAlignmentInBytes); } else { if (dst != src) mov(dst, src); } - return; - } - - if (use_XOR_for_compressed_class_base) { + break; + + case KlassDecodeXor: if (CompressedKlassPointers::shift() != 0) { eor(dst, src, (uint64_t)CompressedKlassPointers::base()); lsr(dst, dst, LogKlassAlignmentInBytes); } else { eor(dst, src, (uint64_t)CompressedKlassPointers::base()); } - return; - } - - if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 - && CompressedKlassPointers::shift() == 0) { - movw(dst, src); - return; - } - -#ifdef ASSERT - verify_heapbase("MacroAssembler::encode_klass_not_null2: heap base corrupted?"); -#endif - - Register rbase = dst; - if (dst == src) rbase = rheapbase; - mov(rbase, (uint64_t)CompressedKlassPointers::base()); - sub(dst, src, rbase); - if (CompressedKlassPointers::shift() != 0) { - assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - lsr(dst, dst, LogKlassAlignmentInBytes); - } - if (dst == src) reinit_heapbase(); + break; + + case KlassDecodeMovk: + if (CompressedKlassPointers::shift() != 0) { + ubfx(dst, src, LogKlassAlignmentInBytes, 32); + } else { + movw(dst, src); + } + break; + + case KlassDecodeNone: + ShouldNotReachHere(); + break; + } } void MacroAssembler::encode_klass_not_null(Register r) { @@ -3951,49 +3976,44 @@ } void MacroAssembler::decode_klass_not_null(Register dst, Register src) { - Register rbase = dst; assert (UseCompressedClassPointers, "should only be used for compressed headers"); - if (CompressedKlassPointers::base() == NULL) { + switch (klass_decode_mode()) { + case KlassDecodeZero: if (CompressedKlassPointers::shift() != 0) { - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); lsl(dst, src, 
LogKlassAlignmentInBytes); } else { if (dst != src) mov(dst, src); } - return; - } - - if (use_XOR_for_compressed_class_base) { + break; + + case KlassDecodeXor: if (CompressedKlassPointers::shift() != 0) { lsl(dst, src, LogKlassAlignmentInBytes); eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); } else { eor(dst, src, (uint64_t)CompressedKlassPointers::base()); } - return; - } - - if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0 - && CompressedKlassPointers::shift() == 0) { - if (dst != src) - movw(dst, src); - movk(dst, (uint64_t)CompressedKlassPointers::base() >> 32, 32); - return; - } - - // Cannot assert, unverified entry point counts instructions (see .ad file) - // vtableStubs also counts instructions in pd_code_size_limit. - // Also do not verify_oop as this is called by verify_oop. - if (dst == src) rbase = rheapbase; - mov(rbase, (uint64_t)CompressedKlassPointers::base()); - if (CompressedKlassPointers::shift() != 0) { - assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); - add(dst, rbase, src, Assembler::LSL, LogKlassAlignmentInBytes); - } else { - add(dst, rbase, src); - } - if (dst == src) reinit_heapbase(); + break; + + case KlassDecodeMovk: { + const uint64_t shifted_base = + (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); + + if (dst != src) movw(dst, src); + movk(dst, shifted_base >> 32, 32); + + if (CompressedKlassPointers::shift() != 0) { + lsl(dst, dst, LogKlassAlignmentInBytes); + } + + break; + } + + case KlassDecodeNone: + ShouldNotReachHere(); + break; + } } void MacroAssembler::decode_klass_not_null(Register r) { @@ -4899,11 +4919,15 @@ DIFFERENCE, NEXT_WORD, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT, SHORT_LOOP_START, TAIL_CHECK; - const u1 STUB_THRESHOLD = 64 + 8; bool isLL = ae == StrIntrinsicNode::LL; bool isLU = ae == StrIntrinsicNode::LU; bool isUL = ae == StrIntrinsicNode::UL; + // The stub threshold for LL strings is: 72 (64 + 8) chars + // UU: 36 chars, or 72 bytes (valid for the 64-byte large loop with prefetch) + // LU/UL: 24 chars, or 48 bytes (valid for the 16-character loop at least) + const u1 stub_threshold = isLL ? 72 : ((isLU || isUL) ? 24 : 36); + bool str1_isL = isLL || isLU; bool str2_isL = isLL || isUL; @@ -4944,7 +4968,7 @@ cmp(str1, str2); br(Assembler::EQ, DONE); ldr(tmp2, Address(str2)); - cmp(cnt2, STUB_THRESHOLD); + cmp(cnt2, stub_threshold); br(GE, STUB); subsw(cnt2, cnt2, minCharsInWord); br(EQ, TAIL_CHECK); @@ -4956,7 +4980,7 @@ cmp(str1, str2); br(Assembler::EQ, DONE); ldr(tmp2, Address(str2)); - cmp(cnt2, STUB_THRESHOLD); + cmp(cnt2, stub_threshold); br(GE, STUB); subw(cnt2, cnt2, 4); eor(vtmpZ, T16B, vtmpZ, vtmpZ); @@ -4972,7 +4996,7 @@ cmp(str1, str2); br(Assembler::EQ, DONE); ldrs(vtmp, Address(str2)); - cmp(cnt2, STUB_THRESHOLD); + cmp(cnt2, stub_threshold); br(GE, STUB); subw(cnt2, cnt2, 4); lea(str1, Address(str1, cnt2, Address::uxtw(str1_chr_shift)));
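The rewritten encode/decode paths above pick one of three strategies once, instead of re-deriving the decision at every use: shift-only when the compressed class base is zero, XOR when the base is a valid logical immediate that does not overlap the klass range, and otherwise a single movk that re-inserts the shifted base. A rough Java sketch of that selection logic, using placeholder inputs for CompressedKlassPointers::base()/shift()/range() and a predicate standing in for operand_valid_for_logical_immediate (all names below are illustrative):

import java.util.function.LongPredicate;

enum KlassDecodeMode { ZERO, XOR, MOVK }

final class KlassDecodeChooser {
    static KlassDecodeMode choose(long base, int shift, long range,
                                  LongPredicate validLogicalImmediate) {
        if (base == 0L) {
            return KlassDecodeMode.ZERO;      // decode is just a shift (or a plain move)
        }
        // range_mask = (1 << log2(range)) - 1, as in klass_decode_mode()
        long rangeMask = (1L << (63 - Long.numberOfLeadingZeros(range))) - 1;
        if (validLogicalImmediate.test(base) && (base & rangeMask) == 0) {
            return KlassDecodeMode.XOR;       // base bits can be XOR'ed in and out
        }
        // Otherwise the shifted base may only have bits in 32..47, so one movk can restore it.
        long shiftedBase = base >>> shift;
        if ((shiftedBase & 0xffff_0000_ffff_ffffL) != 0) {
            throw new IllegalStateException("compressed class base bad alignment");
        }
        return KlassDecodeMode.MOVK;
    }
}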
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Thu Dec 19 11:57:58 2019 +0530 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Thu Dec 19 12:28:56 2019 -0800 @@ -80,17 +80,20 @@ void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); - // True if an XOR can be used to expand narrow klass references. - bool use_XOR_for_compressed_class_base; + enum KlassDecodeMode { + KlassDecodeNone, + KlassDecodeZero, + KlassDecodeXor, + KlassDecodeMovk + }; + + KlassDecodeMode klass_decode_mode(); + + private: + static KlassDecodeMode _klass_decode_mode; public: - MacroAssembler(CodeBuffer* code) : Assembler(code) { - use_XOR_for_compressed_class_base - = operand_valid_for_logical_immediate - (/*is32*/false, (uint64_t)CompressedKlassPointers::base()) - && ((uint64_t)CompressedKlassPointers::base() - > (1UL << log2_intptr(CompressedKlassPointers::range()))); - } + MacroAssembler(CodeBuffer* code) : Assembler(code) {} // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. // The implementation is only non-empty for the InterpreterMacroAssembler,
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Thu Dec 19 11:57:58 2019 +0530 +++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Thu Dec 19 12:28:56 2019 -0800 @@ -4010,7 +4010,7 @@ // code for comparing 16 characters of strings with Latin1 and Utf16 encoding void compare_string_16_x_LU(Register tmpL, Register tmpU, Label &DIFF1, Label &DIFF2) { - Register cnt1 = r2, tmp1 = r10, tmp2 = r11, tmp3 = r12; + Register cnt1 = r2, tmp2 = r11, tmp3 = r12; FloatRegister vtmp = v1, vtmpZ = v0, vtmp3 = v2; __ ldrq(vtmp, Address(__ post(tmp2, 16))); @@ -4070,18 +4070,14 @@ __ add(str2, str2, isLU ? wordSize : wordSize/2); __ fmovd(isLU ? tmp1 : tmp2, vtmp); __ subw(cnt2, cnt2, 8); // Already loaded 4 symbols. Last 4 is special case. - __ add(str1, str1, cnt2, __ LSL, isLU ? 0 : 1); __ eor(rscratch2, tmp1, tmp2); - __ add(str2, str2, cnt2, __ LSL, isLU ? 1 : 0); __ mov(rscratch1, tmp2); __ cbnz(rscratch2, CALCULATE_DIFFERENCE); - Register strU = isLU ? str2 : str1, - strL = isLU ? str1 : str2, - tmpU = isLU ? rscratch1 : tmp1, // where to keep U for comparison + Register tmpU = isLU ? rscratch1 : tmp1, // where to keep U for comparison tmpL = isLU ? tmp1 : rscratch1; // where to keep L for comparison __ push(spilled_regs, sp); - __ sub(tmp2, strL, cnt2); // strL pointer to load from - __ sub(cnt1, strU, cnt2, __ LSL, 1); // strU pointer to load from + __ mov(tmp2, isLU ? str1 : str2); // init the pointer to L next load + __ mov(cnt1, isLU ? str2 : str1); // init the pointer to U next load __ ldr(tmp3, Address(__ post(cnt1, 8))); @@ -4110,6 +4106,7 @@ __ bind(NO_PREFETCH); __ subs(cnt2, cnt2, 16); __ br(__ LT, TAIL); + __ align(OptoLoopAlignment); __ bind(SMALL_LOOP); // smaller loop __ subs(cnt2, cnt2, 16); compare_string_16_x_LU(tmpL, tmpU, DIFF1, DIFF2); @@ -4117,7 +4114,7 @@ __ cmn(cnt2, (u1)16); __ br(__ EQ, LOAD_LAST); __ bind(TAIL); // 1..15 characters left until last load (last 4 characters) - __ add(cnt1, cnt1, cnt2, __ LSL, 1); // Address of 8 bytes before last 4 characters in UTF-16 string + __ add(cnt1, cnt1, cnt2, __ LSL, 1); // Address of 32 bytes before last 4 characters in UTF-16 string __ add(tmp2, tmp2, cnt2); // Address of 16 bytes before last 4 characters in Latin1 string __ ldr(tmp3, Address(cnt1, -8)); compare_string_16_x_LU(tmpL, tmpU, DIFF1, DIFF2); // last 16 characters before last load @@ -4133,7 +4130,8 @@ __ mov(tmpU, tmp3); __ pop(spilled_regs, sp); - __ ldrs(vtmp, Address(strL)); + // tmp2 points to the address of the last 4 Latin1 characters right now + __ ldrs(vtmp, Address(tmp2)); __ zip1(vtmp, __ T8B, vtmp, vtmpZ); __ fmovd(tmpL, vtmp); @@ -4199,6 +4197,7 @@ // less than 16 bytes left? __ subs(cnt2, cnt2, isLL ? 16 : 8); __ br(__ LT, TAIL); + __ align(OptoLoopAlignment); __ bind(SMALL_LOOP); compare_string_16_bytes_same(DIFF, DIFF2); __ subs(cnt2, cnt2, isLL ? 16 : 8);
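The stubGenerator changes above rework the mixed-encoding (LU/UL) compare: tmp2 and cnt1 are now initialized to the next Latin1 and UTF-16 load addresses instead of being derived from precomputed end pointers, and the hot loops are aligned to OptoLoopAlignment. As a plain scalar reference for what the SIMD stub computes (it widens Latin1 bytes to UTF-16 code units with zip1 against a zero vector before comparing), something along these lines; the function name and return convention are illustrative only:

#include <cstddef>
#include <cstdint>
#include <iostream>

// Scalar model of an LU compare: 'latin1' holds 1-byte chars, 'utf16' holds
// 2-byte code units. Returns <0, 0 or >0 in the java.lang.String.compareTo
// style. The stub performs the same comparison many characters at a time.
int compare_LU(const uint8_t* latin1, size_t len1,
               const uint16_t* utf16, size_t len2) {
  const size_t n = len1 < len2 ? len1 : len2;
  for (size_t i = 0; i < n; i++) {
    // Widening a Latin1 byte is a zero-extension to 16 bits, which is what
    // interleaving with a zero vector (zip1 ... vtmpZ) does lane by lane.
    const uint16_t c1 = latin1[i];
    const uint16_t c2 = utf16[i];
    if (c1 != c2) {
      return (int)c1 - (int)c2;
    }
  }
  return (int)len1 - (int)len2;
}

int main() {
  const uint8_t  a[] = { 'a', 'b', 'c' };
  const uint16_t b[] = { 'a', 'b', 0x00e9 };    // "ab" followed by e-acute
  std::cout << compare_LU(a, 3, b, 3) << "\n";  // negative: 'c' sorts before U+00E9
  return 0;
}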
--- a/src/hotspot/cpu/arm/arm.ad	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/arm/arm.ad	Thu Dec 19 12:28:56 2019 -0800
@@ -971,7 +971,7 @@
   return true;  // Per default match rules are supported.
 }
 
-const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
+const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
 
   // TODO
   // identify extra cases that we might want to provide match rules for
--- a/src/hotspot/cpu/arm/globals_arm.hpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/arm/globals_arm.hpp	Thu Dec 19 12:28:56 2019 -0800
@@ -70,8 +70,6 @@
 
 define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
 
-define_pd_global(bool, ThreadLocalHandshakes, false);
-
 #define ARCH_FLAGS(develop, \
   product, \
   diagnostic, \
--- a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Thu Dec 19 12:28:56 2019 -0800
@@ -58,6 +58,10 @@
 #define INCLUDE_RTM_OPT 0
 #endif
 
+#if defined(AIX)
+#define PREFERRED_METASPACE_ALIGNMENT
+#endif
+
 #define SUPPORT_RESERVED_STACK_AREA
 
 #define THREAD_LOCAL_POLL
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp	Thu Dec 19 12:28:56 2019 -0800
@@ -74,8 +74,6 @@
 // 2x unrolled loop is shorter with more than 9 HeapWords.
 define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
 
-define_pd_global(bool, ThreadLocalHandshakes, true);
-
 // Platform dependent flag handling: flags only defined on this platform.
 #define ARCH_FLAGS(develop, \
   product, \
--- a/src/hotspot/cpu/ppc/ppc.ad	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/ppc/ppc.ad	Thu Dec 19 12:28:56 2019 -0800
@@ -2291,7 +2291,7 @@
   return true;  // Per default match rules are supported.
 }
 
-const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
+const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
 
   // TODO
   // identify extra cases that we might want to provide match rules for
--- a/src/hotspot/cpu/s390/globals_s390.hpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/s390/globals_s390.hpp	Thu Dec 19 12:28:56 2019 -0800
@@ -76,8 +76,6 @@
 // 8146801 (Short Array Allocation): No performance work done here yet.
 define_pd_global(intx, InitArrayShortSize, 1*BytesPerLong);
 
-define_pd_global(bool, ThreadLocalHandshakes, true);
-
 #define ARCH_FLAGS(develop, \
   product, \
   diagnostic, \
--- a/src/hotspot/cpu/s390/s390.ad	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/s390/s390.ad	Thu Dec 19 12:28:56 2019 -0800
@@ -1551,7 +1551,7 @@
   // BUT: make sure match rule is not disabled by a false predicate!
 }
 
-const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
+const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
   // TODO
   // Identify extra cases that we might want to provide match rules for
   // e.g. Op_ vector nodes and other intrinsics while guarding with vlen.
--- a/src/hotspot/cpu/sparc/globals_sparc.hpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/sparc/globals_sparc.hpp	Thu Dec 19 12:28:56 2019 -0800
@@ -80,8 +80,6 @@
 
 define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
 
-define_pd_global(bool, ThreadLocalHandshakes, true);
-
 #define ARCH_FLAGS(develop, \
   product, \
   diagnostic, \
--- a/src/hotspot/cpu/sparc/sparc.ad	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/sparc/sparc.ad	Thu Dec 19 12:28:56 2019 -0800
@@ -1711,7 +1711,7 @@
   return true;  // Per default match rules are supported.
 }
 
-const bool Matcher::match_rule_supported_vector(int opcode, int vlen) {
+const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
 
   // TODO
   // identify extra cases that we might want to provide match rules for
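On arm, ppc, s390 and sparc the .ad change above is mechanical: Matcher::match_rule_supported_vector gains a BasicType parameter while the body stays a TODO. The x86.ad hunk later in this changeset shows the intended use, where the element type feeds a vector-size check before the per-opcode special cases. A compilable sketch of that shape, with toy stand-ins for the HotSpot enums and for Matcher::vector_size_supported:

// Illustrative stand-ins for HotSpot types; only the overall shape matters.
enum BasicType { T_BYTE, T_SHORT, T_INT, T_LONG, T_FLOAT, T_DOUBLE };
enum Opcode    { Op_AddVB, Op_CMoveVF, Op_CMoveVD };

static bool match_rule_supported(int /*opcode*/) {
  return true;  // assume the scalar/ISA check passed
}

// Placeholder for Matcher::vector_size_supported(bt, vlen): can this platform
// form a vector of 'vlen' elements of type 'bt' at all (MaxVectorSize etc.)?
static bool vector_size_supported(BasicType bt, int vlen) {
  const int elem_size = (bt == T_BYTE) ? 1 : (bt == T_SHORT) ? 2 :
                        (bt == T_LONG || bt == T_DOUBLE) ? 8 : 4;
  return vlen >= 2 && vlen * elem_size <= 64;  // cap at 512-bit vectors
}

// Same structure as the new x86 Matcher::match_rule_supported_vector:
// 1) opcode supported at all, 2) size/type supported, 3) per-opcode special cases.
static bool match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
  if (!match_rule_supported(opcode)) {
    return false;
  }
  if (!vector_size_supported(bt, vlen)) {
    return false;
  }
  switch (opcode) {
  case Op_CMoveVF: return vlen == 8;  // implementation limitation
  case Op_CMoveVD: return vlen == 4;  // implementation limitation
  default:         return true;
  }
}

int main() {
  return match_rule_supported_vector(Op_AddVB, 16, T_BYTE) ? 0 : 1;
}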
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.cpp	Thu Dec 19 12:28:56 2019 -0800
@@ -135,10 +135,6 @@
 //  * 63-48 Fixed (16-bits, always zero)
 //
 
-uintptr_t ZPlatformAddressBase() {
-  return 0;
-}
-
 size_t ZPlatformAddressOffsetBits() {
   const size_t min_address_offset_bits = 42; // 4TB
   const size_t max_address_offset_bits = 44; // 16TB
--- a/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/x86/gc/z/zGlobals_x86.hpp	Thu Dec 19 12:28:56 2019 -0800
@@ -29,7 +29,6 @@
 
 const size_t ZPlatformNMethodDisarmedOffset = 4;
 const size_t ZPlatformCacheLineSize = 64;
 
-uintptr_t ZPlatformAddressBase();
 size_t ZPlatformAddressOffsetBits();
 size_t ZPlatformAddressMetadataShift();
--- a/src/hotspot/cpu/x86/globals_x86.hpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/x86/globals_x86.hpp	Thu Dec 19 12:28:56 2019 -0800
@@ -89,13 +89,6 @@
 
 define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
 
-#if defined(_LP64) || defined(_WINDOWS)
-define_pd_global(bool, ThreadLocalHandshakes, true);
-#else
-// get_thread() is slow on linux 32 bit, therefore off by default
-define_pd_global(bool, ThreadLocalHandshakes, false);
-#endif
-
 #define ARCH_FLAGS(develop, \
   product, \
   diagnostic, \
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Dec 19 12:28:56 2019 -0800
@@ -4059,7 +4059,10 @@
 
 #ifdef COMPILER2
 // Generic instructions support for use in .ad files C2 code generation
-void MacroAssembler::vabsnegd(int opcode, XMMRegister dst, Register scr) {
+void MacroAssembler::vabsnegd(int opcode, XMMRegister dst, XMMRegister src, Register scr) {
+  if (dst != src) {
+    movdqu(dst, src);
+  }
   if (opcode == Op_AbsVD) {
     andpd(dst, ExternalAddress(StubRoutines::x86::vector_double_sign_mask()), scr);
   } else {
@@ -4077,7 +4080,10 @@
   }
 }
 
-void MacroAssembler::vabsnegf(int opcode, XMMRegister dst, Register scr) {
+void MacroAssembler::vabsnegf(int opcode, XMMRegister dst, XMMRegister src, Register scr) {
+  if (dst != src) {
+    movdqu(dst, src);
+  }
   if (opcode == Op_AbsVF) {
     andps(dst, ExternalAddress(StubRoutines::x86::vector_float_sign_mask()), scr);
   } else {
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Thu Dec 19 12:28:56 2019 -0800
@@ -1638,9 +1638,9 @@
 
 #ifdef COMPILER2
   // Generic instructions support for use in .ad files C2 code generation
-  void vabsnegd(int opcode, XMMRegister dst, Register scr);
+  void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, Register scr);
   void vabsnegd(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
-  void vabsnegf(int opcode, XMMRegister dst, Register scr);
+  void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, Register scr);
   void vabsnegf(int opcode, XMMRegister dst, XMMRegister src, int vector_len, Register scr);
   void vextendbw(bool sign, XMMRegister dst, XMMRegister src, int vector_len);
   void vextendbw(bool sign, XMMRegister dst, XMMRegister src);
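The vabsnegd/vabsnegf changes above make the source operand explicit and copy it into dst before the mask is applied. The trick itself is the usual IEEE-754 one: AbsV* is an AND that clears the sign bit of every lane (andpd/andps against vector_double_sign_mask and its float counterpart), while the NegV* branch, not fully visible in these hunks, flips the sign bit instead. A single-lane model in plain C++:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Single-lane model of the masks behind vabsnegd/vabsnegf: the vector versions
// apply the same 64-bit (or 32-bit) mask to every lane with andpd/xorpd.
static double abs_via_mask(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits &= 0x7fffffffffffffffULL;      // clear the sign bit (Op_AbsVD -> andpd)
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

static double neg_via_mask(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= 0x8000000000000000ULL;      // flip the sign bit (Op_NegVD -> xorpd)
  std::memcpy(&x, &bits, sizeof x);
  return x;
}

int main() {
  assert(abs_via_mask(-2.5) == 2.5);
  assert(neg_via_mask(2.5) == -2.5);
  assert(std::signbit(neg_via_mask(0.0)));  // unlike 0 - x, the XOR turns +0.0 into -0.0
  return 0;
}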
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Thu Dec 19 11:57:58 2019 +0530
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Thu Dec 19 12:28:56 2019 -0800
@@ -693,6 +693,7 @@
     _features &= ~CPU_AVX512_VPOPCNTDQ;
     _features &= ~CPU_AVX512_VPCLMULQDQ;
     _features &= ~CPU_VAES;
+    _features &= ~CPU_VNNI;
   }
 
   if (UseAVX < 2)
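The x86.ad rewrite that starts below collapses the per-size loadV4/8/16/32/64 and storeV* patterns into single loadV/storeV instructs that switch on the vector length in bytes, helped by a new vector_length_encoding() that maps a length to an AVX encoding. A minimal stand-alone model of that mapping and of the load dispatch, with the MachNode/Assembler machinery stubbed out:

#include <cstdio>
#include <cstdlib>

// Stand-ins for Assembler::AvxVectorLen; the real values live in the x86 assembler.
enum AvxVectorLen { AVX_128bit, AVX_256bit, AVX_512bit, AVX_NoVec };

// Mirrors the new vector_length_encoding(): 4/8/16-byte vectors all use the
// 128-bit encoding, 32 bytes -> 256-bit, 64 bytes -> 512-bit.
static AvxVectorLen vector_length_encoding(unsigned length_in_bytes) {
  switch (length_in_bytes) {
  case 4:                      // fall-through
  case 8:                      // fall-through
  case 16: return AVX_128bit;
  case 32: return AVX_256bit;
  case 64: return AVX_512bit;
  default: std::abort();       // ShouldNotReachHere()
  }
}

// Shape of the new loadV instruct: one pattern, instruction chosen by size.
static const char* load_vector_insn(unsigned length_in_bytes) {
  switch (length_in_bytes) {
  case 4:  return "movdl";
  case 8:  return "movq";
  case 16: return "movdqu";
  case 32: return "vmovdqu";
  case 64: return "evmovdqul";  // emitted with the AVX_512bit encoding
  default: std::abort();
  }
}

int main() {
  const unsigned sizes[] = { 4, 8, 16, 32, 64 };
  for (unsigned n : sizes) {
    std::printf("%2u bytes -> encoding %d, %s\n",
                n, (int)vector_length_encoding(n), load_vector_insn(n));
  }
  return 0;
}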
--- a/src/hotspot/cpu/x86/x86.ad Thu Dec 19 11:57:58 2019 +0530 +++ b/src/hotspot/cpu/x86/x86.ad Thu Dec 19 12:28:56 2019 -0800 @@ -1246,176 +1246,193 @@ //============================================================================= const bool Matcher::match_rule_supported(int opcode) { - if (!has_match_rule(opcode)) - return false; - - bool ret_value = true; + if (!has_match_rule(opcode)) { + return false; // no match rule present + } switch (opcode) { case Op_AbsVL: - if (UseAVX < 3) - ret_value = false; + if (UseAVX < 3) { + return false; + } + break; case Op_PopCountI: case Op_PopCountL: - if (!UsePopCountInstruction) - ret_value = false; + if (!UsePopCountInstruction) { + return false; + } break; case Op_PopCountVI: - if (!UsePopCountInstruction || !VM_Version::supports_vpopcntdq()) - ret_value = false; + if (!UsePopCountInstruction || !VM_Version::supports_vpopcntdq()) { + return false; + } break; case Op_MulVI: - if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX - ret_value = false; + if ((UseSSE < 4) && (UseAVX < 1)) { // only with SSE4_1 or AVX + return false; + } break; case Op_MulVL: case Op_MulReductionVL: - if (VM_Version::supports_avx512dq() == false) - ret_value = false; + if (VM_Version::supports_avx512dq() == false) { + return false; + } break; case Op_AddReductionVL: - if (UseAVX < 3) // only EVEX : vector connectivity becomes an issue here - ret_value = false; + if (UseAVX < 3) { // only EVEX : vector connectivity becomes an issue here + return false; + } break; case Op_AbsVB: case Op_AbsVS: case Op_AbsVI: case Op_AddReductionVI: - if (UseSSE < 3 || !VM_Version::supports_ssse3()) // requires at least SSSE3 - ret_value = false; + if (UseSSE < 3 || !VM_Version::supports_ssse3()) { // requires at least SSSE3 + return false; + } break; case Op_MulReductionVI: - if (UseSSE < 4) // requires at least SSE4 - ret_value = false; + if (UseSSE < 4) { // requires at least SSE4 + return false; + } break; case Op_AddReductionVF: case Op_AddReductionVD: case Op_MulReductionVF: case Op_MulReductionVD: - if (UseSSE < 1) // requires at least SSE - ret_value = false; + if (UseSSE < 1) { // requires at least SSE + return false; + } break; case Op_SqrtVD: case Op_SqrtVF: - if (UseAVX < 1) // enabled for AVX only - ret_value = false; + if (UseAVX < 1) { // enabled for AVX only + return false; + } break; case Op_CompareAndSwapL: #ifdef _LP64 case Op_CompareAndSwapP: #endif - if (!VM_Version::supports_cx8()) - ret_value = false; + if (!VM_Version::supports_cx8()) { + return false; + } break; case Op_CMoveVF: case Op_CMoveVD: - if (UseAVX < 1 || UseAVX > 2) - ret_value = false; + if (UseAVX < 1 || UseAVX > 2) { + return false; + } break; case Op_StrIndexOf: - if (!UseSSE42Intrinsics) - ret_value = false; + if (!UseSSE42Intrinsics) { + return false; + } break; case Op_StrIndexOfChar: - if (!UseSSE42Intrinsics) - ret_value = false; + if (!UseSSE42Intrinsics) { + return false; + } break; case Op_OnSpinWait: - if (VM_Version::supports_on_spin_wait() == false) - ret_value = false; + if (VM_Version::supports_on_spin_wait() == false) { + return false; + } break; case Op_MulAddVS2VI: case Op_RShiftVL: case Op_AbsVD: case Op_NegVD: - if (UseSSE < 2) - ret_value = false; + if (UseSSE < 2) { + return false; + } break; case Op_MulVB: case Op_LShiftVB: case Op_RShiftVB: case Op_URShiftVB: - if (UseSSE < 4) - ret_value = false; + if (UseSSE < 4) { + return false; + } break; #ifdef _LP64 case Op_MaxD: case Op_MaxF: case Op_MinD: case Op_MinF: - if (UseAVX < 1) // enabled for AVX only - ret_value = 
false; + if (UseAVX < 1) { // enabled for AVX only + return false; + } break; #endif case Op_CacheWB: case Op_CacheWBPreSync: case Op_CacheWBPostSync: if (!VM_Version::supports_data_cache_line_flush()) { - ret_value = false; + return false; } break; case Op_RoundDoubleMode: - if (UseSSE < 4) - ret_value = false; + if (UseSSE < 4) { + return false; + } + break; + case Op_RoundDoubleModeV: + if (VM_Version::supports_avx() == false) { + return false; // 128bit vroundpd is not available + } break; } - - return ret_value; // Per default match rules are supported. + return true; // Match rules are supported by default. } -const bool Matcher::match_rule_supported_vector(int opcode, int vlen) { - // identify extra cases that we might want to provide match rules for - // e.g. Op_ vector nodes and other intrinsics while guarding with vlen - bool ret_value = match_rule_supported(opcode); - if (ret_value) { - switch (opcode) { - case Op_AbsVB: - case Op_AddVB: - case Op_SubVB: - if ((vlen == 64) && (VM_Version::supports_avx512bw() == false)) - ret_value = false; - break; - case Op_AbsVS: - case Op_AddVS: - case Op_SubVS: - case Op_MulVS: - case Op_LShiftVS: - case Op_RShiftVS: - case Op_URShiftVS: - if ((vlen == 32) && (VM_Version::supports_avx512bw() == false)) - ret_value = false; - break; - case Op_MulVB: - case Op_LShiftVB: - case Op_RShiftVB: - case Op_URShiftVB: - if ((vlen == 32 && UseAVX < 2) || - ((vlen == 64) && (VM_Version::supports_avx512bw() == false))) - ret_value = false; - break; - case Op_NegVF: - if ((vlen == 16) && (VM_Version::supports_avx512dq() == false)) - ret_value = false; - break; - case Op_CMoveVF: - if (vlen != 8) - ret_value = false; - break; - case Op_NegVD: - if ((vlen == 8) && (VM_Version::supports_avx512dq() == false)) - ret_value = false; - break; - case Op_CMoveVD: - if (vlen != 4) - ret_value = false; - break; - case Op_RoundDoubleModeV: - if (VM_Version::supports_avx() == false) - ret_value = false; - break; - } +//------------------------------------------------------------------------ + +// Identify extra cases that we might want to provide match rules for vector nodes and +// other intrinsics guarded with vector length (vlen) and element type (bt). +const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { + if (!match_rule_supported(opcode)) { + return false; + } + // Matcher::vector_size_supported() restricts vector sizes in the following way (see Matcher::vector_width_in_bytes): + // * SSE2 supports 128bit vectors for all types; + // * AVX1 supports 256bit vectors only for FLOAT and DOUBLE types; + // * AVX2 supports 256bit vectors for all types; + // * AVX512F supports 512bit vectors only for INT, FLOAT, and DOUBLE types; + // * AVX512BW supports 512bit vectors for BYTE, SHORT, and CHAR types. + // There's also a limit on minimum vector size supported: 2 elements (or 4 bytes for BYTE). + // And MaxVectorSize is taken into account as well. + if (!vector_size_supported(bt, vlen)) { + return false; } - - return ret_value; // Per default match rules are supported. 
+ // Special cases which require vector length follow: + // * implementation limitations + // * some 512bit vector operations on FLOAT and DOUBLE types require AVX512DQ + // * 128bit vroundpd instruction is present only in AVX1 + switch (opcode) { + case Op_AbsVF: + case Op_NegVF: + if ((vlen == 16) && (VM_Version::supports_avx512dq() == false)) { + return false; // 512bit vandps and vxorps are not available + } + break; + case Op_AbsVD: + case Op_NegVD: + if ((vlen == 8) && (VM_Version::supports_avx512dq() == false)) { + return false; // 512bit vandpd and vxorpd are not available + } + break; + case Op_CMoveVF: + if (vlen != 8) { + return false; // implementation limitation (only vcmov8F_reg is present) + } + break; + case Op_CMoveVD: + if (vlen != 4) { + return false; // implementation limitation (only vcmov4D_reg is present) + } + break; + } + return true; // Per default match rules are supported. } // x86 supports generic vector operands: vec and legVec. @@ -1646,6 +1663,37 @@ void Compile::reshape_address(AddPNode* addp) { } +static inline uint vector_length(const MachNode* n) { + const TypeVect* vt = n->bottom_type()->is_vect(); + return vt->length(); +} + +static inline uint vector_length_in_bytes(const MachNode* n) { + const TypeVect* vt = n->bottom_type()->is_vect(); + return vt->length_in_bytes(); +} + +static inline uint vector_length_in_bytes(const MachNode* use, MachOper* opnd) { + uint def_idx = use->operand_index(opnd); + Node* def = use->in(def_idx); + return def->bottom_type()->is_vect()->length_in_bytes(); +} + +static inline Assembler::AvxVectorLen vector_length_encoding(const MachNode* n) { + switch(vector_length_in_bytes(n)) { + case 4: // fall-through + case 8: // fall-through + case 16: return Assembler::AVX_128bit; + case 32: return Assembler::AVX_256bit; + case 64: return Assembler::AVX_512bit; + + default: { + ShouldNotReachHere(); + return Assembler::AVX_NoVec; + } + } +} + // Helper methods for MachSpillCopyNode::implementation(). 
static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo, int src_hi, int dst_hi, uint ireg, outputStream* st) { @@ -2888,102 +2936,81 @@ #ifdef _LP64 instruct roundD_reg(legRegD dst, legRegD src, immU8 rmode) %{ - predicate(UseSSE>=4); match(Set dst (RoundDoubleMode src rmode)); - format %{ "roundsd $dst, $src" %} + format %{ "roundsd $dst,$src" %} ins_cost(150); ins_encode %{ + assert(UseSSE >= 4, "required"); __ roundsd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant); %} ins_pipe(pipe_slow); %} instruct roundD_mem(legRegD dst, memory src, immU8 rmode) %{ - predicate(UseSSE>=4); match(Set dst (RoundDoubleMode (LoadD src) rmode)); - format %{ "roundsd $dst, $src" %} + format %{ "roundsd $dst,$src" %} ins_cost(150); ins_encode %{ + assert(UseSSE >= 4, "required"); __ roundsd($dst$$XMMRegister, $src$$Address, $rmode$$constant); %} ins_pipe(pipe_slow); %} instruct roundD_imm(legRegD dst, immD con, immU8 rmode, rRegI scratch_reg) %{ - predicate(UseSSE>=4); match(Set dst (RoundDoubleMode con rmode)); effect(TEMP scratch_reg); - format %{ "roundsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} + format %{ "roundsd $dst,[$constantaddress]\t# load from constant table: double=$con" %} ins_cost(150); ins_encode %{ + assert(UseSSE >= 4, "required"); __ roundsd($dst$$XMMRegister, $constantaddress($con), $rmode$$constant, $scratch_reg$$Register); %} ins_pipe(pipe_slow); %} -instruct vround2D_reg(legVec dst, legVec src, immU8 rmode) %{ - predicate(UseAVX > 0 && n->as_Vector()->length() == 2); +instruct vroundD_reg(legVec dst, legVec src, immU8 rmode) %{ + predicate(n->as_Vector()->length() < 8); match(Set dst (RoundDoubleModeV src rmode)); - format %{ "vroundpd $dst, $src, $rmode\t! round packed2D" %} - ins_encode %{ - int vector_len = 0; + format %{ "vroundpd $dst,$src,$rmode\t! round packedD" %} + ins_encode %{ + assert(UseAVX > 0, "required"); + int vector_len = vector_length_encoding(this); __ vroundpd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len); %} ins_pipe( pipe_slow ); %} -instruct vround2D_mem(legVec dst, memory mem, immU8 rmode) %{ - predicate(UseAVX > 0 && n->as_Vector()->length() == 2); - match(Set dst (RoundDoubleModeV (LoadVector mem) rmode)); - format %{ "vroundpd $dst, $mem, $rmode\t! round packed2D" %} - ins_encode %{ - int vector_len = 0; - __ vroundpd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct vround4D_reg(legVec dst, legVec src, legVec rmode) %{ - predicate(UseAVX > 0 && n->as_Vector()->length() == 4); - match(Set dst (RoundDoubleModeV src rmode)); - format %{ "vroundpd $dst, $src, $rmode\t! round packed4D" %} - ins_encode %{ - int vector_len = 1; - __ vroundpd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct vround4D_mem(legVec dst, memory mem, immU8 rmode) %{ - predicate(UseAVX > 0 && n->as_Vector()->length() == 4); - match(Set dst (RoundDoubleModeV (LoadVector mem) rmode)); - format %{ "vroundpd $dst, $mem, $rmode\t! round packed4D" %} - ins_encode %{ - int vector_len = 1; - __ vroundpd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len); - %} - ins_pipe( pipe_slow ); -%} - - instruct vround8D_reg(vec dst, vec src, immU8 rmode) %{ - predicate(UseAVX > 2 && n->as_Vector()->length() == 8); + predicate(n->as_Vector()->length() == 8); match(Set dst (RoundDoubleModeV src rmode)); - format %{ "vrndscalepd $dst, $src, $rmode\t! 
round packed8D" %} - ins_encode %{ - int vector_len = 2; - __ vrndscalepd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, vector_len); + format %{ "vrndscalepd $dst,$src,$rmode\t! round packed8D" %} + ins_encode %{ + assert(UseAVX > 2, "required"); + __ vrndscalepd($dst$$XMMRegister, $src$$XMMRegister, $rmode$$constant, Assembler::AVX_512bit); + %} + ins_pipe( pipe_slow ); +%} + +instruct vroundD_mem(legVec dst, memory mem, immU8 rmode) %{ + predicate(n->as_Vector()->length() < 8); + match(Set dst (RoundDoubleModeV (LoadVector mem) rmode)); + format %{ "vroundpd $dst, $mem, $rmode\t! round packedD" %} + ins_encode %{ + assert(UseAVX > 0, "required"); + int vector_len = vector_length_encoding(this); + __ vroundpd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len); %} ins_pipe( pipe_slow ); %} instruct vround8D_mem(vec dst, memory mem, immU8 rmode) %{ - predicate(UseAVX > 2 && n->as_Vector()->length() == 8); + predicate(n->as_Vector()->length() == 8); match(Set dst (RoundDoubleModeV (LoadVector mem) rmode)); - format %{ "vrndscalepd $dst, $mem, $rmode\t! round packed8D" %} - ins_encode %{ - int vector_len = 2; - __ vrndscalepd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, vector_len); + format %{ "vrndscalepd $dst,$mem,$rmode\t! round packed8D" %} + ins_encode %{ + assert(UseAVX > 2, "required"); + __ vrndscalepd($dst$$XMMRegister, $mem$$Address, $rmode$$constant, Assembler::AVX_512bit); %} ins_pipe( pipe_slow ); %} @@ -3048,195 +3075,80 @@ ins_pipe( fpu_reg_reg ); %} -// Load vectors (4 bytes long) -instruct loadV4(vec dst, memory mem) %{ - predicate(n->as_LoadVector()->memory_size() == 4); - match(Set dst (LoadVector mem)); - ins_cost(125); - format %{ "movd $dst,$mem\t! load vector (4 bytes)" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $mem$$Address); - %} - ins_pipe( pipe_slow ); -%} - -// Load vectors (8 bytes long) -instruct loadV8(vec dst, memory mem) %{ - predicate(n->as_LoadVector()->memory_size() == 8); - match(Set dst (LoadVector mem)); - ins_cost(125); - format %{ "movq $dst,$mem\t! load vector (8 bytes)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $mem$$Address); - %} - ins_pipe( pipe_slow ); -%} - -// Load vectors (16 bytes long) -instruct loadV16(vec dst, memory mem) %{ - predicate(n->as_LoadVector()->memory_size() == 16); - match(Set dst (LoadVector mem)); - ins_cost(125); - format %{ "movdqu $dst,$mem\t! load vector (16 bytes)" %} - ins_encode %{ - __ movdqu($dst$$XMMRegister, $mem$$Address); - %} - ins_pipe( pipe_slow ); -%} - -// Load vectors (32 bytes long) -instruct loadV32(vec dst, memory mem) %{ - predicate(n->as_LoadVector()->memory_size() == 32); +// ============================================================================ + +// Load vectors +instruct loadV(vec dst, memory mem) %{ match(Set dst (LoadVector mem)); ins_cost(125); - format %{ "vmovdqu $dst,$mem\t! load vector (32 bytes)" %} - ins_encode %{ - __ vmovdqu($dst$$XMMRegister, $mem$$Address); - %} - ins_pipe( pipe_slow ); -%} - -// Load vectors (64 bytes long) -instruct loadV64_dword(vec dst, memory mem) %{ - predicate(n->as_LoadVector()->memory_size() == 64 && n->as_LoadVector()->element_size() <= 4); - match(Set dst (LoadVector mem)); - ins_cost(125); - format %{ "vmovdqul $dst k0,$mem\t! 
load vector (64 bytes)" %} - ins_encode %{ - int vector_len = 2; - __ evmovdqul($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -// Load vectors (64 bytes long) -instruct loadV64_qword(vec dst, memory mem) %{ - predicate(n->as_LoadVector()->memory_size() == 64 && n->as_LoadVector()->element_size() > 4); - match(Set dst (LoadVector mem)); - ins_cost(125); - format %{ "vmovdquq $dst k0,$mem\t! load vector (64 bytes)" %} - ins_encode %{ - int vector_len = 2; - __ evmovdquq($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -// Store vectors -instruct storeV4(memory mem, vec src) %{ - predicate(n->as_StoreVector()->memory_size() == 4); - match(Set mem (StoreVector mem src)); - ins_cost(145); - format %{ "movd $mem,$src\t! store vector (4 bytes)" %} - ins_encode %{ - __ movdl($mem$$Address, $src$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct storeV8(memory mem, vec src) %{ - predicate(n->as_StoreVector()->memory_size() == 8); + format %{ "load_vector $dst,$mem" %} + ins_encode %{ + switch (vector_length_in_bytes(this)) { + case 4: __ movdl ($dst$$XMMRegister, $mem$$Address); break; + case 8: __ movq ($dst$$XMMRegister, $mem$$Address); break; + case 16: __ movdqu ($dst$$XMMRegister, $mem$$Address); break; + case 32: __ vmovdqu ($dst$$XMMRegister, $mem$$Address); break; + case 64: __ evmovdqul($dst$$XMMRegister, $mem$$Address, Assembler::AVX_512bit); break; + default: ShouldNotReachHere(); + } + %} + ins_pipe( pipe_slow ); +%} + +// Store vectors generic operand pattern. +instruct storeV(memory mem, vec src) %{ match(Set mem (StoreVector mem src)); ins_cost(145); - format %{ "movq $mem,$src\t! store vector (8 bytes)" %} - ins_encode %{ - __ movq($mem$$Address, $src$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct storeV16(memory mem, vec src) %{ - predicate(n->as_StoreVector()->memory_size() == 16); - match(Set mem (StoreVector mem src)); - ins_cost(145); - format %{ "movdqu $mem,$src\t! store vector (16 bytes)" %} - ins_encode %{ - __ movdqu($mem$$Address, $src$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct storeV32(memory mem, vec src) %{ - predicate(n->as_StoreVector()->memory_size() == 32); - match(Set mem (StoreVector mem src)); - ins_cost(145); - format %{ "vmovdqu $mem,$src\t! store vector (32 bytes)" %} - ins_encode %{ - __ vmovdqu($mem$$Address, $src$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct storeV64_dword(memory mem, vec src) %{ - predicate(n->as_StoreVector()->memory_size() == 64 && n->as_StoreVector()->element_size() <= 4); - match(Set mem (StoreVector mem src)); - ins_cost(145); - format %{ "vmovdqul $mem k0,$src\t! store vector (64 bytes)" %} - ins_encode %{ - int vector_len = 2; - __ evmovdqul($mem$$Address, $src$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct storeV64_qword(memory mem, vec src) %{ - predicate(n->as_StoreVector()->memory_size() == 64 && n->as_StoreVector()->element_size() > 4); - match(Set mem (StoreVector mem src)); - ins_cost(145); - format %{ "vmovdquq $mem k0,$src\t! 
store vector (64 bytes)" %} - ins_encode %{ - int vector_len = 2; - __ evmovdquq($mem$$Address, $src$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -// ====================LEGACY REPLICATE======================================= - -instruct Repl16B(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); + format %{ "store_vector $mem,$src\n\t" %} + ins_encode %{ + switch (vector_length_in_bytes(this, $src)) { + case 4: __ movdl ($mem$$Address, $src$$XMMRegister); break; + case 8: __ movq ($mem$$Address, $src$$XMMRegister); break; + case 16: __ movdqu ($mem$$Address, $src$$XMMRegister); break; + case 32: __ vmovdqu ($mem$$Address, $src$$XMMRegister); break; + case 64: __ evmovdqul($mem$$Address, $src$$XMMRegister, Assembler::AVX_512bit); break; + default: ShouldNotReachHere(); + } + %} + ins_pipe( pipe_slow ); +%} + +// ====================REPLICATE======================================= + +// Replicate byte scalar to be vector +instruct ReplB_reg(vec dst, rRegI src) %{ + predicate((n->as_Vector()->length() <= 32) || + (n->as_Vector()->length() == 64 && VM_Version::supports_avx512bw())); // AVX512BW for 512bit byte instructions match(Set dst (ReplicateB src)); - format %{ "movd $dst,$src\n\t" - "punpcklbw $dst,$dst\n\t" - "pshuflw $dst,$dst,0x00\n\t" - "punpcklqdq $dst,$dst\t! replicate16B" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); - __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32B(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); + format %{ "replicateB $dst,$src" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen == 64 || VM_Version::supports_avx512vlbw()) { // AVX512VL for <512bit operands + assert(VM_Version::supports_avx512bw(), "required"); + int vlen_enc = vector_length_encoding(this); + __ evpbroadcastb($dst$$XMMRegister, $src$$Register, vlen_enc); + } else { + __ movdl($dst$$XMMRegister, $src$$Register); + __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); + __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); + if (vlen >= 16) { + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + if (vlen >= 32) { + assert(vlen == 32, "sanity"); // vlen == 64 && !AVX512BW is covered by ReplB_reg_leg + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + } + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplB_reg_leg(legVec dst, rRegI src) %{ + predicate(n->as_Vector()->length() == 64 && !VM_Version::supports_avx512bw()); // AVX512BW for 512bit byte instructions match(Set dst (ReplicateB src)); - format %{ "movd $dst,$src\n\t" - "punpcklbw $dst,$dst\n\t" - "pshuflw $dst,$dst,0x00\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t! 
replicate32B" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); - __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl64B(legVec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 64 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB src)); - format %{ "movd $dst,$src\n\t" - "punpcklbw $dst,$dst\n\t" - "pshuflw $dst,$dst,0x00\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate64B" %} - ins_encode %{ + format %{ "replicateB $dst,$src" %} + ins_encode %{ + assert(UseAVX > 2, "required"); __ movdl($dst$$XMMRegister, $src$$Register); __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); @@ -3247,39 +3159,52 @@ ins_pipe( pipe_slow ); %} -instruct Repl16B_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "punpcklqdq $dst,$dst\t! replicate16B($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32B_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); +instruct ReplB_mem(vec dst, memory mem) %{ + predicate((n->as_Vector()->length() <= 32 && VM_Version::supports_avx512vlbw()) || // AVX512VL for <512bit operands + (n->as_Vector()->length() == 64 && VM_Version::supports_avx512bw())); // AVX512BW for 512bit byte instructions + match(Set dst (ReplicateB (LoadB mem))); + format %{ "replicateB $dst,$mem" %} + ins_encode %{ + assert(UseAVX > 2, "required"); + int vector_len = vector_length_encoding(this); + __ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplB_imm(vec dst, immI con) %{ + predicate((n->as_Vector()->length() <= 32) || + (n->as_Vector()->length() == 64 && VM_Version::supports_avx512bw())); // AVX512BW for 512bit byte instructions match(Set dst (ReplicateB con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t! 
lreplicate32B($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl64B_imm(legVec dst, immI con) %{ - predicate(n->as_Vector()->length() == 64 && !VM_Version::supports_avx512vlbw()); + format %{ "replicateB $dst,$con" %} + ins_encode %{ + uint vlen = vector_length(this); + InternalAddress const_addr = $constantaddress(replicate8_imm($con$$constant, 1)); + if (vlen == 4) { + __ movdl($dst$$XMMRegister, const_addr); + } else { + __ movq($dst$$XMMRegister, const_addr); + if (vlen >= 16) { + if (vlen == 64 || VM_Version::supports_avx512vlbw()) { // AVX512VL for <512bit operands + int vlen_enc = vector_length_encoding(this); + __ vpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); + } else { + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + if (vlen >= 32) { + assert(vlen == 32, "sanity");// vlen == 64 && !AVX512BW is covered by ReplB_imm_leg + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + } + } + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplB_imm_leg(legVec dst, immI con) %{ + predicate(n->as_Vector()->length() == 64 && !VM_Version::supports_avx512bw()); match(Set dst (ReplicateB con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate64B($con)" %} + format %{ "replicateB $dst,$con" %} ins_encode %{ __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); @@ -3289,118 +3214,55 @@ ins_pipe( pipe_slow ); %} -instruct Repl4S(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS src)); - format %{ "movd $dst,$src\n\t" - "pshuflw $dst,$dst,0x00\t! replicate4S" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4S_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS (LoadS mem))); - format %{ "pshuflw $dst,$mem,0x00\t! replicate4S" %} - ins_encode %{ - __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8S(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vlbw()); +// Replicate byte scalar zero to be vector +instruct ReplB_zero(vec dst, immI0 zero) %{ + match(Set dst (ReplicateB zero)); + format %{ "replicateB $dst,$zero" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen <= 16) { + __ pxor($dst$$XMMRegister, $dst$$XMMRegister); + } else { + // Use vpxor since AVX512F does not have 512bit vxorpd (requires AVX512DQ). 
+ int vlen_enc = vector_length_encoding(this); + __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); + } + %} + ins_pipe( fpu_reg_reg ); +%} + +// ====================ReplicateS======================================= + +instruct ReplS_reg(vec dst, rRegI src) %{ + predicate((n->as_Vector()->length() <= 16) || + (n->as_Vector()->length() == 32 && VM_Version::supports_avx512bw())); // AVX512BW for 512bit instructions on shorts match(Set dst (ReplicateS src)); - format %{ "movd $dst,$src\n\t" - "pshuflw $dst,$dst,0x00\n\t" - "punpcklqdq $dst,$dst\t! replicate8S" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8S_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 0 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS (LoadS mem))); - format %{ "pshuflw $dst,$mem,0x00\n\t" - "punpcklqdq $dst,$dst\t! replicate8S" %} - ins_encode %{ - __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8S_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "punpcklqdq $dst,$dst\t! replicate8S($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16S(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); + format %{ "replicateS $dst,$src" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen == 32 || VM_Version::supports_avx512vlbw()) { // AVX512VL for <512bit operands + assert(VM_Version::supports_avx512bw(), "required"); + int vlen_enc = vector_length_encoding(this); + __ evpbroadcastw($dst$$XMMRegister, $src$$Register, vlen_enc); + } else { + __ movdl($dst$$XMMRegister, $src$$Register); + __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); + if (vlen >= 8) { + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + if (vlen >= 16) { + assert(vlen == 16, "sanity"); // vlen == 32 && !AVX512BW is covered by ReplS_reg_leg + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + } + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplS_reg_leg(legVec dst, rRegI src) %{ + predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512bw()); match(Set dst (ReplicateS src)); - format %{ "movd $dst,$src\n\t" - "pshuflw $dst,$dst,0x00\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t! replicate16S" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16S_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS (LoadS mem))); - format %{ "pshuflw $dst,$mem,0x00\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t! 
replicate16S" %} - ins_encode %{ - __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16S_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t! replicate16S($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32S(legVec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS src)); - format %{ "movd $dst,$src\n\t" - "pshuflw $dst,$dst,0x00\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate32S" %} + format %{ "replicateS $dst,$src" %} ins_encode %{ __ movdl($dst$$XMMRegister, $src$$Register); __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); @@ -3411,13 +3273,36 @@ ins_pipe( pipe_slow ); %} -instruct Repl32S_mem(legVec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); +instruct ReplS_mem(vec dst, memory mem) %{ + predicate((n->as_Vector()->length() >= 4 && + n->as_Vector()->length() <= 16 && VM_Version::supports_avx()) || + (n->as_Vector()->length() == 32 && VM_Version::supports_avx512bw())); // AVX512BW for 512bit instructions on shorts match(Set dst (ReplicateS (LoadS mem))); - format %{ "pshuflw $dst,$mem,0x00\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate32S" %} + format %{ "replicateS $dst,$mem" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen == 32 || VM_Version::supports_avx512vlbw()) { // AVX512VL for <512bit operands + assert(VM_Version::supports_avx512bw(), "required"); + int vlen_enc = vector_length_encoding(this); + __ vpbroadcastw($dst$$XMMRegister, $mem$$Address, vlen_enc); + } else { + __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); + if (vlen >= 8) { + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + if (vlen >= 16) { + assert(vlen == 16, "sanity"); // vlen == 32 && !AVX512BW is covered by ReplS_mem_leg + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + } + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplS_mem_leg(legVec dst, memory mem) %{ + predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512bw()); + match(Set dst (ReplicateS (LoadS mem))); + format %{ "replicateS $dst,$mem" %} ins_encode %{ __ pshuflw($dst$$XMMRegister, $mem$$Address, 0x00); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); @@ -3427,13 +3312,39 @@ ins_pipe( pipe_slow ); %} -instruct Repl32S_imm(legVec dst, immI con) %{ - predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512vlbw()); +instruct ReplS_imm(vec dst, immI con) %{ + predicate((n->as_Vector()->length() <= 16) || + (n->as_Vector()->length() == 32 && VM_Version::supports_avx512bw())); // AVX512BW for 512bit instructions on shorts match(Set dst (ReplicateS con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! 
replicate32S($con)" %} + format %{ "replicateS $dst,$con" %} + ins_encode %{ + uint vlen = vector_length(this); + InternalAddress constaddr = $constantaddress(replicate8_imm($con$$constant, 2)); + if (vlen == 2) { + __ movdl($dst$$XMMRegister, constaddr); + } else { + __ movq($dst$$XMMRegister, constaddr); + if (vlen == 32 || VM_Version::supports_avx512vlbw() ) { // AVX512VL for <512bit operands + assert(VM_Version::supports_avx512bw(), "required"); + int vlen_enc = vector_length_encoding(this); + __ vpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); + } else { + __ movq($dst$$XMMRegister, constaddr); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + if (vlen >= 16) { + assert(vlen == 16, "sanity"); // vlen == 32 && !AVX512BW is covered by ReplS_imm_leg + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + } + } + %} + ins_pipe( fpu_reg_reg ); +%} + +instruct ReplS_imm_leg(legVec dst, immI con) %{ + predicate(n->as_Vector()->length() == 32 && !VM_Version::supports_avx512bw()); + match(Set dst (ReplicateS con)); + format %{ "replicateS $dst,$con" %} ins_encode %{ __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); @@ -3443,61 +3354,49 @@ ins_pipe( pipe_slow ); %} -instruct Repl4I(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI src)); - format %{ "movd $dst,$src\n\t" - "pshufd $dst,$dst,0x00\t! replicate4I" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4I_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI (LoadI mem))); - format %{ "pshufd $dst,$mem,0x00\t! replicate4I" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8I(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); +instruct ReplS_zero(vec dst, immI0 zero) %{ + match(Set dst (ReplicateS zero)); + format %{ "replicateS $dst,$zero" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen <= 8) { + __ pxor($dst$$XMMRegister, $dst$$XMMRegister); + } else { + int vlen_enc = vector_length_encoding(this); + __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); + } + %} + ins_pipe( fpu_reg_reg ); +%} + +// ====================ReplicateI======================================= + +instruct ReplI_reg(vec dst, rRegI src) %{ + predicate((n->as_Vector()->length() <= 8) || + (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl())); match(Set dst (ReplicateI src)); - format %{ "movd $dst,$src\n\t" - "pshufd $dst,$dst,0x00\n\t" - "vinserti128_high $dst,$dst\t! replicate8I" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8I_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI (LoadI mem))); - format %{ "pshufd $dst,$mem,0x00\n\t" - "vinserti128_high $dst,$dst\t! 
replicate8I" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16I(legVec dst, rRegI src) %{ + format %{ "replicateI $dst,$src" %} + ins_encode %{ + uint vlen = vector_length(this); + if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vlen_enc = vector_length_encoding(this); + __ evpbroadcastd($dst$$XMMRegister, $src$$Register, vlen_enc); + } else { + __ movdl($dst$$XMMRegister, $src$$Register); + __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); + if (vlen >= 8) { + assert(vlen == 8, "sanity"); // vlen == 16 && !AVX512VL is covered by ReplI_reg_leg + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplI_reg_leg(legVec dst, rRegI src) %{ predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl()); match(Set dst (ReplicateI src)); - format %{ "movd $dst,$src\n\t" - "pshufd $dst,$dst,0x00\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16I" %} + format %{ "replicateI $dst,$src" %} ins_encode %{ __ movdl($dst$$XMMRegister, $src$$Register); __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); @@ -3507,12 +3406,31 @@ ins_pipe( pipe_slow ); %} -instruct Repl16I_mem(legVec dst, memory mem) %{ +instruct ReplI_mem(vec dst, memory mem) %{ + predicate((n->as_Vector()->length() <= 8 && VM_Version::supports_avx()) || + (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl())); + match(Set dst (ReplicateI (LoadI mem))); + format %{ "replicateI $dst,$mem" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen <= 4) { + __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vector_len = vector_length_encoding(this); + __ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len); + } else { + assert(vlen == 8, "sanity"); // vlen == 16 && !AVX512VL is covered by ReplI_mem_leg + __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplI_mem_leg(legVec dst, memory mem) %{ predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl()); match(Set dst (ReplicateI (LoadI mem))); - format %{ "pshufd $dst,$mem,0x00\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16I" %} + format %{ "replicateI $dst,$mem" %} ins_encode %{ __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); @@ -3521,39 +3439,36 @@ ins_pipe( pipe_slow ); %} -instruct Repl4I_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); +instruct ReplI_imm(vec dst, immI con) %{ + predicate((n->as_Vector()->length() <= 8) || + (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl())); match(Set dst (ReplicateI con)); - format %{ "movq $dst,[$constantaddress]\t! replicate4I($con)\n\t" - "punpcklqdq $dst,$dst" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8I_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI con)); - format %{ "movq $dst,[$constantaddress]\t! 
replicate8I($con)\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16I_imm(legVec dst, immI con) %{ + format %{ "replicateI $dst,$con" %} + ins_encode %{ + uint vlen = vector_length(this); + InternalAddress constaddr = $constantaddress(replicate8_imm($con$$constant, 4)); + if (vlen == 2) { + __ movq($dst$$XMMRegister, constaddr); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vector_len = vector_length_encoding(this); + __ movq($dst$$XMMRegister, constaddr); + __ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len); + } else { + __ movq($dst$$XMMRegister, constaddr); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + if (vlen >= 8) { + assert(vlen == 8, "sanity"); + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplI_imm_leg(legVec dst, immI con) %{ predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl()); match(Set dst (ReplicateI con)); - format %{ "movq $dst,[$constantaddress]\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16I($con)" %} + format %{ "replicateI $dst,$con" %} ins_encode %{ __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); @@ -3563,42 +3478,53 @@ ins_pipe( pipe_slow ); %} -// Long could be loaded into xmm register directly from memory. -instruct Repl2L_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 2 && !VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateL (LoadL mem))); - format %{ "movq $dst,$mem\n\t" - "punpcklqdq $dst,$dst\t! replicate2L" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $mem$$Address); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -// Replicate long (8 byte) scalar to be vector +// Replicate integer (4 byte) scalar zero to be vector +instruct ReplI_zero(vec dst, immI0 zero) %{ + match(Set dst (ReplicateI zero)); + format %{ "replicateI $dst,$zero" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen <= 4) { + __ pxor($dst$$XMMRegister, $dst$$XMMRegister); + } else { + int vlen_enc = vector_length_encoding(this); + __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); + } + %} + ins_pipe( fpu_reg_reg ); +%} + +// ====================ReplicateL======================================= + #ifdef _LP64 -instruct Repl4L(vec dst, rRegL src) %{ - predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); +// Replicate long (8 byte) scalar to be vector +instruct ReplL_reg(vec dst, rRegL src) %{ + predicate((n->as_Vector()->length() <= 4) || + (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl())); match(Set dst (ReplicateL src)); - format %{ "movdq $dst,$src\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t! 
replicate4L" %} - ins_encode %{ - __ movdq($dst$$XMMRegister, $src$$Register); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8L(legVec dst, rRegL src) %{ + format %{ "replicateL $dst,$src" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen == 2) { + __ movdq($dst$$XMMRegister, $src$$Register); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vlen_enc = vector_length_encoding(this); + __ evpbroadcastq($dst$$XMMRegister, $src$$Register, vlen_enc); + } else { + assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplL_reg_leg + __ movdq($dst$$XMMRegister, $src$$Register); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplL_reg_leg(legVec dst, rRegL src) %{ predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); match(Set dst (ReplicateL src)); - format %{ "movdq $dst,$src\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8L" %} + format %{ "replicateL $dst,$src" %} ins_encode %{ __ movdq($dst$$XMMRegister, $src$$Register); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); @@ -3608,98 +3534,88 @@ ins_pipe( pipe_slow ); %} #else // _LP64 -instruct Repl4L(vec dst, eRegL src, vec tmp) %{ - predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); +// Replicate long (8 byte) scalar to be vector +instruct ReplL_reg(vec dst, eRegL src, vec tmp) %{ + predicate(n->as_Vector()->length() <= 4); match(Set dst (ReplicateL src)); effect(TEMP dst, USE src, TEMP tmp); - format %{ "movdl $dst,$src.lo\n\t" - "movdl $tmp,$src.hi\n\t" - "punpckldq $dst,$tmp\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t! 
replicate4L" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); - __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8L(legVec dst, eRegL src, legVec tmp) %{ - predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); + format %{ "replicateL $dst,$src" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen == 2) { + __ movdl($dst$$XMMRegister, $src$$Register); + __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); + __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vector_len = Assembler::AVX_256bit; + __ movdl($dst$$XMMRegister, $src$$Register); + __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); + __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); + __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); + } else { + __ movdl($dst$$XMMRegister, $src$$Register); + __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); + __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplL_reg_leg(legVec dst, eRegL src, legVec tmp) %{ + predicate(n->as_Vector()->length() == 8); match(Set dst (ReplicateL src)); effect(TEMP dst, USE src, TEMP tmp); - format %{ "movdl $dst,$src.lo\n\t" - "movdl $tmp,$src.hi\n\t" - "punpckldq $dst,$tmp\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8L" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); - __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); + format %{ "replicateL $dst,$src" %} + ins_encode %{ + if (VM_Version::supports_avx512vl()) { + __ movdl($dst$$XMMRegister, $src$$Register); + __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); + __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); + } else { + int vector_len = Assembler::AVX_512bit; + __ movdl($dst$$XMMRegister, $src$$Register); + __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); + __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); + __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); + } %} ins_pipe( pipe_slow ); %} #endif // _LP64 -instruct Repl4L_imm(vec dst, immL con) %{ - predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateL con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t! 
replicate4L($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress($con)); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8L_imm(legVec dst, immL con) %{ - predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateL con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8L($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress($con)); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4L_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); +instruct ReplL_mem(vec dst, memory mem) %{ + predicate((n->as_Vector()->length() <= 4) || + (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl())); match(Set dst (ReplicateL (LoadL mem))); - format %{ "movq $dst,$mem\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t! replicate4L" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $mem$$Address); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8L_mem(legVec dst, memory mem) %{ + format %{ "replicateL $dst,$mem" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen == 2) { + __ movq($dst$$XMMRegister, $mem$$Address); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vlen_enc = vector_length_encoding(this); + __ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vlen_enc); + } else { + assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplL_mem_leg + __ movq($dst$$XMMRegister, $mem$$Address); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplL_mem_leg(legVec dst, memory mem) %{ predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); match(Set dst (ReplicateL (LoadL mem))); - format %{ "movq $dst,$mem\n\t" - "punpcklqdq $dst,$dst\n\t" - "vinserti128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8L" %} + format %{ "replicateL $dst,$mem" %} ins_encode %{ __ movq($dst$$XMMRegister, $mem$$Address); __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); @@ -3709,56 +3625,87 @@ ins_pipe( pipe_slow ); %} -instruct Repl2F_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 2 && UseAVX > 0 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateF (LoadF mem))); - format %{ "pshufd $dst,$mem,0x00\t! replicate2F" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4F_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateF (LoadF mem))); - format %{ "pshufd $dst,$mem,0x00\t! 
replicate4F" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8F(vec dst, vlRegF src) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 0 && !VM_Version::supports_avx512vl()); +// Replicate long (8 byte) scalar immediate to be vector by loading from const table. +instruct ReplL_imm(vec dst, immL con) %{ + predicate((n->as_Vector()->length() <= 4) || + (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl())); + match(Set dst (ReplicateL con)); + format %{ "replicateL $dst,$con" %} + ins_encode %{ + uint vlen = vector_length(this); + InternalAddress const_addr = $constantaddress($con); + if (vlen == 2) { + __ movq($dst$$XMMRegister, const_addr); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vlen_enc = vector_length_encoding(this); + __ movq($dst$$XMMRegister, const_addr); + __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); + } else { + assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplL_imm_leg + __ movq($dst$$XMMRegister, const_addr); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplL_imm_leg(legVec dst, immL con) %{ + predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); + match(Set dst (ReplicateL con)); + format %{ "replicateL $dst,$con" %} + ins_encode %{ + __ movq($dst$$XMMRegister, $constantaddress($con)); + __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); + __ vinserti128_high($dst$$XMMRegister, $dst$$XMMRegister); + __ vinserti64x4($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, 0x1); + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplL_zero(vec dst, immL0 zero) %{ + match(Set dst (ReplicateL zero)); + format %{ "replicateL $dst,$zero" %} + ins_encode %{ + int vlen = vector_length(this); + if (vlen == 2) { + __ pxor($dst$$XMMRegister, $dst$$XMMRegister); + } else { + int vlen_enc = vector_length_encoding(this); + __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); + } + %} + ins_pipe( fpu_reg_reg ); +%} + +// ====================ReplicateF======================================= + +instruct ReplF_reg(vec dst, vlRegF src) %{ + predicate((n->as_Vector()->length() <= 8) || + (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl())); match(Set dst (ReplicateF src)); - format %{ "pshufd $dst,$src,0x00\n\t" - "vinsertf128_high $dst,$dst\t! replicate8F" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); - __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8F_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateF (LoadF mem))); - format %{ "pshufd $dst,$mem,0x00\n\t" - "vinsertf128_high $dst,$dst\t! 
replicate8F" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); - __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16F(legVec dst, vlRegF src) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 0 && !VM_Version::supports_avx512vl()); + format %{ "replicateF $dst,$src" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen <= 4) { + __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vector_len = vector_length_encoding(this); + __ vpbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vector_len); + } else { + assert(vlen == 8, "sanity"); // vlen == 16 && !AVX512VL is covered by ReplF_reg_leg + __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); + __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplF_reg_leg(legVec dst, vlRegF src) %{ + predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl()); match(Set dst (ReplicateF src)); - format %{ "pshufd $dst,$src,0x00\n\t" - "vinsertf128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16F" %} + format %{ "replicateF $dst,$src" %} ins_encode %{ __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); @@ -3767,12 +3714,31 @@ ins_pipe( pipe_slow ); %} -instruct Repl16F_mem(legVec dst, memory mem) %{ +instruct ReplF_mem(vec dst, memory mem) %{ + predicate((n->as_Vector()->length() <= 8 && VM_Version::supports_avx()) || + (n->as_Vector()->length() == 16 && VM_Version::supports_avx512vl())); + match(Set dst (ReplicateF (LoadF mem))); + format %{ "replicateF $dst,$mem" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen <= 4) { + __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vector_len = vector_length_encoding(this); + __ vpbroadcastss($dst$$XMMRegister, $mem$$Address, vector_len); + } else { + assert(vlen == 8, "sanity"); // vlen == 16 && !AVX512VL is covered by ReplF_mem_leg + __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); + __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplF_mem_leg(legVec dst, memory mem) %{ predicate(n->as_Vector()->length() == 16 && !VM_Version::supports_avx512vl()); match(Set dst (ReplicateF (LoadF mem))); - format %{ "pshufd $dst,$mem,0x00\n\t" - "vinsertf128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate16F" %} + format %{ "replicateF $dst,$mem" %} ins_encode %{ __ pshufd($dst$$XMMRegister, $mem$$Address, 0x00); __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); @@ -3781,77 +3747,49 @@ ins_pipe( pipe_slow ); %} -instruct Repl2F_zero(vec dst, immF0 zero) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateF zero)); - format %{ "xorps $dst,$dst\t! replicate2F zero" %} - ins_encode %{ - __ xorps($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4F_zero(vec dst, immF0 zero) %{ - predicate(n->as_Vector()->length() == 4); +instruct ReplF_zero(vec dst, immF0 zero) %{ match(Set dst (ReplicateF zero)); - format %{ "xorps $dst,$dst\t! 
replicate4F zero" %} - ins_encode %{ - __ xorps($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl8F_zero(vec dst, immF0 zero) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 0); - match(Set dst (ReplicateF zero)); - format %{ "vxorps $dst,$dst,$dst\t! replicate8F zero" %} - ins_encode %{ - int vector_len = 1; - __ vxorps($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); + format %{ "replicateF $dst,$zero" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen <= 4) { + __ xorps($dst$$XMMRegister, $dst$$XMMRegister); + } else { + int vlen_enc = vector_length_encoding(this); + __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); // 512bit vxorps requires AVX512DQ + } %} ins_pipe( fpu_reg_reg ); %} -instruct Repl2D_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 2 && UseAVX > 0 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateD (LoadD mem))); - format %{ "pshufd $dst,$mem,0x44\t! replicate2D" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4D(vec dst, vlRegD src) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 0 && !VM_Version::supports_avx512vl()); +// ====================ReplicateD======================================= + +// Replicate double (8 bytes) scalar to be vector +instruct ReplD_reg(vec dst, vlRegD src) %{ + predicate((n->as_Vector()->length() <= 4) || + (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl())); match(Set dst (ReplicateD src)); - format %{ "pshufd $dst,$src,0x44\n\t" - "vinsertf128_high $dst,$dst\t! replicate4D" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); - __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4D_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && !VM_Version::supports_avx512vl()); - match(Set dst (ReplicateD (LoadD mem))); - format %{ "pshufd $dst,$mem,0x44\n\t" - "vinsertf128_high $dst,$dst\t! replicate4D" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44); - __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8D(legVec dst, vlRegD src) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 0 && !VM_Version::supports_avx512vl()); + format %{ "replicateD $dst,$src" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen == 2) { + __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vector_len = vector_length_encoding(this); + __ vpbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vector_len); + } else { + assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplD_reg_leg + __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); + __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplD_reg_leg(legVec dst, vlRegD src) %{ + predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); match(Set dst (ReplicateD src)); - format %{ "pshufd $dst,$src,0x44\n\t" - "vinsertf128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! 
replicate8D" %} + format %{ "replicateD $dst,$src" %} ins_encode %{ __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); @@ -3860,12 +3798,31 @@ ins_pipe( pipe_slow ); %} -instruct Repl8D_mem(legVec dst, memory mem) %{ +instruct ReplD_mem(vec dst, memory mem) %{ + predicate((n->as_Vector()->length() <= 4 && VM_Version::supports_avx()) || + (n->as_Vector()->length() == 8 && VM_Version::supports_avx512vl())); + match(Set dst (ReplicateD (LoadD mem))); + format %{ "replicateD $dst,$mem" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen == 2) { + __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44); + } else if (VM_Version::supports_avx512vl()) { // AVX512VL for <512bit operands + int vector_len = vector_length_encoding(this); + __ vpbroadcastsd($dst$$XMMRegister, $mem$$Address, vector_len); + } else { + assert(vlen == 4, "sanity"); // vlen == 8 && !AVX512VL is covered by ReplD_mem_leg + __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44); + __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct ReplD_mem_leg(legVec dst, memory mem) %{ predicate(n->as_Vector()->length() == 8 && !VM_Version::supports_avx512vl()); match(Set dst (ReplicateD (LoadD mem))); - format %{ "pshufd $dst,$mem,0x44\n\t" - "vinsertf128_high $dst,$dst\t" - "vinserti64x4 $dst,$dst,$dst,0x1\t! replicate8D" %} + format %{ "replicateD $dst,$mem" %} ins_encode %{ __ pshufd($dst$$XMMRegister, $mem$$Address, 0x44); __ vinsertf128_high($dst$$XMMRegister, $dst$$XMMRegister); @@ -3874,1202 +3831,128 @@ ins_pipe( pipe_slow ); %} -// Replicate double (8 byte) scalar zero to be vector -instruct Repl2D_zero(vec dst, immD0 zero) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateD zero)); - format %{ "xorpd $dst,$dst\t! replicate2D zero" %} - ins_encode %{ - __ xorpd($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4D_zero(vec dst, immD0 zero) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 0); +instruct ReplD_zero(vec dst, immD0 zero) %{ match(Set dst (ReplicateD zero)); - format %{ "vxorpd $dst,$dst,$dst,vect256\t! replicate4D zero" %} - ins_encode %{ - int vector_len = 1; - __ vxorpd($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -// ====================GENERIC REPLICATE========================================== - -// Replicate byte scalar to be vector -instruct Repl4B(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 4); - match(Set dst (ReplicateB src)); - format %{ "movd $dst,$src\n\t" - "punpcklbw $dst,$dst\n\t" - "pshuflw $dst,$dst,0x00\t! replicate4B" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); - __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8B(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 8); - match(Set dst (ReplicateB src)); - format %{ "movd $dst,$src\n\t" - "punpcklbw $dst,$dst\n\t" - "pshuflw $dst,$dst,0x00\t! replicate8B" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister); - __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - %} - ins_pipe( pipe_slow ); -%} - -// Replicate byte scalar immediate to be vector by loading from const table. 
-instruct Repl4B_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 4); - match(Set dst (ReplicateB con)); - format %{ "movdl $dst,[$constantaddress]\t! replicate4B($con)" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 1))); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8B_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 8); - match(Set dst (ReplicateB con)); - format %{ "movq $dst,[$constantaddress]\t! replicate8B($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); - %} - ins_pipe( pipe_slow ); -%} - -// Replicate byte scalar zero to be vector -instruct Repl4B_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 4); - match(Set dst (ReplicateB zero)); - format %{ "pxor $dst,$dst\t! replicate4B zero" %} - ins_encode %{ - __ pxor($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl8B_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 8); - match(Set dst (ReplicateB zero)); - format %{ "pxor $dst,$dst\t! replicate8B zero" %} - ins_encode %{ - __ pxor($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl16B_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 16); - match(Set dst (ReplicateB zero)); - format %{ "pxor $dst,$dst\t! replicate16B zero" %} - ins_encode %{ - __ pxor($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl32B_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 32); - match(Set dst (ReplicateB zero)); - format %{ "vpxor $dst,$dst,$dst\t! replicate32B zero" %} - ins_encode %{ - // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). - int vector_len = 1; - __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Replicate char/short (2 byte) scalar to be vector -instruct Repl2S(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateS src)); - format %{ "movd $dst,$src\n\t" - "pshuflw $dst,$dst,0x00\t! replicate2S" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Replicate char/short (2 byte) scalar immediate to be vector by loading from const table. -instruct Repl2S_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateS con)); - format %{ "movdl $dst,[$constantaddress]\t! replicate2S($con)" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 2))); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4S_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 4); - match(Set dst (ReplicateS con)); - format %{ "movq $dst,[$constantaddress]\t! replicate4S($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Replicate char/short (2 byte) scalar zero to be vector -instruct Repl2S_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateS zero)); - format %{ "pxor $dst,$dst\t! 
replicate2S zero" %} - ins_encode %{ - __ pxor($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4S_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 4); - match(Set dst (ReplicateS zero)); - format %{ "pxor $dst,$dst\t! replicate4S zero" %} - ins_encode %{ - __ pxor($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl8S_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 8); - match(Set dst (ReplicateS zero)); - format %{ "pxor $dst,$dst\t! replicate8S zero" %} - ins_encode %{ - __ pxor($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl16S_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 16); - match(Set dst (ReplicateS zero)); - format %{ "vpxor $dst,$dst,$dst\t! replicate16S zero" %} - ins_encode %{ - // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). - int vector_len = 1; - __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Replicate integer (4 byte) scalar to be vector -instruct Repl2I(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateI src)); - format %{ "movd $dst,$src\n\t" - "pshufd $dst,$dst,0x00\t! replicate2I" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Integer could be loaded into xmm register directly from memory. -instruct Repl2I_mem(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateI (LoadI mem))); - format %{ "movd $dst,$mem\n\t" - "pshufd $dst,$dst,0x00\t! replicate2I" %} - ins_encode %{ - __ movdl($dst$$XMMRegister, $mem$$Address); - __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Replicate integer (4 byte) scalar immediate to be vector by loading from const table. -instruct Repl2I_imm(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateI con)); - format %{ "movq $dst,[$constantaddress]\t! replicate2I($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Replicate integer (4 byte) scalar zero to be vector -instruct Repl2I_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateI zero)); - format %{ "pxor $dst,$dst\t! replicate2I" %} - ins_encode %{ - __ pxor($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4I_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 4); - match(Set dst (ReplicateI zero)); - format %{ "pxor $dst,$dst\t! replicate4I zero)" %} - ins_encode %{ - __ pxor($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl8I_zero(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 8); - match(Set dst (ReplicateI zero)); - format %{ "vpxor $dst,$dst,$dst\t! replicate8I zero" %} - ins_encode %{ - // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). 
- int vector_len = 1; - __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Replicate long (8 byte) scalar to be vector -#ifdef _LP64 -instruct Repl2L(vec dst, rRegL src) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateL src)); - format %{ "movdq $dst,$src\n\t" - "punpcklqdq $dst,$dst\t! replicate2L" %} - ins_encode %{ - __ movdq($dst$$XMMRegister, $src$$Register); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} -#else // _LP64 -instruct Repl2L(vec dst, eRegL src, vec tmp) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateL src)); - effect(TEMP dst, USE src, TEMP tmp); - format %{ "movdl $dst,$src.lo\n\t" - "movdl $tmp,$src.hi\n\t" - "punpckldq $dst,$tmp\n\t" - "punpcklqdq $dst,$dst\t! replicate2L"%} - ins_encode %{ - __ movdl($dst$$XMMRegister, $src$$Register); - __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); - __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} -#endif // _LP64 - -// Replicate long (8 byte) scalar immediate to be vector by loading from const table. -instruct Repl2L_imm(vec dst, immL con) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateL con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "punpcklqdq $dst,$dst\t! replicate2L($con)" %} - ins_encode %{ - __ movq($dst$$XMMRegister, $constantaddress($con)); - __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -// Replicate long (8 byte) scalar zero to be vector -instruct Repl2L_zero(vec dst, immL0 zero) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateL zero)); - format %{ "pxor $dst,$dst\t! replicate2L zero" %} - ins_encode %{ - __ pxor($dst$$XMMRegister, $dst$$XMMRegister); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4L_zero(vec dst, immL0 zero) %{ - predicate(n->as_Vector()->length() == 4); - match(Set dst (ReplicateL zero)); - format %{ "vpxor $dst,$dst,$dst\t! replicate4L zero" %} - ins_encode %{ - // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it). - int vector_len = 1; - __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Replicate float (4 byte) scalar to be vector -instruct Repl2F(vec dst, vlRegF src) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateF src)); - format %{ "pshufd $dst,$dst,0x00\t! replicate2F" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4F(vec dst, vlRegF src) %{ - predicate(n->as_Vector()->length() == 4); - match(Set dst (ReplicateF src)); - format %{ "pshufd $dst,$dst,0x00\t! replicate4F" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00); - %} - ins_pipe( pipe_slow ); -%} - -// Replicate double (8 bytes) scalar to be vector -instruct Repl2D(vec dst, vlRegD src) %{ - predicate(n->as_Vector()->length() == 2); - match(Set dst (ReplicateD src)); - format %{ "pshufd $dst,$src,0x44\t! 
replicate2D" %} - ins_encode %{ - __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44); - %} - ins_pipe( pipe_slow ); -%} - -// ====================EVEX REPLICATE============================================= - -instruct Repl4B_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB (LoadB mem))); - format %{ "vpbroadcastb $dst,$mem\t! replicate4B" %} - ins_encode %{ - int vector_len = 0; - __ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8B_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB (LoadB mem))); - format %{ "vpbroadcastb $dst,$mem\t! replicate8B" %} - ins_encode %{ - int vector_len = 0; - __ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16B_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB src)); - format %{ "evpbroadcastb $dst,$src\t! replicate16B" %} - ins_encode %{ - int vector_len = 0; - __ evpbroadcastb($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16B_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB (LoadB mem))); - format %{ "vpbroadcastb $dst,$mem\t! replicate16B" %} - ins_encode %{ - int vector_len = 0; - __ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32B_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB src)); - format %{ "evpbroadcastb $dst,$src\t! replicate32B" %} - ins_encode %{ - int vector_len = 1; - __ evpbroadcastb($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32B_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB (LoadB mem))); - format %{ "vpbroadcastb $dst,$mem\t! replicate32B" %} - ins_encode %{ - int vector_len = 1; - __ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl64B_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 64 && UseAVX > 2 && VM_Version::supports_avx512bw()); - match(Set dst (ReplicateB src)); - format %{ "evpbroadcastb $dst,$src\t! upper replicate64B" %} - ins_encode %{ - int vector_len = 2; - __ evpbroadcastb($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl64B_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 64 && UseAVX > 2 && VM_Version::supports_avx512bw()); - match(Set dst (ReplicateB (LoadB mem))); - format %{ "vpbroadcastb $dst,$mem\t! replicate64B" %} - ins_encode %{ - int vector_len = 2; - __ vpbroadcastb($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16B_imm_evex(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "vpbroadcastb $dst,$dst\t! 
replicate16B" %} - ins_encode %{ - int vector_len = 0; - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); - __ vpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32B_imm_evex(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateB con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "vpbroadcastb $dst,$dst\t! replicate32B" %} - ins_encode %{ - int vector_len = 1; - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); - __ vpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl64B_imm_evex(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 64 && UseAVX > 2 && VM_Version::supports_avx512bw()); - match(Set dst (ReplicateB con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "vpbroadcastb $dst,$dst\t! upper replicate64B" %} - ins_encode %{ - int vector_len = 2; - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1))); - __ vpbroadcastb($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl64B_zero_evex(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 64 && UseAVX > 2); - match(Set dst (ReplicateB zero)); - format %{ "vpxor $dst k0,$dst,$dst\t! replicate64B zero" %} - ins_encode %{ - // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it). - int vector_len = 2; - __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4S_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS src)); - format %{ "evpbroadcastw $dst,$src\t! replicate4S" %} - ins_encode %{ - int vector_len = 0; - __ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4S_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS (LoadS mem))); - format %{ "vpbroadcastw $dst,$mem\t! replicate4S" %} - ins_encode %{ - int vector_len = 0; - __ vpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8S_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS src)); - format %{ "evpbroadcastw $dst,$src\t! replicate8S" %} - ins_encode %{ - int vector_len = 0; - __ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8S_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS (LoadS mem))); - format %{ "vpbroadcastw $dst,$mem\t! replicate8S" %} - ins_encode %{ - int vector_len = 0; - __ vpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16S_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS src)); - format %{ "evpbroadcastw $dst,$src\t! 
replicate16S" %} - ins_encode %{ - int vector_len = 1; - __ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16S_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS (LoadS mem))); - format %{ "vpbroadcastw $dst,$mem\t! replicate16S" %} - ins_encode %{ - int vector_len = 1; - __ vpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32S_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512bw()); - match(Set dst (ReplicateS src)); - format %{ "evpbroadcastw $dst,$src\t! replicate32S" %} - ins_encode %{ - int vector_len = 2; - __ evpbroadcastw($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32S_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512bw()); - match(Set dst (ReplicateS (LoadS mem))); - format %{ "vpbroadcastw $dst,$mem\t! replicate32S" %} - ins_encode %{ - int vector_len = 2; - __ vpbroadcastw($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8S_imm_evex(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "vpbroadcastw $dst,$dst\t! replicate8S" %} - ins_encode %{ - int vector_len = 0; - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); - __ vpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16S_imm_evex(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2 && VM_Version::supports_avx512vlbw()); - match(Set dst (ReplicateS con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "vpbroadcastw $dst,$dst\t! replicate16S" %} - ins_encode %{ - int vector_len = 1; - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); - __ vpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32S_imm_evex(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 32 && UseAVX > 2 && VM_Version::supports_avx512bw()); - match(Set dst (ReplicateS con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "vpbroadcastw $dst,$dst\t! replicate32S" %} - ins_encode %{ - int vector_len = 2; - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2))); - __ vpbroadcastw($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl32S_zero_evex(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 32 && UseAVX > 2); - match(Set dst (ReplicateS zero)); - format %{ "vpxor $dst k0,$dst,$dst\t! replicate32S zero" %} - ins_encode %{ - // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it). - int vector_len = 2; - __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4I_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI src)); - format %{ "evpbroadcastd $dst,$src\t! 
replicate4I" %} - ins_encode %{ - int vector_len = 0; - __ evpbroadcastd($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4I_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI (LoadI mem))); - format %{ "vpbroadcastd $dst,$mem\t! replicate4I" %} - ins_encode %{ - int vector_len = 0; - __ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8I_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI src)); - format %{ "evpbroadcastd $dst,$src\t! replicate8I" %} - ins_encode %{ - int vector_len = 1; - __ evpbroadcastd($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8I_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI (LoadI mem))); - format %{ "vpbroadcastd $dst,$mem\t! replicate8I" %} - ins_encode %{ - int vector_len = 1; - __ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16I_evex(vec dst, rRegI src) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2); - match(Set dst (ReplicateI src)); - format %{ "evpbroadcastd $dst,$src\t! replicate16I" %} - ins_encode %{ - int vector_len = 2; - __ evpbroadcastd($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16I_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2); - match(Set dst (ReplicateI (LoadI mem))); - format %{ "vpbroadcastd $dst,$mem\t! replicate16I" %} - ins_encode %{ - int vector_len = 2; - __ vpbroadcastd($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4I_imm_evex(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI con)); - format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t" - "vpbroadcastd $dst,$dst\t! replicate4I" %} - ins_encode %{ - int vector_len = 0; - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); - __ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8I_imm_evex(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateI con)); - format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t" - "vpbroadcastd $dst,$dst\t! replicate8I" %} - ins_encode %{ - int vector_len = 1; - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); - __ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16I_imm_evex(vec dst, immI con) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2); - match(Set dst (ReplicateI con)); - format %{ "movq $dst,[$constantaddress]\t! replicate16I($con)\n\t" - "vpbroadcastd $dst,$dst\t! 
replicate16I" %} - ins_encode %{ - int vector_len = 2; - __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4))); - __ vpbroadcastd($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16I_zero_evex(vec dst, immI0 zero) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2); - match(Set dst (ReplicateI zero)); - format %{ "vpxor $dst k0,$dst,$dst\t! replicate16I zero" %} - ins_encode %{ - // Use vxorpd since AVX does not have vpxor for 512-bit (AVX2 will have it). - int vector_len = 2; - __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -// Replicate long (8 byte) scalar to be vector -#ifdef _LP64 -instruct Repl4L_evex(vec dst, rRegL src) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateL src)); - format %{ "evpbroadcastq $dst,$src\t! replicate4L" %} - ins_encode %{ - int vector_len = 1; - __ evpbroadcastq($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8L_evex(vec dst, rRegL src) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2); - match(Set dst (ReplicateL src)); - format %{ "evpbroadcastq $dst,$src\t! replicate8L" %} - ins_encode %{ - int vector_len = 2; - __ evpbroadcastq($dst$$XMMRegister, $src$$Register, vector_len); - %} - ins_pipe( pipe_slow ); -%} -#else // _LP64 -instruct Repl4L_evex(vec dst, eRegL src, regD tmp) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateL src)); - effect(TEMP dst, USE src, TEMP tmp); - format %{ "movdl $dst,$src.lo\n\t" - "movdl $tmp,$src.hi\n\t" - "punpckldq $dst,$tmp\n\t" - "vpbroadcastq $dst,$dst\t! replicate4L" %} - ins_encode %{ - int vector_len = 1; - __ movdl($dst$$XMMRegister, $src$$Register); - __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); - __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); - __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8L_evex(legVec dst, eRegL src, legVec tmp) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2); - match(Set dst (ReplicateL src)); - effect(TEMP dst, USE src, TEMP tmp); - format %{ "movdl $dst,$src.lo\n\t" - "movdl $tmp,$src.hi\n\t" - "punpckldq $dst,$tmp\n\t" - "vpbroadcastq $dst,$dst\t! replicate8L" %} - ins_encode %{ - int vector_len = 2; - __ movdl($dst$$XMMRegister, $src$$Register); - __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register)); - __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister); - __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} -#endif // _LP64 - -instruct Repl4L_imm_evex(vec dst, immL con) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateL con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "vpbroadcastq $dst,$dst\t! replicate4L" %} - ins_encode %{ - int vector_len = 1; - __ movq($dst$$XMMRegister, $constantaddress($con)); - __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8L_imm_evex(vec dst, immL con) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2); - match(Set dst (ReplicateL con)); - format %{ "movq $dst,[$constantaddress]\n\t" - "vpbroadcastq $dst,$dst\t! 
replicate8L" %} - ins_encode %{ - int vector_len = 2; - __ movq($dst$$XMMRegister, $constantaddress($con)); - __ vpbroadcastq($dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl2L_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 2 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateL (LoadL mem))); - format %{ "vpbroadcastd $dst,$mem\t! replicate2L" %} - ins_encode %{ - int vector_len = 0; - __ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4L_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateL (LoadL mem))); - format %{ "vpbroadcastd $dst,$mem\t! replicate4L" %} - ins_encode %{ - int vector_len = 1; - __ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8L_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2); - match(Set dst (ReplicateL (LoadL mem))); - format %{ "vpbroadcastd $dst,$mem\t! replicate8L" %} - ins_encode %{ - int vector_len = 2; - __ vpbroadcastq($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8L_zero_evex(vec dst, immL0 zero) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2); - match(Set dst (ReplicateL zero)); - format %{ "vpxor $dst k0,$dst,$dst\t! replicate8L zero" %} - ins_encode %{ - // Use vxorpd since AVX does not have vpxor for 512-bit (EVEX will have it). - int vector_len = 2; - __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl8F_evex(vec dst, regF src) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateF src)); - format %{ "vpbroadcastss $dst,$src\t! replicate8F" %} - ins_encode %{ - int vector_len = 1; - __ vpbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8F_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateF (LoadF mem))); - format %{ "vbroadcastss $dst,$mem\t! replicate8F" %} - ins_encode %{ - int vector_len = 1; - __ vpbroadcastss($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16F_evex(vec dst, regF src) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2); - match(Set dst (ReplicateF src)); - format %{ "vpbroadcastss $dst,$src\t! replicate16F" %} - ins_encode %{ - int vector_len = 2; - __ vpbroadcastss($dst$$XMMRegister, $src$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16F_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2); - match(Set dst (ReplicateF (LoadF mem))); - format %{ "vbroadcastss $dst,$mem\t! replicate16F" %} - ins_encode %{ - int vector_len = 2; - __ vpbroadcastss($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl16F_zero_evex(vec dst, immF0 zero) %{ - predicate(n->as_Vector()->length() == 16 && UseAVX > 2); - match(Set dst (ReplicateF zero)); - format %{ "vpxor $dst k0,$dst,$dst\t! 
replicate16F zero" %} - ins_encode %{ - // Use vpxor in place of vxorps since EVEX has a constriant on dq for vxorps: this is a 512-bit operation - int vector_len = 2; - __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len); - %} - ins_pipe( fpu_reg_reg ); -%} - -instruct Repl4D_evex(vec dst, regD src) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateD src)); - format %{ "vpbroadcastsd $dst,$src\t! replicate4D" %} - ins_encode %{ - int vector_len = 1; - __ vpbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl4D_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 4 && UseAVX > 2 && VM_Version::supports_avx512vl()); - match(Set dst (ReplicateD (LoadD mem))); - format %{ "vbroadcastsd $dst,$mem\t! replicate4D" %} - ins_encode %{ - int vector_len = 1; - __ vpbroadcastsd($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8D_evex(vec dst, regD src) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2); - match(Set dst (ReplicateD src)); - format %{ "vpbroadcastsd $dst,$src\t! replicate8D" %} - ins_encode %{ - int vector_len = 2; - __ vpbroadcastsd($dst$$XMMRegister, $src$$XMMRegister, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8D_mem_evex(vec dst, memory mem) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2); - match(Set dst (ReplicateD (LoadD mem))); - format %{ "vbroadcastsd $dst,$mem\t! replicate8D" %} - ins_encode %{ - int vector_len = 2; - __ vpbroadcastsd($dst$$XMMRegister, $mem$$Address, vector_len); - %} - ins_pipe( pipe_slow ); -%} - -instruct Repl8D_zero_evex(vec dst, immD0 zero) %{ - predicate(n->as_Vector()->length() == 8 && UseAVX > 2); - match(Set dst (ReplicateD zero)); - format %{ "vpxor $dst k0,$dst,$dst,vect512\t! replicate8D zero" %} - ins_encode %{ - // Use vpxor in place of vxorpd since EVEX has a constriant on dq for vxorpd: this is a 512-bit operation - int vector_len = 2; - __ vpxor($dst$$XMMRegister,$dst$$XMMRegister, $dst$$XMMRegister, vector_len); + format %{ "replicateD $dst,$zero" %} + ins_encode %{ + uint vlen = vector_length(this); + if (vlen == 2) { + __ xorpd($dst$$XMMRegister, $dst$$XMMRegister); + } else { + int vlen_enc = vector_length_encoding(this); + __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vlen_enc); // 512bit vxorps requires AVX512DQ + } %} ins_pipe( fpu_reg_reg ); %} // ====================REDUCTION ARITHMETIC======================================= -instruct rsadd2I_reduction_reg(rRegI dst, rRegI src1, vec src2, vec tmp, vec tmp2) %{ - predicate(UseSSE > 2 && UseAVX == 0 && n->in(2)->bottom_type()->is_vect()->length() == 2); - match(Set dst (AddReductionVI src1 src2)); - effect(TEMP tmp2, TEMP tmp); - format %{ "movdqu $tmp2,$src2\n\t" - "phaddd $tmp2,$tmp2\n\t" - "movd $tmp,$src1\n\t" - "paddd $tmp,$tmp2\n\t" - "movd $dst,$tmp\t! 
add reduction2I" %} - ins_encode %{ - __ movdqu($tmp2$$XMMRegister, $src2$$XMMRegister); - __ phaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister); - __ movdl($tmp$$XMMRegister, $src1$$Register); - __ paddd($tmp$$XMMRegister, $tmp2$$XMMRegister); - __ movdl($dst$$Register, $tmp$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct rvadd2I_reduction_reg(rRegI dst, rRegI src1, vec src2, vec tmp, vec tmp2) %{ - predicate(UseAVX > 0 && VM_Version::supports_avxonly() && n->in(2)->bottom_type()->is_vect()->length() == 2); +// =======================AddReductionVI========================================== + +instruct vadd2I_reduction_reg(rRegI dst, rRegI src1, vec src2, vec tmp, vec tmp2) %{ + predicate(n->in(2)->bottom_type()->is_vect()->length() == 2); // vector_length(src2) == 2 match(Set dst (AddReductionVI src1 src2)); effect(TEMP tmp, TEMP tmp2); - format %{ "vphaddd $tmp,$src2,$src2\n\t" - "movd $tmp2,$src1\n\t" - "vpaddd $tmp2,$tmp2,$tmp\n\t" - "movd $dst,$tmp2\t! add reduction2I" %} - ins_encode %{ - int vector_len = 0; - __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len); - __ movdl($tmp2$$XMMRegister, $src1$$Register); - __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, vector_len); - __ movdl($dst$$Register, $tmp2$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct rvadd2I_reduction_reg_evex(rRegI dst, rRegI src1, vec src2, vec tmp, vec tmp2) %{ - predicate(UseAVX > 2 && n->in(2)->bottom_type()->is_vect()->length() == 2); - match(Set dst (AddReductionVI src1 src2)); - effect(TEMP tmp, TEMP tmp2); - format %{ "pshufd $tmp2,$src2,0x1\n\t" - "vpaddd $tmp,$src2,$tmp2\n\t" - "movd $tmp2,$src1\n\t" - "vpaddd $tmp2,$tmp,$tmp2\n\t" - "movd $dst,$tmp2\t! add reduction2I" %} - ins_encode %{ - int vector_len = 0; - __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1); - __ vpaddd($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, vector_len); - __ movdl($tmp2$$XMMRegister, $src1$$Register); - __ vpaddd($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len); - __ movdl($dst$$Register, $tmp2$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct rsadd4I_reduction_reg(rRegI dst, rRegI src1, vec src2, vec tmp, vec tmp2) %{ - predicate(UseSSE > 2 && UseAVX == 0 && n->in(2)->bottom_type()->is_vect()->length() == 4); - match(Set dst (AddReductionVI src1 src2)); - effect(TEMP tmp, TEMP tmp2); - format %{ "movdqu $tmp,$src2\n\t" - "phaddd $tmp,$tmp\n\t" - "phaddd $tmp,$tmp\n\t" - "movd $tmp2,$src1\n\t" - "paddd $tmp2,$tmp\n\t" - "movd $dst,$tmp2\t! 
add reduction4I" %} - ins_encode %{ - __ movdqu($tmp$$XMMRegister, $src2$$XMMRegister); - __ phaddd($tmp$$XMMRegister, $tmp$$XMMRegister); - __ phaddd($tmp$$XMMRegister, $tmp$$XMMRegister); - __ movdl($tmp2$$XMMRegister, $src1$$Register); - __ paddd($tmp2$$XMMRegister, $tmp$$XMMRegister); - __ movdl($dst$$Register, $tmp2$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct rvadd4I_reduction_reg(rRegI dst, rRegI src1, vec src2, vec tmp, vec tmp2) %{ - predicate(UseAVX > 0 && VM_Version::supports_avxonly() && n->in(2)->bottom_type()->is_vect()->length() == 4); + format %{ "vector_add2I_reduction $dst,$src1,$src2" %} + ins_encode %{ + if (UseAVX > 2) { + int vector_len = Assembler::AVX_128bit; + __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0x1); + __ vpaddd($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, vector_len); + __ movdl($tmp2$$XMMRegister, $src1$$Register); + __ vpaddd($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len); + __ movdl($dst$$Register, $tmp2$$XMMRegister); + } else if (VM_Version::supports_avxonly()) { + int vector_len = Assembler::AVX_128bit; + __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len); + __ movdl($tmp2$$XMMRegister, $src1$$Register); + __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, vector_len); + __ movdl($dst$$Register, $tmp2$$XMMRegister); + } else { + assert(UseSSE > 2, "required"); + __ movdqu($tmp2$$XMMRegister, $src2$$XMMRegister); + __ phaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister); + __ movdl($tmp$$XMMRegister, $src1$$Register); + __ paddd($tmp$$XMMRegister, $tmp2$$XMMRegister); + __ movdl($dst$$Register, $tmp$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%} + +instruct vadd4I_reduction_reg(rRegI dst, rRegI src1, vec src2, vec tmp, vec tmp2) %{ + predicate(n->in(2)->bottom_type()->is_vect()->length() == 4); // vector_length(src2) == 4 match(Set dst (AddReductionVI src1 src2)); effect(TEMP tmp, TEMP tmp2); - format %{ "vphaddd $tmp,$src2,$src2\n\t" - "vphaddd $tmp,$tmp,$tmp\n\t" - "movd $tmp2,$src1\n\t" - "vpaddd $tmp2,$tmp2,$tmp\n\t" - "movd $dst,$tmp2\t! add reduction4I" %} - ins_encode %{ - int vector_len = 0; - __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len); - __ vphaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp$$XMMRegister, vector_len); - __ movdl($tmp2$$XMMRegister, $src1$$Register); - __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, vector_len); - __ movdl($dst$$Register, $tmp2$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct rvadd4I_reduction_reg_evex(rRegI dst, rRegI src1, vec src2, vec tmp, vec tmp2) %{ - predicate(UseAVX > 2 && n->in(2)->bottom_type()->is_vect()->length() == 4); - match(Set dst (AddReductionVI src1 src2)); - effect(TEMP tmp, TEMP tmp2); - format %{ "pshufd $tmp2,$src2,0xE\n\t" - "vpaddd $tmp,$src2,$tmp2\n\t" - "pshufd $tmp2,$tmp,0x1\n\t" - "vpaddd $tmp,$tmp,$tmp2\n\t" - "movd $tmp2,$src1\n\t" - "vpaddd $tmp2,$tmp,$tmp2\n\t" - "movd $dst,$tmp2\t! 
add reduction4I" %} - ins_encode %{ - int vector_len = 0; - __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0xE); - __ vpaddd($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, vector_len); - __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1); - __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len); - __ movdl($tmp2$$XMMRegister, $src1$$Register); - __ vpaddd($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len); - __ movdl($dst$$Register, $tmp2$$XMMRegister); - %} - ins_pipe( pipe_slow ); -%} - -instruct rvadd8I_reduction_reg(rRegI dst, rRegI src1, vec src2, vec tmp, vec tmp2) %{ - predicate(UseAVX > 0 && VM_Version::supports_avxonly() && n->in(2)->bottom_type()->is_vect()->length() == 8); + format %{ "vector_add4I_reduction $dst,$src1,$src2" %} + ins_encode %{ + if (UseAVX > 2) { + int vector_len = Assembler::AVX_128bit; + __ pshufd($tmp2$$XMMRegister, $src2$$XMMRegister, 0xE); + __ vpaddd($tmp$$XMMRegister, $src2$$XMMRegister, $tmp2$$XMMRegister, vector_len); + __ pshufd($tmp2$$XMMRegister, $tmp$$XMMRegister, 0x1); + __ vpaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len); + __ movdl($tmp2$$XMMRegister, $src1$$Register); + __ vpaddd($tmp2$$XMMRegister, $tmp$$XMMRegister, $tmp2$$XMMRegister, vector_len); + __ movdl($dst$$Register, $tmp2$$XMMRegister); + } else if (VM_Version::supports_avxonly()) { + int vector_len = Assembler::AVX_128bit; + __ vphaddd($tmp$$XMMRegister, $src2$$XMMRegister, $src2$$XMMRegister, vector_len); + __ vphaddd($tmp$$XMMRegister, $tmp$$XMMRegister, $tmp$$XMMRegister, vector_len); + __ movdl($tmp2$$XMMRegister, $src1$$Register); + __ vpaddd($tmp2$$XMMRegister, $tmp2$$XMMRegister, $tmp$$XMMRegister, vector_len); + __ movdl($dst$$Register, $tmp2$$XMMRegister); + } else { + assert(UseSSE > 2, "required"); + __ movdqu($tmp$$XMMRegister, $src2$$XMMRegister); + __ phaddd($tmp$$XMMRegister, $tmp$$XMMRegister); + __ phaddd($tmp$$XMMRegister, $tmp$$XMMRegister); + __ movdl($tmp2$$XMMRegister, $src1$$Register); + __ paddd($tmp2$$XMMRegister, $tmp$$XMMRegister); + __ movdl($dst$$Register, $tmp2$$XMMRegister); + } + %} + ins_pipe( pipe_slow ); +%}