changeset 57789:8ce5915e57d2

Merge
author psadhukhan
date Tue, 21 Jan 2020 07:29:48 +0530
parents f446d8919043 a18a85485ba4
children 6500cac59d0f
files src/hotspot/os/bsd/gc/z/zBackingFile_bsd.cpp src/hotspot/os/bsd/gc/z/zBackingFile_bsd.hpp src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp src/hotspot/os/linux/gc/z/zBackingFile_linux.cpp src/hotspot/os/linux/gc/z/zBackingFile_linux.hpp src/hotspot/os/linux/gc/z/zBackingPath_linux.cpp src/hotspot/os/linux/gc/z/zBackingPath_linux.hpp src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp src/hotspot/os/windows/gc/z/zBackingFile_windows.cpp src/hotspot/os/windows/gc/z/zBackingFile_windows.hpp src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp src/hotspot/share/gc/parallel/psMarkSweep.cpp src/hotspot/share/gc/parallel/psMarkSweep.hpp src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp src/hotspot/share/gc/parallel/psMarkSweepDecorator.hpp src/hotspot/share/gc/parallel/psMarkSweepProxy.hpp test/hotspot/jtreg/runtime/records/TEST.properties test/jdk/java/io/Serializable/records/TEST.properties test/jdk/java/lang/instrument/TEST.properties test/jdk/java/lang/reflect/records/TEST.properties test/jdk/java/lang/runtime/TEST.properties test/jdk/sun/security/krb5/auto/SaslGSS.java
diffstat 624 files changed, 11270 insertions(+), 6933 deletions(-)
--- a/.hgtags	Fri Jan 17 12:20:00 2020 -0800
+++ b/.hgtags	Tue Jan 21 07:29:48 2020 +0530
@@ -610,3 +610,5 @@
 bb0a7975b31ded63d594ee8dbfc4d4ead587f79b jdk-15+4
 decd3d2953b640f1043ee76953ff89238bff92e8 jdk-14+31
 b97c1773ccafae4a8c16cc6aedb10b2a4f9a07ed jdk-15+5
+2776da28515e087cc8849acf1e131a65ea7e77b6 jdk-14+32
+ef7d53b4fccd4a0501b17d974e84f37aa99fa813 jdk-15+6
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/GraalBuilderImage.gmk	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,57 @@
+#
+# Copyright (c) 2020, Red Hat Inc.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.  Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+# This makefile creates a jdk image overlaid with statically linked core
+# libraries.
+
+default: all
+
+include $(SPEC)
+include MakeBase.gmk
+
+################################################################################
+
+TARGETS :=
+
+$(eval $(call SetupCopyFiles, COPY_JDK_IMG, \
+      SRC := $(JDK_IMAGE_DIR)/, \
+      DEST := $(GRAAL_BUILDER_IMAGE_DIR)/, \
+      FILES := $(call FindFiles, $(JDK_IMAGE_DIR)/), \
+))
+TARGETS += $(COPY_JDK_IMG)
+
+$(eval $(call SetupCopyFiles, COPY_STATIC_LIBS, \
+      SRC := $(STATIC_LIBS_IMAGE_DIR)/lib, \
+      DEST := $(GRAAL_BUILDER_IMAGE_DIR)/lib, \
+      FILES := $(filter %$(STATIC_LIBRARY_SUFFIX), \
+          $(call FindFiles, $(STATIC_LIBS_IMAGE_DIR)/lib)), \
+))
+TARGETS += $(COPY_STATIC_LIBS)
+
+################################################################################
+
+all: $(TARGETS)
+
+.PHONY: all
--- a/make/Main.gmk	Fri Jan 17 12:20:00 2020 -0800
+++ b/make/Main.gmk	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -407,6 +407,9 @@
 exploded-image-optimize:
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f ExplodedImageOptimize.gmk)
 
+graal-builder-image:
+	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f GraalBuilderImage.gmk)
+
 ifeq ($(JCOV_ENABLED), true)
   jcov-image:
 	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Coverage.gmk jcov-image)
@@ -415,7 +418,7 @@
 ALL_TARGETS += store-source-revision create-source-revision-tracker bootcycle-images zip-security \
     zip-source jrtfs-jar jdk-image legacy-jre-image \
     symbols-image static-libs-image mac-jdk-bundle mac-legacy-jre-bundle \
-    release-file exploded-image-optimize jcov-image
+    release-file exploded-image-optimize graal-builder-image jcov-image
 
 ################################################################################
 # Docs targets
@@ -886,6 +889,8 @@
 
   static-libs-image: $(STATIC_LIBS_TARGETS)
 
+  graal-builder-image: jdk-image static-libs-image
+
   mac-jdk-bundle: jdk-image
   mac-legacy-jre-bundle: legacy-jre-image
 
--- a/make/autoconf/spec.gmk.in	Fri Jan 17 12:20:00 2020 -0800
+++ b/make/autoconf/spec.gmk.in	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -902,6 +902,10 @@
 STATIC_LIBS_IMAGE_SUBDIR := static-libs
 STATIC_LIBS_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/$(STATIC_LIBS_IMAGE_SUBDIR)
 
+# Graal builder image
+GRAAL_BUILDER_IMAGE_SUBDIR := graal-builder-jdk
+GRAAL_BUILDER_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/$(GRAAL_BUILDER_IMAGE_SUBDIR)
+
 # Macosx bundles directory definitions
 JDK_MACOSX_BUNDLE_SUBDIR=jdk-bundle
 JRE_MACOSX_BUNDLE_SUBDIR=jre-bundle
--- a/make/data/symbols/java.base-E.sym.txt	Fri Jan 17 12:20:00 2020 -0800
+++ b/make/data/symbols/java.base-E.sym.txt	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
 
 class name java/lang/Record
 header extends java/lang/Object flags 421 classAnnotations @Ljdk/internal/PreviewFeature;(feature=eLjdk/internal/PreviewFeature$Feature;RECORDS;,essentialAPI=Ztrue)
-method name <init> descriptor ()V flags 1
+method name <init> descriptor ()V flags 4
 method name equals descriptor (Ljava/lang/Object;)Z flags 401
 method name hashCode descriptor ()I flags 401
 method name toString descriptor ()Ljava/lang/String; flags 401
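
The "flags" values in these symbol files are standard JVM access-flag masks in
hexadecimal, so the one-line change above records java.lang.Record's canonical
constructor going from public (1) to protected (4); the surrounding entries
decode the same way (class flags 421 = ACC_PUBLIC | ACC_SUPER | ACC_ABSTRACT,
method flags 401 = ACC_PUBLIC | ACC_ABSTRACT). A minimal C++ sketch of the
decoding, using only constants from the JVM specification:

    // Decode the hex "flags" fields from the .sym.txt entries above.
    #include <cstdio>

    enum AccessFlags {
      ACC_PUBLIC    = 0x0001,
      ACC_PROTECTED = 0x0004,
      ACC_SUPER     = 0x0020,
      ACC_ABSTRACT  = 0x0400
    };

    static void decode(const char* what, unsigned flags) {
      printf("%s:%s%s%s%s\n", what,
             (flags & ACC_PUBLIC)    ? " public"    : "",
             (flags & ACC_PROTECTED) ? " protected" : "",
             (flags & ACC_SUPER)     ? " super"     : "",
             (flags & ACC_ABSTRACT)  ? " abstract"  : "");
    }

    int main() {
      decode("class flags 421", 0x421);  // public super abstract
      decode("<init> flags 1",  0x1);    // public (before this change)
      decode("<init> flags 4",  0x4);    // protected (after this change)
    }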
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/data/symbols/jdk.incubator.foreign-E.sym.txt	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,197 @@
+#
+# Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.  Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+# ##########################################################
+# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ###
+# ##########################################################
+#
+module name jdk.incubator.foreign
+header exports jdk/incubator/foreign requires name\u0020;java.base\u0020;flags\u0020;8000 target linux-amd64 resolution 9 flags 8000
+
+class name jdk/incubator/foreign/AbstractLayout
+header extends java/lang/Object implements jdk/incubator/foreign/MemoryLayout flags 420
+innerclass innerClass java/lang/constant/DirectMethodHandleDesc$Kind outerClass java/lang/constant/DirectMethodHandleDesc innerClassName Kind flags 4019
+innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
+field name BSM_GET_STATIC_FINAL descriptor Ljava/lang/constant/DirectMethodHandleDesc; flags 19
+method name <init> descriptor (Ljava/util/OptionalLong;JLjava/util/Optional;)V flags 1 signature (Ljava/util/OptionalLong;JLjava/util/Optional<Ljava/lang/String;>;)V
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/AbstractLayout; flags 1
+method name name descriptor ()Ljava/util/Optional; flags 11 signature ()Ljava/util/Optional<Ljava/lang/String;>;
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/AbstractLayout; flags 1
+method name bitAlignment descriptor ()J flags 11
+method name bitSize descriptor ()J flags 1
+method name hashCode descriptor ()I flags 1
+method name equals descriptor (Ljava/lang/Object;)Z flags 1
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 1041
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/MemoryLayout; flags 1041
+
+class name jdk/incubator/foreign/GroupLayout
+header extends jdk/incubator/foreign/AbstractLayout flags 31
+innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
+method name memberLayouts descriptor ()Ljava/util/List; flags 1 signature ()Ljava/util/List<Ljdk/incubator/foreign/MemoryLayout;>;
+method name toString descriptor ()Ljava/lang/String; flags 1
+method name isStruct descriptor ()Z flags 1
+method name isUnion descriptor ()Z flags 1
+method name equals descriptor (Ljava/lang/Object;)Z flags 1
+method name hashCode descriptor ()I flags 1
+method name describeConstable descriptor ()Ljava/util/Optional; flags 1 signature ()Ljava/util/Optional<Ljava/lang/constant/DynamicConstantDesc<Ljdk/incubator/foreign/GroupLayout;>;>;
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/GroupLayout; flags 1
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/GroupLayout; flags 1
+method name bitSize descriptor ()J flags 1041
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/AbstractLayout; flags 1041
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/AbstractLayout; flags 1041
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 1041
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/MemoryLayout; flags 1041
+
+class name jdk/incubator/foreign/MemoryAddress
+header extends java/lang/Object flags 601
+method name offset descriptor (J)Ljdk/incubator/foreign/MemoryAddress; flags 401
+method name offset descriptor ()J flags 401
+method name segment descriptor ()Ljdk/incubator/foreign/MemorySegment; flags 401
+method name equals descriptor (Ljava/lang/Object;)Z flags 401
+method name hashCode descriptor ()I flags 401
+method name copy descriptor (Ljdk/incubator/foreign/MemoryAddress;Ljdk/incubator/foreign/MemoryAddress;J)V flags 9
+
+class name jdk/incubator/foreign/MemoryHandles
+header extends java/lang/Object flags 31
+innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
+method name varHandle descriptor (Ljava/lang/Class;Ljava/nio/ByteOrder;)Ljava/lang/invoke/VarHandle; flags 9 signature (Ljava/lang/Class<*>;Ljava/nio/ByteOrder;)Ljava/lang/invoke/VarHandle;
+method name varHandle descriptor (Ljava/lang/Class;JLjava/nio/ByteOrder;)Ljava/lang/invoke/VarHandle; flags 9 signature (Ljava/lang/Class<*>;JLjava/nio/ByteOrder;)Ljava/lang/invoke/VarHandle;
+method name withOffset descriptor (Ljava/lang/invoke/VarHandle;J)Ljava/lang/invoke/VarHandle; flags 9
+method name withStride descriptor (Ljava/lang/invoke/VarHandle;J)Ljava/lang/invoke/VarHandle; flags 9
+
+class name jdk/incubator/foreign/MemoryLayout
+header extends java/lang/Object implements java/lang/constant/Constable nestMembers jdk/incubator/foreign/MemoryLayout$PathElement flags 601
+innerclass innerClass jdk/incubator/foreign/MemoryLayout$PathElement outerClass jdk/incubator/foreign/MemoryLayout innerClassName PathElement flags 609
+innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
+method name describeConstable descriptor ()Ljava/util/Optional; flags 401 signature ()Ljava/util/Optional<+Ljava/lang/constant/DynamicConstantDesc<+Ljdk/incubator/foreign/MemoryLayout;>;>;
+method name bitSize descriptor ()J flags 401
+method name byteSize descriptor ()J flags 1
+method name name descriptor ()Ljava/util/Optional; flags 401 signature ()Ljava/util/Optional<Ljava/lang/String;>;
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/MemoryLayout; flags 401
+method name bitAlignment descriptor ()J flags 401
+method name byteAlignment descriptor ()J flags 1
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 401
+method name offset descriptor ([Ljdk/incubator/foreign/MemoryLayout$PathElement;)J flags 81
+method name varHandle descriptor (Ljava/lang/Class;[Ljdk/incubator/foreign/MemoryLayout$PathElement;)Ljava/lang/invoke/VarHandle; flags 81 signature (Ljava/lang/Class<*>;[Ljdk/incubator/foreign/MemoryLayout$PathElement;)Ljava/lang/invoke/VarHandle;
+method name equals descriptor (Ljava/lang/Object;)Z flags 401
+method name hashCode descriptor ()I flags 401
+method name toString descriptor ()Ljava/lang/String; flags 401
+method name ofPaddingBits descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 9
+method name ofValueBits descriptor (JLjava/nio/ByteOrder;)Ljdk/incubator/foreign/ValueLayout; flags 9
+method name ofSequence descriptor (JLjdk/incubator/foreign/MemoryLayout;)Ljdk/incubator/foreign/SequenceLayout; flags 9
+method name ofSequence descriptor (Ljdk/incubator/foreign/MemoryLayout;)Ljdk/incubator/foreign/SequenceLayout; flags 9
+method name ofStruct descriptor ([Ljdk/incubator/foreign/MemoryLayout;)Ljdk/incubator/foreign/GroupLayout; flags 89
+method name ofUnion descriptor ([Ljdk/incubator/foreign/MemoryLayout;)Ljdk/incubator/foreign/GroupLayout; flags 89
+
+class name jdk/incubator/foreign/MemoryLayout$PathElement
+header extends java/lang/Object nestHost jdk/incubator/foreign/MemoryLayout flags 601
+innerclass innerClass jdk/incubator/foreign/MemoryLayout$PathElement outerClass jdk/incubator/foreign/MemoryLayout innerClassName PathElement flags 609
+innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
+method name groupElement descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/MemoryLayout$PathElement; flags 9
+method name sequenceElement descriptor (J)Ljdk/incubator/foreign/MemoryLayout$PathElement; flags 9
+method name sequenceElement descriptor (JJ)Ljdk/incubator/foreign/MemoryLayout$PathElement; flags 9
+method name sequenceElement descriptor ()Ljdk/incubator/foreign/MemoryLayout$PathElement; flags 9
+
+class name jdk/incubator/foreign/MemoryLayouts
+header extends java/lang/Object flags 31
+field name BITS_8_LE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name BITS_16_LE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name BITS_32_LE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name BITS_64_LE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name BITS_8_BE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name BITS_16_BE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name BITS_32_BE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name BITS_64_BE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name PAD_8 descriptor Ljdk/incubator/foreign/MemoryLayout; flags 19
+field name PAD_16 descriptor Ljdk/incubator/foreign/MemoryLayout; flags 19
+field name PAD_32 descriptor Ljdk/incubator/foreign/MemoryLayout; flags 19
+field name PAD_64 descriptor Ljdk/incubator/foreign/MemoryLayout; flags 19
+field name JAVA_BYTE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name JAVA_CHAR descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name JAVA_SHORT descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name JAVA_INT descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name JAVA_LONG descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name JAVA_FLOAT descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+field name JAVA_DOUBLE descriptor Ljdk/incubator/foreign/ValueLayout; flags 19
+
+class name jdk/incubator/foreign/MemorySegment
+header extends java/lang/Object implements java/lang/AutoCloseable flags 601
+innerclass innerClass java/nio/channels/FileChannel$MapMode outerClass java/nio/channels/FileChannel innerClassName MapMode flags 9
+innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
+method name baseAddress descriptor ()Ljdk/incubator/foreign/MemoryAddress; flags 401
+method name acquire descriptor ()Ljdk/incubator/foreign/MemorySegment; flags 401
+method name isAccessible descriptor ()Z flags 401
+method name byteSize descriptor ()J flags 401
+method name asReadOnly descriptor ()Ljdk/incubator/foreign/MemorySegment; flags 401
+method name asSlice descriptor (JJ)Ljdk/incubator/foreign/MemorySegment; flags 401
+method name isAlive descriptor ()Z flags 401
+method name isReadOnly descriptor ()Z flags 401
+method name close descriptor ()V flags 401
+method name asByteBuffer descriptor ()Ljava/nio/ByteBuffer; flags 401
+method name toByteArray descriptor ()[B flags 401
+method name ofByteBuffer descriptor (Ljava/nio/ByteBuffer;)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name ofArray descriptor ([B)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name ofArray descriptor ([C)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name ofArray descriptor ([S)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name ofArray descriptor ([I)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name ofArray descriptor ([F)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name ofArray descriptor ([J)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name ofArray descriptor ([D)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name allocateNative descriptor (Ljdk/incubator/foreign/MemoryLayout;)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name allocateNative descriptor (J)Ljdk/incubator/foreign/MemorySegment; flags 9
+method name mapFromPath descriptor (Ljava/nio/file/Path;JLjava/nio/channels/FileChannel$MapMode;)Ljdk/incubator/foreign/MemorySegment; thrownTypes java/io/IOException flags 9
+method name allocateNative descriptor (JJ)Ljdk/incubator/foreign/MemorySegment; flags 9
+
+class name jdk/incubator/foreign/SequenceLayout
+header extends jdk/incubator/foreign/AbstractLayout flags 31
+method name elementLayout descriptor ()Ljdk/incubator/foreign/MemoryLayout; flags 1
+method name elementCount descriptor ()Ljava/util/OptionalLong; flags 1
+method name toString descriptor ()Ljava/lang/String; flags 1
+method name equals descriptor (Ljava/lang/Object;)Z flags 1
+method name hashCode descriptor ()I flags 1
+method name describeConstable descriptor ()Ljava/util/Optional; flags 1 signature ()Ljava/util/Optional<Ljava/lang/constant/DynamicConstantDesc<Ljdk/incubator/foreign/SequenceLayout;>;>;
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/SequenceLayout; flags 1
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/SequenceLayout; flags 1
+method name bitSize descriptor ()J flags 1041
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/AbstractLayout; flags 1041
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/AbstractLayout; flags 1041
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 1041
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/MemoryLayout; flags 1041
+
+class name jdk/incubator/foreign/ValueLayout
+header extends jdk/incubator/foreign/AbstractLayout implements jdk/incubator/foreign/MemoryLayout flags 31
+method name order descriptor ()Ljava/nio/ByteOrder; flags 1
+method name withOrder descriptor (Ljava/nio/ByteOrder;)Ljdk/incubator/foreign/ValueLayout; flags 1
+method name toString descriptor ()Ljava/lang/String; flags 1
+method name equals descriptor (Ljava/lang/Object;)Z flags 1
+method name hashCode descriptor ()I flags 1
+method name describeConstable descriptor ()Ljava/util/Optional; flags 1 signature ()Ljava/util/Optional<Ljava/lang/constant/DynamicConstantDesc<Ljdk/incubator/foreign/ValueLayout;>;>;
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/ValueLayout; flags 1
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/ValueLayout; flags 1
+method name bitSize descriptor ()J flags 1041
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/AbstractLayout; flags 1041
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/AbstractLayout; flags 1041
+method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 1041
+method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/MemoryLayout; flags 1041
+
--- a/make/data/symbols/symbols	Fri Jan 17 12:20:00 2020 -0800
+++ b/make/data/symbols/symbols	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -37,4 +37,4 @@
 platform version B base A files java.activation-B.sym.txt:java.base-B.sym.txt:java.compiler-B.sym.txt:java.corba-B.sym.txt:java.datatransfer-B.sym.txt:java.desktop-B.sym.txt:java.instrument-B.sym.txt:java.logging-B.sym.txt:java.management-B.sym.txt:java.management.rmi-B.sym.txt:java.naming-B.sym.txt:java.net.http-B.sym.txt:java.prefs-B.sym.txt:java.rmi-B.sym.txt:java.scripting-B.sym.txt:java.se-B.sym.txt:java.se.ee-B.sym.txt:java.security.jgss-B.sym.txt:java.security.sasl-B.sym.txt:java.smartcardio-B.sym.txt:java.sql-B.sym.txt:java.sql.rowset-B.sym.txt:java.transaction-B.sym.txt:java.transaction.xa-B.sym.txt:java.xml-B.sym.txt:java.xml.bind-B.sym.txt:java.xml.crypto-B.sym.txt:java.xml.ws-B.sym.txt:java.xml.ws.annotation-B.sym.txt:jdk.accessibility-B.sym.txt:jdk.attach-B.sym.txt:jdk.charsets-B.sym.txt:jdk.compiler-B.sym.txt:jdk.crypto.cryptoki-B.sym.txt:jdk.crypto.ec-B.sym.txt:jdk.dynalink-B.sym.txt:jdk.editpad-B.sym.txt:jdk.hotspot.agent-B.sym.txt:jdk.httpserver-B.sym.txt:jdk.incubator.httpclient-B.sym.txt:jdk.jartool-B.sym.txt:jdk.javadoc-B.sym.txt:jdk.jcmd-B.sym.txt:jdk.jconsole-B.sym.txt:jdk.jdeps-B.sym.txt:jdk.jdi-B.sym.txt:jdk.jdwp.agent-B.sym.txt:jdk.jfr-B.sym.txt:jdk.jlink-B.sym.txt:jdk.jshell-B.sym.txt:jdk.jsobject-B.sym.txt:jdk.jstatd-B.sym.txt:jdk.localedata-B.sym.txt:jdk.management-B.sym.txt:jdk.management.agent-B.sym.txt:jdk.management.jfr-B.sym.txt:jdk.naming.dns-B.sym.txt:jdk.naming.rmi-B.sym.txt:jdk.net-B.sym.txt:jdk.pack-B.sym.txt:jdk.rmic-B.sym.txt:jdk.scripting.nashorn-B.sym.txt:jdk.sctp-B.sym.txt:jdk.security.auth-B.sym.txt:jdk.security.jgss-B.sym.txt:jdk.unsupported-B.sym.txt:jdk.xml.dom-B.sym.txt:jdk.zipfs-B.sym.txt
 platform version C base B files java.base-C.sym.txt:java.compiler-C.sym.txt:java.desktop-C.sym.txt:java.naming-C.sym.txt:java.rmi-C.sym.txt:java.xml-C.sym.txt:jdk.compiler-C.sym.txt:jdk.jfr-C.sym.txt:jdk.jsobject-C.sym.txt:jdk.unsupported-C.sym.txt
 platform version D base C files java.base-D.sym.txt:java.compiler-D.sym.txt:java.desktop-D.sym.txt:java.management-D.sym.txt:java.management.rmi-D.sym.txt:java.net.http-D.sym.txt:java.security.jgss-D.sym.txt:java.xml-D.sym.txt:java.xml.crypto-D.sym.txt:jdk.compiler-D.sym.txt:jdk.httpserver-D.sym.txt:jdk.jartool-D.sym.txt:jdk.javadoc-D.sym.txt:jdk.jlink-D.sym.txt:jdk.jshell-D.sym.txt
-platform version E base D files java.base-E.sym.txt:java.compiler-E.sym.txt:java.desktop-E.sym.txt:java.xml-E.sym.txt:jdk.compiler-E.sym.txt:jdk.httpserver-E.sym.txt:jdk.incubator.jpackage-E.sym.txt:jdk.jfr-E.sym.txt:jdk.jlink-E.sym.txt:jdk.jshell-E.sym.txt:jdk.jsobject-E.sym.txt:jdk.management-E.sym.txt:jdk.net-E.sym.txt:jdk.pack-E.sym.txt
+platform version E base D files java.base-E.sym.txt:java.compiler-E.sym.txt:java.desktop-E.sym.txt:java.xml-E.sym.txt:jdk.compiler-E.sym.txt:jdk.httpserver-E.sym.txt:jdk.incubator.foreign-E.sym.txt:jdk.incubator.jpackage-E.sym.txt:jdk.jfr-E.sym.txt:jdk.jlink-E.sym.txt:jdk.jshell-E.sym.txt:jdk.jsobject-E.sym.txt:jdk.management-E.sym.txt:jdk.net-E.sym.txt:jdk.pack-E.sym.txt
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Tue Jan 21 07:29:48 2020 +0530
@@ -1,6 +1,6 @@
 //
-// Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
+// Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2014, 2020, Red Hat, Inc. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -983,6 +983,7 @@
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "opto/addnode.hpp"
+#include "opto/convertnode.hpp"
 
 extern RegMask _ANY_REG32_mask;
 extern RegMask _ANY_REG_mask;
@@ -13232,6 +13233,29 @@
   ins_pipe(fp_div_d);
 %}
 
+// Math.rint, floor, ceil
+instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
+  match(Set dst (RoundDoubleMode src rmode));
+  format %{ "frint  $dst, $src, $rmode" %}
+  ins_encode %{
+    switch ($rmode$$constant) {
+      case RoundDoubleModeNode::rmode_rint:
+        __ frintnd(as_FloatRegister($dst$$reg),
+                   as_FloatRegister($src$$reg));
+        break;
+      case RoundDoubleModeNode::rmode_floor:
+        __ frintmd(as_FloatRegister($dst$$reg),
+                   as_FloatRegister($src$$reg));
+        break;
+      case RoundDoubleModeNode::rmode_ceil:
+        __ frintpd(as_FloatRegister($dst$$reg),
+                   as_FloatRegister($src$$reg));
+        break;
+    }
+  %}
+  ins_pipe(fp_uop_d);
+%}
+
 // ============================================================================
 // Logical Instructions
 
@@ -17939,6 +17963,29 @@
   ins_pipe(vdop_fp128);
 %}
 
+instruct vround2D_reg(vecX dst, vecX src, immI rmode) %{
+  predicate(n->as_Vector()->length() == 2 && n->bottom_type()->is_vect()->element_basic_type() == T_DOUBLE);
+  match(Set dst (RoundDoubleModeV src rmode));
+  format %{ "frint  $dst, $src, $rmode" %}
+  ins_encode %{
+    switch ($rmode$$constant) {
+      case RoundDoubleModeNode::rmode_rint:
+        __ frintn(as_FloatRegister($dst$$reg), __ T2D,
+                  as_FloatRegister($src$$reg));
+        break;
+      case RoundDoubleModeNode::rmode_floor:
+        __ frintm(as_FloatRegister($dst$$reg), __ T2D,
+                  as_FloatRegister($src$$reg));
+        break;
+      case RoundDoubleModeNode::rmode_ceil:
+        __ frintp(as_FloatRegister($dst$$reg), __ T2D,
+                  as_FloatRegister($src$$reg));
+        break;
+    }
+  %}
+  ins_pipe(vdop_fp128);
+%}
+
 //----------PEEPHOLE RULES-----------------------------------------------------
 // These must follow all instruction definitions as they use the names
 // defined in the instructions definitions.
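
The two instruct blocks added above select among the AArch64 round-to-integral
instructions: frintn rounds to nearest with ties to even (Math.rint), frintm
rounds toward minus infinity (floor), and frintp rounds toward plus infinity
(ceil). A standalone C++ illustration of the expected semantics, using the
portable <cmath> equivalents rather than HotSpot code:

    // The tie-breaking case 2.5 shows why rint (frintn) differs from ceil.
    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    int main() {
      std::fesetround(FE_TONEAREST);                   // frintn: ties to even
      printf("rint(2.5)  = %.1f\n", std::rint(2.5));   // 2.0, not 3.0
      printf("floor(2.5) = %.1f\n", std::floor(2.5));  // frintm: 2.0
      printf("ceil(2.5)  = %.1f\n", std::ceil(2.5));   // frintp: 3.0
    }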
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2613,42 +2613,42 @@
 #undef INSN
 
   // AdvSIMD two-reg misc
-#define INSN(NAME, U, opcode)                                                       \
+  // In this instruction group, the two bits of the size field ([23:22]) may be
+  // fixed, determined by the "SIMD_Arrangement T", or a mix of both. The
+  // additional parameter "tmask" is a 2-bit mask indicating which bits of the
+  // size field are determined by the SIMD_Arrangement: a bit of "tmask" is set
+  // to 1 if the corresponding size bit is marked as "x" in the Arm ARM.
+#define INSN(NAME, U, size, tmask, opcode)                                          \
   void NAME(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {               \
        starti;                                                                      \
        assert((ASSERTION), MSG);                                                    \
        f(0, 31), f((int)T & 1, 30), f(U, 29), f(0b01110, 28, 24);                   \
-       f((int)(T >> 1), 23, 22), f(0b10000, 21, 17), f(opcode, 16, 12);             \
-       f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0);                                       \
+       f(size | ((int)(T >> 1) & tmask), 23, 22), f(0b10000, 21, 17);               \
+       f(opcode, 16, 12), f(0b10, 11, 10), rf(Vn, 5), rf(Vd, 0);                    \
  }
 
 #define MSG "invalid arrangement"
 
 #define ASSERTION (T == T2S || T == T4S || T == T2D)
-  INSN(fsqrt, 1, 0b11111);
-  INSN(fabs,  0, 0b01111);
-  INSN(fneg,  1, 0b01111);
+  INSN(fsqrt,  1, 0b10, 0b01, 0b11111);
+  INSN(fabs,   0, 0b10, 0b01, 0b01111);
+  INSN(fneg,   1, 0b10, 0b01, 0b01111);
+  INSN(frintn, 0, 0b00, 0b01, 0b11000);
+  INSN(frintm, 0, 0b00, 0b01, 0b11001);
+  INSN(frintp, 0, 0b10, 0b01, 0b11000);
 #undef ASSERTION
 
 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H || T == T2S || T == T4S)
-  INSN(rev64, 0, 0b00000);
+  INSN(rev64, 0, 0b00, 0b11, 0b00000);
 #undef ASSERTION
 
 #define ASSERTION (T == T8B || T == T16B || T == T4H || T == T8H)
-  INSN(rev32, 1, 0b00000);
-private:
-  INSN(_rbit, 1, 0b00101);
-public:
-
+  INSN(rev32, 1, 0b00, 0b11, 0b00000);
 #undef ASSERTION
 
 #define ASSERTION (T == T8B || T == T16B)
-  INSN(rev16, 0, 0b00001);
-  // RBIT only allows T8B and T16B but encodes them oddly.  Argh...
-  void rbit(FloatRegister Vd, SIMD_Arrangement T, FloatRegister Vn) {
-    assert((ASSERTION), MSG);
-    _rbit(Vd, SIMD_Arrangement((T & 1) | 0b010), Vn);
-  }
+  INSN(rev16, 0, 0b00, 0b11, 0b00001);
+  INSN(rbit,  1, 0b01, 0b00, 0b00101);
 #undef ASSERTION
 
 #undef MSG
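
A worked example of the new size-field expression, size | ((int)(T >> 1) &
tmask): for frintp, size = 0b10 and tmask = 0b01 fix bit 23 to 1 and take bit
22 from the arrangement. The sketch below mirrors that expression with assumed
SIMD_Arrangement encodings (hypothetical values; the real enum is defined
earlier in this header, with bit 0 acting as the Q bit):

    #include <cstdio>

    // Assumed encodings: bit 0 is the Q bit, upper bits feed the size field.
    enum SIMD_Arrangement { T2S = 0b100, T4S = 0b101, T2D = 0b111 };

    static int size_field(int size, int tmask, SIMD_Arrangement T) {
      return size | ((static_cast<int>(T) >> 1) & tmask);
    }

    int main() {
      // frintp on 2S keeps size 0b10; on 2D the arrangement sets bit 22 too.
      printf("frintp 2S -> size field %d\n", size_field(0b10, 0b01, T2S)); // 2
      printf("frintp 2D -> size field %d\n", size_field(0b10, 0b01, T2D)); // 3
    }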
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 //
 // Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
 // under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -170,11 +170,13 @@
   //  rscratch2: CompiledICHolder
   //  j_rarg0: Receiver
 
-  // Most registers are in use; we'll use r16, rmethod, r10, r11
+  // This stub is called from compiled code which has no callee-saved registers,
+  // so all registers except arguments are free at this point.
   const Register recv_klass_reg     = r10;
   const Register holder_klass_reg   = r16; // declaring interface klass (DECC)
   const Register resolved_klass_reg = rmethod; // resolved interface klass (REFC)
   const Register temp_reg           = r11;
+  const Register temp_reg2          = r15;
   const Register icholder_reg       = rscratch2;
 
   Label L_no_such_interface;
@@ -189,11 +191,10 @@
   __ load_klass(recv_klass_reg, j_rarg0);
 
   // Receiver subtype check against REFC.
-  // Destroys recv_klass_reg value.
   __ lookup_interface_method(// inputs: rec. class, interface
                              recv_klass_reg, resolved_klass_reg, noreg,
                              // outputs:  scan temp. reg1, scan temp. reg2
-                             recv_klass_reg, temp_reg,
+                             temp_reg2, temp_reg,
                              L_no_such_interface,
                              /*return_method=*/false);
 
@@ -201,7 +202,6 @@
   start_pc = __ pc();
 
   // Get selected method from declaring class and itable index
-  __ load_klass(recv_klass_reg, j_rarg0);   // restore recv_klass_reg
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
                              recv_klass_reg, holder_klass_reg, itable_index,
                              // outputs: method, scan temp. reg
@@ -211,7 +211,7 @@
   const ptrdiff_t lookupSize = __ pc() - start_pc;
 
   // Reduce "estimate" such that "padding" does not drop below 8.
-  const ptrdiff_t estimate = 152;
+  const ptrdiff_t estimate = 124;
   const ptrdiff_t codesize = typecheckSize + lookupSize;
   slop_delta  = (int)(estimate - codesize);
   slop_bytes += slop_delta;
--- a/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_32.ad	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_32.ad	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 //
 // Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
 // under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 //
 // Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
 // under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/cpu/x86/x86.ad	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/cpu/x86/x86.ad	Tue Jan 21 07:29:48 2020 +0530
@@ -2143,7 +2143,7 @@
 
 // Replaces legVec during post-selection cleanup. See above.
 operand legVecZ() %{
-  constraint(ALLOC_IN_RC(vectorz_reg_vl));
+  constraint(ALLOC_IN_RC(vectorz_reg_legacy));
   match(VecZ);
 
   format %{ %}
--- a/src/hotspot/os/bsd/gc/z/zBackingFile_bsd.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,183 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/z/zBackingFile_bsd.hpp"
-#include "gc/z/zErrno.hpp"
-#include "gc/z/zGlobals.hpp"
-#include "gc/z/zLargePages.inline.hpp"
-#include "gc/z/zPhysicalMemory.inline.hpp"
-#include "logging/log.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
-#include "utilities/debug.hpp"
-
-#include <mach/mach.h>
-#include <mach/mach_vm.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-
-static int vm_flags_superpage() {
-  if (!ZLargePages::is_explicit()) {
-    return 0;
-  }
-
-  const int page_size_in_megabytes = ZGranuleSize >> 20;
-  return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT;
-}
-
-static ZErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
-  mach_vm_address_t remap_addr = to_addr;
-  vm_prot_t remap_cur_prot;
-  vm_prot_t remap_max_prot;
-
-  // Remap memory to an additional location
-  const kern_return_t res = mach_vm_remap(mach_task_self(),
-                                          &remap_addr,
-                                          size,
-                                          0 /* mask */,
-                                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(),
-                                          mach_task_self(),
-                                          from_addr,
-                                          FALSE /* copy */,
-                                          &remap_cur_prot,
-                                          &remap_max_prot,
-                                          VM_INHERIT_COPY);
-
-  return (res == KERN_SUCCESS) ? ZErrno(0) : ZErrno(EINVAL);
-}
-
-ZBackingFile::ZBackingFile() :
-    _base(0),
-    _size(0),
-    _initialized(false) {
-
-  // Reserve address space for virtual backing file
-  _base = (uintptr_t)os::reserve_memory(MaxHeapSize);
-  if (_base == 0) {
-    // Failed
-    log_error(gc)("Failed to reserve address space for virtual backing file");
-    return;
-  }
-
-  // Successfully initialized
-  _initialized = true;
-}
-
-bool ZBackingFile::is_initialized() const {
-  return _initialized;
-}
-
-size_t ZBackingFile::size() const {
-  return _size;
-}
-
-bool ZBackingFile::commit_inner(size_t offset, size_t length) {
-  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
-  assert(is_aligned(length, os::vm_page_size()), "Invalid length");
-
-  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
-                      offset / M, (offset + length) / M, length / M);
-
-  const uintptr_t addr = _base + offset;
-  const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-  if (res == MAP_FAILED) {
-    ZErrno err;
-    log_error(gc)("Failed to commit memory (%s)", err.to_string());
-    return false;
-  }
-
-  const size_t end = offset + length;
-  if (end > _size) {
-    // Record new virtual file size
-    _size = end;
-  }
-
-  // Success
-  return true;
-}
-
-size_t ZBackingFile::commit(size_t offset, size_t length) {
-  // Try to commit the whole region
-  if (commit_inner(offset, length)) {
-    // Success
-    return length;
-  }
-
-  // Failed, try to commit as much as possible
-  size_t start = offset;
-  size_t end = offset + length;
-
-  for (;;) {
-    length = align_down((end - start) / 2, ZGranuleSize);
-    if (length == 0) {
-      // Done, don't commit more
-      return start - offset;
-    }
-
-    if (commit_inner(start, length)) {
-      // Success, try commit more
-      start += length;
-    } else {
-      // Failed, try commit less
-      end -= length;
-    }
-  }
-}
-
-size_t ZBackingFile::uncommit(size_t offset, size_t length) {
-  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
-  assert(is_aligned(length, os::vm_page_size()), "Invalid length");
-
-  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
-                      offset / M, (offset + length) / M, length / M);
-
-  const uintptr_t start = _base + offset;
-  const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
-  if (res == MAP_FAILED) {
-    ZErrno err;
-    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
-    return 0;
-  }
-
-  return length;
-}
-
-void ZBackingFile::map(uintptr_t addr, size_t size, uintptr_t offset) const {
-  const ZErrno err = mremap(_base + offset, addr, size);
-  if (err) {
-    fatal("Failed to remap memory (%s)", err.to_string());
-  }
-}
-
-void ZBackingFile::unmap(uintptr_t addr, size_t size) const {
-  // Note that we must keep the address space reservation intact and just detach
-  // the backing memory. For this reason we map a new anonymous, non-accessible
-  // and non-reserved page over the mapping instead of actually unmapping.
-  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
-  if (res == MAP_FAILED) {
-    ZErrno err;
-    fatal("Failed to map memory (%s)", err.to_string());
-  }
-}
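
The unmap path of this removed file (re-created below as part of
zPhysicalMemoryBacking_bsd.cpp) shows a trick that survives the rename: keep
the address-space reservation intact and detach only the backing memory by
mapping an inaccessible, unreserved anonymous page over it. A self-contained
POSIX sketch of the same idea, using plain mmap and no HotSpot types:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t size = 1 << 20;
      // Reserve and commit a normal read/write mapping.
      void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (p == MAP_FAILED) return 1;
      // "Unmap": overwrite with PROT_NONE + MAP_NORESERVE so the backing
      // memory is released but the address range stays reserved.
      void* q = mmap(p, size, PROT_NONE,
                     MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                     -1, 0);
      printf("%s\n", q == p ? "reservation kept" : "mmap failed");
      return q == p ? 0 : 1;
    }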
--- a/src/hotspot/os/bsd/gc/z/zBackingFile_bsd.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_BSD_GC_Z_ZBACKINGFILE_BSD_HPP
-#define OS_BSD_GC_Z_ZBACKINGFILE_BSD_HPP
-
-#include "memory/allocation.hpp"
-
-class ZPhysicalMemory;
-
-// On macOS, we use a virtual backing file. It is represented by a reserved virtual
-// address space, in which we commit physical memory using the mach_vm_map() API.
-// The multi-mapping API simply remaps these addresses using mach_vm_remap() into
-// the different heap views. This works as-if there was a backing file, it's just
-// that the file is represented with memory mappings instead.
-
-class ZBackingFile {
-private:
-  uintptr_t _base;
-  size_t    _size;
-  bool      _initialized;
-
-  bool commit_inner(size_t offset, size_t length);
-
-public:
-  ZBackingFile();
-
-  bool is_initialized() const;
-
-  size_t size() const;
-
-  size_t commit(size_t offset, size_t length);
-  size_t uncommit(size_t offset, size_t length);
-
-  void map(uintptr_t addr, size_t size, uintptr_t offset) const;
-  void unmap(uintptr_t addr, size_t size) const;
-};
-
-#endif // OS_BSD_GC_Z_ZBACKINGFILE_BSD_HPP
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,198 +22,170 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zErrno.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zLargePages.inline.hpp"
 #include "gc/z/zPhysicalMemory.inline.hpp"
 #include "gc/z/zPhysicalMemoryBacking_bsd.hpp"
+#include "logging/log.hpp"
 #include "runtime/globals.hpp"
-#include "runtime/init.hpp"
 #include "runtime/os.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+// The backing is represented by a reserved virtual address space, in which
+// we commit and uncommit physical memory. Multi-mapping the different heap
+// views is done by simply remapping the backing memory using mach_vm_remap().
+
+static int vm_flags_superpage() {
+  if (!ZLargePages::is_explicit()) {
+    return 0;
+  }
+
+  const int page_size_in_megabytes = ZGranuleSize >> 20;
+  return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT;
+}
+
+static ZErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
+  mach_vm_address_t remap_addr = to_addr;
+  vm_prot_t remap_cur_prot;
+  vm_prot_t remap_max_prot;
+
+  // Remap memory to an additional location
+  const kern_return_t res = mach_vm_remap(mach_task_self(),
+                                          &remap_addr,
+                                          size,
+                                          0 /* mask */,
+                                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(),
+                                          mach_task_self(),
+                                          from_addr,
+                                          FALSE /* copy */,
+                                          &remap_cur_prot,
+                                          &remap_max_prot,
+                                          VM_INHERIT_COPY);
+
+  return (res == KERN_SUCCESS) ? ZErrno(0) : ZErrno(EINVAL);
+}
+
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking() :
+    _base(0),
+    _size(0),
+    _initialized(false) {
+
+  // Reserve address space for backing memory
+  _base = (uintptr_t)os::reserve_memory(MaxHeapSize);
+  if (_base == 0) {
+    // Failed
+    log_error(gc)("Failed to reserve address space for backing memory");
+    return;
+  }
+
+  // Successfully initialized
+  _initialized = true;
+}
+
 bool ZPhysicalMemoryBacking::is_initialized() const {
-  return _file.is_initialized();
+  return _initialized;
 }
 
 void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
   // Does nothing
 }
 
-bool ZPhysicalMemoryBacking::supports_uncommit() {
-  assert(!is_init_completed(), "Invalid state");
-  assert(_file.size() >= ZGranuleSize, "Invalid size");
-
-  // Test if uncommit is supported by uncommitting and then re-committing a granule
-  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
-}
-
-size_t ZPhysicalMemoryBacking::commit(size_t size) {
-  size_t committed = 0;
-
-  // Fill holes in the backing file
-  while (committed < size) {
-    size_t allocated = 0;
-    const size_t remaining = size - committed;
-    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
-    if (start == UINTPTR_MAX) {
-      // No holes to commit
-      break;
-    }
-
-    // Try commit hole
-    const size_t filled = _file.commit(start, allocated);
-    if (filled > 0) {
-      // Successful or partially successful
-      _committed.free(start, filled);
-      committed += filled;
-    }
-    if (filled < allocated) {
-      // Failed or partially failed
-      _uncommitted.free(start + filled, allocated - filled);
-      return committed;
-    }
-  }
-
-  // Expand backing file
-  if (committed < size) {
-    const size_t remaining = size - committed;
-    const uintptr_t start = _file.size();
-    const size_t expanded = _file.commit(start, remaining);
-    if (expanded > 0) {
-      // Successful or partially successful
-      _committed.free(start, expanded);
-      committed += expanded;
-    }
-  }
-
-  return committed;
+size_t ZPhysicalMemoryBacking::size() const {
+  return _size;
 }
 
-size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
-  size_t uncommitted = 0;
+bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
+  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
+  assert(is_aligned(length, os::vm_page_size()), "Invalid length");
 
-  // Punch holes in backing file
-  while (uncommitted < size) {
-    size_t allocated = 0;
-    const size_t remaining = size - uncommitted;
-    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
-    assert(start != UINTPTR_MAX, "Allocation should never fail");
+  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
 
-    // Try punch hole
-    const size_t punched = _file.uncommit(start, allocated);
-    if (punched > 0) {
-      // Successful or partially successful
-      _uncommitted.free(start, punched);
-      uncommitted += punched;
-    }
-    if (punched < allocated) {
-      // Failed or partially failed
-      _committed.free(start + punched, allocated - punched);
-      return uncommitted;
-    }
+  const uintptr_t addr = _base + offset;
+  const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    log_error(gc)("Failed to commit memory (%s)", err.to_string());
+    return false;
+  }
+
+  const size_t end = offset + length;
+  if (end > _size) {
+    // Record new size
+    _size = end;
   }
 
-  return uncommitted;
+  // Success
+  return true;
 }
 
-ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
-  assert(is_aligned(size, ZGranuleSize), "Invalid size");
-
-  ZPhysicalMemory pmem;
-
-  // Allocate segments
-  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
-    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
-    assert(start != UINTPTR_MAX, "Allocation should never fail");
-    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
+size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
+  // Try to commit the whole region
+  if (commit_inner(offset, length)) {
+    // Success
+    return length;
   }
 
-  return pmem;
-}
+  // Failed, try to commit as much as possible
+  size_t start = offset;
+  size_t end = offset + length;
 
-void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
-  const size_t nsegments = pmem.nsegments();
+  for (;;) {
+    length = align_down((end - start) / 2, ZGranuleSize);
+    if (length == 0) {
+      // Done, don't commit more
+      return start - offset;
+    }
 
-  // Free segments
-  for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment& segment = pmem.segment(i);
-    _committed.free(segment.start(), segment.size());
+    if (commit_inner(start, length)) {
+      // Success, try commit more
+      start += length;
+    } else {
+      // Failed, try commit less
+      end -= length;
+    }
   }
 }
 
-void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
-  const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
-  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
+  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
+  assert(is_aligned(length, os::vm_page_size()), "Invalid length");
+
+  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  const uintptr_t start = _base + offset;
+  const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
+    return 0;
+  }
+
+  return length;
 }
 
-void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
-  const size_t nsegments = pmem.nsegments();
-  size_t size = 0;
-
-  // Map segments
-  for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment& segment = pmem.segment(i);
-    const uintptr_t segment_addr = addr + size;
-    _file.map(segment_addr, segment.size(), segment.start());
-    size += segment.size();
+void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
+  const ZErrno err = mremap(_base + offset, addr, size);
+  if (err) {
+    fatal("Failed to remap memory (%s)", err.to_string());
   }
 }
 
-void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
-  _file.unmap(addr, pmem.size());
-}
-
-uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
-  // From an NMT point of view we treat the first heap view (marked0) as committed
-  return ZAddress::marked0(offset);
-}
-
-void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const {
-  if (ZVerifyViews) {
-    // Pre-touch good view
-    pretouch_view(ZAddress::good(offset), size);
-  } else {
-    // Pre-touch all views
-    pretouch_view(ZAddress::marked0(offset), size);
-    pretouch_view(ZAddress::marked1(offset), size);
-    pretouch_view(ZAddress::remapped(offset), size);
+void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
+  // Note that we must keep the address space reservation intact and just detach
+  // the backing memory. For this reason we map a new anonymous, non-accessible
+  // and non-reserved page over the mapping instead of actually unmapping.
+  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    fatal("Failed to map memory (%s)", err.to_string());
   }
 }
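
unmap() relies on the overlay idiom spelled out in the comment: replacing the range with a fresh PROT_NONE, MAP_NORESERVE anonymous mapping releases the backing memory while keeping the address range reserved, so a concurrent mmap(NULL, ...) can never be handed an address inside the heap. A minimal standalone sketch of the idiom (plain POSIX, not ZGC code):

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

int main() {
  const size_t size = 1 << 20;

  // Reserve and commit a range, as a commit step would.
  void* addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (addr == MAP_FAILED) { perror("mmap"); return 1; }

  // Detach the backing memory but keep the reservation: overlay the range
  // instead of calling munmap(), which would give it back to the OS.
  void* res = mmap(addr, size, PROT_NONE,
                   MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
  if (res == MAP_FAILED) { perror("overlay"); return 1; }

  printf("%p still reserved, backing memory released\n", addr);
  return 0;
}
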
-
-void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  if (ZVerifyViews) {
-    // Map good view
-    map_view(pmem, ZAddress::good(offset));
-  } else {
-    // Map all views
-    map_view(pmem, ZAddress::marked0(offset));
-    map_view(pmem, ZAddress::marked1(offset));
-    map_view(pmem, ZAddress::remapped(offset));
-  }
-}
-
-void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  if (ZVerifyViews) {
-    // Unmap good view
-    unmap_view(pmem, ZAddress::good(offset));
-  } else {
-    // Unmap all views
-    unmap_view(pmem, ZAddress::marked0(offset));
-    unmap_view(pmem, ZAddress::marked1(offset));
-    unmap_view(pmem, ZAddress::remapped(offset));
-  }
-}
-
-void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  // Map good view
-  assert(ZVerifyViews, "Should be enabled");
-  map_view(pmem, ZAddress::good(offset));
-}
-
-void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  // Unmap good view
-  assert(ZVerifyViews, "Should be enabled");
-  unmap_view(pmem, ZAddress::good(offset));
-}
--- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,42 +24,28 @@
 #ifndef OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
 #define OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
 
-#include "gc/z/zBackingFile_bsd.hpp"
-#include "gc/z/zMemory.hpp"
-
-class ZPhysicalMemory;
-
 class ZPhysicalMemoryBacking {
 private:
-  ZBackingFile   _file;
-  ZMemoryManager _committed;
-  ZMemoryManager _uncommitted;
+  uintptr_t _base;
+  size_t    _size;
+  bool      _initialized;
 
-  void pretouch_view(uintptr_t addr, size_t size) const;
-  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
-  void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+  bool commit_inner(size_t offset, size_t length);
 
 public:
+  ZPhysicalMemoryBacking();
+
   bool is_initialized() const;
 
   void warn_commit_limits(size_t max) const;
-  bool supports_uncommit();
 
-  size_t commit(size_t size);
-  size_t uncommit(size_t size);
-
-  ZPhysicalMemory alloc(size_t size);
-  void free(const ZPhysicalMemory& pmem);
+  size_t size() const;
 
-  uintptr_t nmt_address(uintptr_t offset) const;
-
-  void pretouch(uintptr_t offset, size_t size) const;
+  size_t commit(size_t offset, size_t length);
+  size_t uncommit(size_t offset, size_t length);
 
-  void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-
-  void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void map(uintptr_t addr, size_t size, uintptr_t offset) const;
+  void unmap(uintptr_t addr, size_t size) const;
 };
 
 #endif // OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <string.h>
+#include <math.h>
+#include <errno.h>
+#include "cgroupSubsystem_linux.hpp"
+#include "cgroupV1Subsystem_linux.hpp"
+#include "cgroupV2Subsystem_linux.hpp"
+#include "logging/log.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+CgroupSubsystem* CgroupSubsystemFactory::create() {
+  CgroupV1MemoryController* memory = NULL;
+  CgroupV1Controller* cpuset = NULL;
+  CgroupV1Controller* cpu = NULL;
+  CgroupV1Controller* cpuacct = NULL;
+  FILE *mntinfo = NULL;
+  FILE *cgroups = NULL;
+  FILE *cgroup = NULL;
+  char buf[MAXPATHLEN+1];
+  char tmproot[MAXPATHLEN+1];
+  char tmpmount[MAXPATHLEN+1];
+  char *p;
+  bool is_cgroupsV2;
+  // true iff all required controllers (memory, cpu, cpuset, cpuacct)
+  // are enabled at the kernel level.
+  bool all_controllers_enabled;
+
+  CgroupInfo cg_infos[CG_INFO_LENGTH];
+  int cpuset_idx  = 0;
+  int cpu_idx     = 1;
+  int cpuacct_idx = 2;
+  int memory_idx  = 3;
+
+  /*
+   * Read /proc/cgroups so as to be able to distinguish cgroups v2 vs cgroups v1.
+   *
+   * On a cgroups v1 system the cpu, cpuacct, cpuset and memory controllers
+   * must have a non-zero hierarchy ID; on a cgroups v2 (unified hierarchy)
+   * system all hierarchy IDs are 0.
+   */
+  cgroups = fopen("/proc/cgroups", "r");
+  if (cgroups == NULL) {
+      log_debug(os, container)("Can't open /proc/cgroups, %s",
+                               os::strerror(errno));
+      return NULL;
+  }
+
+  while ((p = fgets(buf, MAXPATHLEN, cgroups)) != NULL) {
+    char name[MAXPATHLEN+1];
+    int  hierarchy_id;
+    int  enabled;
+
+    // Format of /proc/cgroups documented via man 7 cgroups
+    if (sscanf(p, "%s %d %*d %d", name, &hierarchy_id, &enabled) != 3) {
+      continue;
+    }
+    if (strcmp(name, "memory") == 0) {
+      cg_infos[memory_idx]._name = os::strdup(name);
+      cg_infos[memory_idx]._hierarchy_id = hierarchy_id;
+      cg_infos[memory_idx]._enabled = (enabled == 1);
+    } else if (strcmp(name, "cpuset") == 0) {
+      cg_infos[cpuset_idx]._name = os::strdup(name);
+      cg_infos[cpuset_idx]._hierarchy_id = hierarchy_id;
+      cg_infos[cpuset_idx]._enabled = (enabled == 1);
+    } else if (strcmp(name, "cpu") == 0) {
+      cg_infos[cpu_idx]._name = os::strdup(name);
+      cg_infos[cpu_idx]._hierarchy_id = hierarchy_id;
+      cg_infos[cpu_idx]._enabled = (enabled == 1);
+    } else if (strcmp(name, "cpuacct") == 0) {
+      cg_infos[cpuacct_idx]._name = os::strdup(name);
+      cg_infos[cpuacct_idx]._hierarchy_id = hierarchy_id;
+      cg_infos[cpuacct_idx]._enabled = (enabled == 1);
+    }
+  }
+  fclose(cgroups);
+
+  is_cgroupsV2 = true;
+  all_controllers_enabled = true;
+  for (int i = 0; i < CG_INFO_LENGTH; i++) {
+    is_cgroupsV2 = is_cgroupsV2 && cg_infos[i]._hierarchy_id == 0;
+    all_controllers_enabled = all_controllers_enabled && cg_infos[i]._enabled;
+  }
+
+  if (!all_controllers_enabled) {
+    // one or more controllers disabled, disable container support
+    log_debug(os, container)("One or more required controllers disabled at kernel level.");
+    return NULL;
+  }
+
+  /*
+   * Read /proc/self/cgroup and determine:
+   *  - the cgroup path for cgroups v2 or
+   *  - on a cgroups v1 system, collect info for mapping
+   *    the host mount point to the local one via /proc/self/mountinfo below.
+   */
+  cgroup = fopen("/proc/self/cgroup", "r");
+  if (cgroup == NULL) {
+    log_debug(os, container)("Can't open /proc/self/cgroup, %s",
+                             os::strerror(errno));
+    return NULL;
+  }
+
+  while ((p = fgets(buf, MAXPATHLEN, cgroup)) != NULL) {
+    char *controllers;
+    char *token;
+    char *hierarchy_id_str;
+    int  hierarchy_id;
+    char *cgroup_path;
+
+    hierarchy_id_str = strsep(&p, ":");
+    hierarchy_id = atoi(hierarchy_id_str);
+    /* Get controllers and base */
+    controllers = strsep(&p, ":");
+    cgroup_path = strsep(&p, "\n");
+
+    if (controllers == NULL) {
+      continue;
+    }
+
+    while (!is_cgroupsV2 && (token = strsep(&controllers, ",")) != NULL) {
+      if (strcmp(token, "memory") == 0) {
+        assert(hierarchy_id == cg_infos[memory_idx]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch");
+        cg_infos[memory_idx]._cgroup_path = os::strdup(cgroup_path);
+      } else if (strcmp(token, "cpuset") == 0) {
+        assert(hierarchy_id == cg_infos[cpuset_idx]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch");
+        cg_infos[cpuset_idx]._cgroup_path = os::strdup(cgroup_path);
+      } else if (strcmp(token, "cpu") == 0) {
+        assert(hierarchy_id == cg_infos[cpu_idx]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch");
+        cg_infos[cpu_idx]._cgroup_path = os::strdup(cgroup_path);
+      } else if (strcmp(token, "cpuacct") == 0) {
+        assert(hierarchy_id == cg_infos[cpuacct_idx]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch");
+        cg_infos[cpuacct_idx]._cgroup_path = os::strdup(cgroup_path);
+      }
+    }
+    if (is_cgroupsV2) {
+      for (int i = 0; i < CG_INFO_LENGTH; i++) {
+        cg_infos[i]._cgroup_path = os::strdup(cgroup_path);
+      }
+    }
+  }
+  fclose(cgroup);
+
+  if (is_cgroupsV2) {
+    // Find the cgroup2 mount point by reading /proc/self/mountinfo
+    mntinfo = fopen("/proc/self/mountinfo", "r");
+    if (mntinfo == NULL) {
+        log_debug(os, container)("Can't open /proc/self/mountinfo, %s",
+                                 os::strerror(errno));
+        return NULL;
+    }
+
+    char cgroupv2_mount[MAXPATHLEN+1];
+    char fstype[MAXPATHLEN+1];
+    bool mount_point_found = false;
+    while ((p = fgets(buf, MAXPATHLEN, mntinfo)) != NULL) {
+      char *tmp_mount_point = cgroupv2_mount;
+      char *tmp_fs_type = fstype;
+
+      // mountinfo format is documented at https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+      if (sscanf(p, "%*d %*d %*d:%*d %*s %s %*[^-]- %s cgroup2 %*s", tmp_mount_point, tmp_fs_type) == 2) {
+        // sscanf may match a line prematurely; make sure the fstype really is cgroup2
+        if (strcmp("cgroup2", tmp_fs_type) == 0) {
+          mount_point_found = true;
+          break;
+        }
+      }
+    }
+    fclose(mntinfo);
+    if (!mount_point_found) {
+      log_trace(os, container)("Mount point for cgroupv2 not found in /proc/self/mountinfo");
+      return NULL;
+    }
+    // Cgroups v2 case, we have all the info we need.
+    // Construct the subsystem, free resources and return
+    // Note: any index in cg_infos will do as the path is the same for
+    //       all controllers.
+    CgroupController* unified = new CgroupV2Controller(cgroupv2_mount, cg_infos[memory_idx]._cgroup_path);
+    for (int i = 0; i < CG_INFO_LENGTH; i++) {
+      os::free(cg_infos[i]._name);
+      os::free(cg_infos[i]._cgroup_path);
+    }
+    log_debug(os, container)("Detected cgroups v2 unified hierarchy");
+    return new CgroupV2Subsystem(unified);
+  }
+
+  // What follows is cgroups v1
+  log_debug(os, container)("Detected cgroups hybrid or legacy hierarchy, using cgroups v1 controllers");
+
+  /*
+   * Find the cgroup mount point for memory and cpuset
+   * by reading /proc/self/mountinfo
+   *
+   * Example for docker:
+   * 219 214 0:29 /docker/7208cebd00fa5f2e342b1094f7bed87fa25661471a4637118e65f1c995be8a34 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory
+   *
+   * Example for host:
+   * 34 28 0:29 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,memory
+   */
+  mntinfo = fopen("/proc/self/mountinfo", "r");
+  if (mntinfo == NULL) {
+      log_debug(os, container)("Can't open /proc/self/mountinfo, %s",
+                               os::strerror(errno));
+      return NULL;
+  }
+
+  while ((p = fgets(buf, MAXPATHLEN, mntinfo)) != NULL) {
+    char tmpcgroups[MAXPATHLEN+1];
+    char *cptr = tmpcgroups;
+    char *token;
+
+    // mountinfo format is documented at https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+    if (sscanf(p, "%*d %*d %*d:%*d %s %s %*[^-]- cgroup %*s %s", tmproot, tmpmount, tmpcgroups) != 3) {
+      continue;
+    }
+    while ((token = strsep(&cptr, ",")) != NULL) {
+      if (strcmp(token, "memory") == 0) {
+        memory = new CgroupV1MemoryController(tmproot, tmpmount);
+      } else if (strcmp(token, "cpuset") == 0) {
+        cpuset = new CgroupV1Controller(tmproot, tmpmount);
+      } else if (strcmp(token, "cpu") == 0) {
+        cpu = new CgroupV1Controller(tmproot, tmpmount);
+      } else if (strcmp(token, "cpuacct") == 0) {
+        cpuacct = new CgroupV1Controller(tmproot, tmpmount);
+      }
+    }
+  }
+
+  fclose(mntinfo);
+
+  if (memory == NULL) {
+    log_debug(os, container)("Required cgroup v1 memory subsystem not found");
+    return NULL;
+  }
+  if (cpuset == NULL) {
+    log_debug(os, container)("Required cgroup v1 cpuset subsystem not found");
+    return NULL;
+  }
+  if (cpu == NULL) {
+    log_debug(os, container)("Required cgroup v1 cpu subsystem not found");
+    return NULL;
+  }
+  if (cpuacct == NULL) {
+    log_debug(os, container)("Required cgroup v1 cpuacct subsystem not found");
+    return NULL;
+  }
+
+  /*
+   * Use info gathered previously from /proc/self/cgroup
+   * and map host mount point to
+   * local one via /proc/self/mountinfo content above
+   *
+   * Docker example:
+   * 5:memory:/docker/6558aed8fc662b194323ceab5b964f69cf36b3e8af877a14b80256e93aecb044
+   *
+   * Host example:
+   * 5:memory:/user.slice
+   *
+ * Construct a path to the process-specific memory and cpuset
+ * cgroup directories.
+ *
+ * For a container running under Docker, using the memory example
+ * above, the path would be:
+ *
+ * /sys/fs/cgroup/memory
+ *
+ * For the host, using the memory example above, the path would be:
+ *
+ * /sys/fs/cgroup/memory/user.slice
+   *
+   */
+  for (int i = 0; i < CG_INFO_LENGTH; i++) {
+    CgroupInfo info = cg_infos[i];
+    if (strcmp(info._name, "memory") == 0) {
+      memory->set_subsystem_path(info._cgroup_path);
+    } else if (strcmp(info._name, "cpuset") == 0) {
+      cpuset->set_subsystem_path(info._cgroup_path);
+    } else if (strcmp(info._name, "cpu") == 0) {
+      cpu->set_subsystem_path(info._cgroup_path);
+    } else if (strcmp(info._name, "cpuacct") == 0) {
+      cpuacct->set_subsystem_path(info._cgroup_path);
+    }
+  }
+  return new CgroupV1Subsystem(cpuset, cpu, cpuacct, memory);
+}
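
The version probe at the top of create() reduces to one invariant: on a cgroups v2 (unified) system every required controller row in /proc/cgroups carries hierarchy ID 0. A standalone sketch of just that probe (plain C++, error handling trimmed):

#include <cstdio>
#include <cstring>

int main() {
  FILE* f = fopen("/proc/cgroups", "r");
  if (f == NULL) { perror("/proc/cgroups"); return 1; }

  char line[1024];
  bool v2 = true;
  while (fgets(line, sizeof(line), f) != NULL) {
    char name[64];
    int hierarchy_id, enabled;
    // Same format as above: subsys_name hierarchy num_cgroups enabled
    if (sscanf(line, "%63s %d %*d %d", name, &hierarchy_id, &enabled) != 3) {
      continue;
    }
    if (strcmp(name, "memory") == 0 || strcmp(name, "cpu") == 0 ||
        strcmp(name, "cpuset") == 0 || strcmp(name, "cpuacct") == 0) {
      v2 = v2 && (hierarchy_id == 0);
    }
  }
  fclose(f);

  printf("cgroups %s\n", v2 ? "v2 (unified)" : "v1 (legacy/hybrid)");
  return 0;
}
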
+
+/* active_processor_count
+ *
+ * Calculate an appropriate number of active processors for the
+ * VM to use based on these three inputs.
+ *
+ * cpu affinity
+ * cgroup cpu quota & cpu period
+ * cgroup cpu shares
+ *
+ * Algorithm:
+ *
+ * Determine the number of available CPUs from sched_getaffinity
+ *
+ * If user specified a quota (quota != -1), calculate the number of
+ * required CPUs by dividing quota by period.
+ *
+ * If shares are in effect (shares != -1), calculate the number
+ * of CPUs required for the shares by dividing the share value
+ * by PER_CPU_SHARES.
+ *
+ * All results of division are rounded up to the next whole number.
+ *
+ * If neither shares nor quotas have been specified, return the
+ * number of active processors in the system.
+ *
+ * If both shares and quotas have been specified, the result is
+ * based on the flag PreferContainerQuotaForCPUCount.  If true,
+ * return the quota value.  If false, return the smaller of the
+ * shares and quota values.
+ *
+ * If shares and/or quotas have been specified, the resulting number
+ * returned will never exceed the number of active processors.
+ *
+ * return:
+ *    number of CPUs
+ */
+int CgroupSubsystem::active_processor_count() {
+  int quota_count = 0, share_count = 0;
+  int cpu_count, limit_count;
+  int result;
+
+  // We use a cache with a timeout to avoid performing expensive
+  // computations in the event this function is called frequently.
+  // [See 8227006].
+  CachingCgroupController* contrl = cpu_controller();
+  CachedMetric* cpu_limit = contrl->metrics_cache();
+  if (!cpu_limit->should_check_metric()) {
+    int val = (int)cpu_limit->value();
+    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", val);
+    return val;
+  }
+
+  cpu_count = limit_count = os::Linux::active_processor_count();
+  int quota  = cpu_quota();
+  int period = cpu_period();
+  int share  = cpu_shares();
+
+  if (quota > -1 && period > 0) {
+    quota_count = ceilf((float)quota / (float)period);
+    log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
+  }
+  if (share > -1) {
+    share_count = ceilf((float)share / (float)PER_CPU_SHARES);
+    log_trace(os, container)("CPU Share count based on shares: %d", share_count);
+  }
+
+  // If both shares and quotas are setup results depend
+  // on flag PreferContainerQuotaForCPUCount.
+  // If true, limit CPU count to quota
+  // If false, use minimum of shares and quotas
+  if (quota_count != 0 && share_count != 0) {
+    if (PreferContainerQuotaForCPUCount) {
+      limit_count = quota_count;
+    } else {
+      limit_count = MIN2(quota_count, share_count);
+    }
+  } else if (quota_count != 0) {
+    limit_count = quota_count;
+  } else if (share_count != 0) {
+    limit_count = share_count;
+  }
+
+  result = MIN2(cpu_count, limit_count);
+  log_trace(os, container)("OSContainer::active_processor_count: %d", result);
+
+  // Update cached metric to avoid re-reading container settings too often
+  cpu_limit->set_value(result, OSCONTAINER_CACHE_TIMEOUT);
+
+  return result;
+}
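
A worked example of the arithmetic, with hypothetical values: quota=150000, period=100000 and shares=4096 on an 8-CPU host give quota_count = ceil(1.5) = 2 and share_count = 4096/1024 = 4; the default PreferContainerQuotaForCPUCount=true picks the quota, so min(8, 2) = 2 processors are reported:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const int host_cpus = 8;                      // sched_getaffinity result
  const int quota = 150000, period = 100000;    // cpu.cfs_quota_us / cpu.cfs_period_us
  const int shares = 4096;                      // cpu.shares
  const int per_cpu_shares = 1024;

  const int quota_count = (int)std::ceil((double)quota / period);           // 2
  const int share_count = (int)std::ceil((double)shares / per_cpu_shares);  // 4

  // PreferContainerQuotaForCPUCount=true: take the quota; false would
  // take std::min(quota_count, share_count) instead.
  const int limit = quota_count;

  printf("active processors: %d\n", std::min(host_cpus, limit));            // 2
  return 0;
}
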
+
+/* memory_limit_in_bytes
+ *
+ * Return the limit of available memory for this process.
+ *
+ * return:
+ *    memory limit in bytes or
+ *    -1 for unlimited
+ *    OSCONTAINER_ERROR for not supported
+ */
+jlong CgroupSubsystem::memory_limit_in_bytes() {
+  CachingCgroupController* contrl = memory_controller();
+  CachedMetric* memory_limit = contrl->metrics_cache();
+  if (!memory_limit->should_check_metric()) {
+    return memory_limit->value();
+  }
+  jlong mem_limit = read_memory_limit_in_bytes();
+  // Update cached metric to avoid re-reading container settings too often
+  memory_limit->set_value(mem_limit, OSCONTAINER_CACHE_TIMEOUT);
+  return mem_limit;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CGROUP_SUBSYSTEM_LINUX_HPP
+#define CGROUP_SUBSYSTEM_LINUX_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/os.hpp"
+#include "logging/log.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#include "osContainer_linux.hpp"
+
+// Shared cgroups code (used by cgroup version 1 and version 2)
+
+/*
+ * PER_CPU_SHARES is set to 1024 because cloud frameworks like
+ * Kubernetes[1], AWS[2] and Mesos[3] all use CPU shares in a similar
+ * way: they spawn containers with --cpu-shares values scaled by
+ * PER_CPU_SHARES per requested CPU. Thus, we apply the inverse to
+ * determine the number of CPUs available to the JVM inside a
+ * container. See JDK-8216366.
+ *
+ * [1] https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
+ *     In particular:
+ *        When using Docker:
+ *          The spec.containers[].resources.requests.cpu is converted to its core value, which is potentially
+ *          fractional, and multiplied by 1024. The greater of this number or 2 is used as the value of the
+ *          --cpu-shares flag in the docker run command.
+ * [2] https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html
+ * [3] https://github.com/apache/mesos/blob/3478e344fb77d931f6122980c6e94cd3913c441d/src/docker/docker.cpp#L648
+ *     https://github.com/apache/mesos/blob/3478e344fb77d931f6122980c6e94cd3913c441d/src/slave/containerizer/mesos/isolators/cgroups/constants.hpp#L30
+ */
+#define PER_CPU_SHARES 1024
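
For example, a container launched with a hypothetical --cpu-shares=2560 is treated as requesting ceil(2560 / PER_CPU_SHARES) = 3 CPUs:

#include <cmath>
#include <cstdio>

int main() {
  const int per_cpu_shares = 1024;   // mirrors PER_CPU_SHARES above
  const int shares = 2560;           // hypothetical docker run --cpu-shares value
  printf("%d CPUs\n", (int)std::ceil((double)shares / per_cpu_shares));  // 3 CPUs
  return 0;
}
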
+
+typedef char * cptr;
+
+class CgroupController: public CHeapObj<mtInternal> {
+  public:
+    virtual char *subsystem_path() = 0;
+};
+
+PRAGMA_DIAG_PUSH
+PRAGMA_FORMAT_NONLITERAL_IGNORED
+template <typename T> int subsystem_file_line_contents(CgroupController* c,
+                                              const char *filename,
+                                              const char *matchline,
+                                              const char *scan_fmt,
+                                              T returnval) {
+  FILE *fp = NULL;
+  char *p;
+  char file[MAXPATHLEN+1];
+  char buf[MAXPATHLEN+1];
+  char discard[MAXPATHLEN+1];
+  bool found_match = false;
+
+  if (c == NULL) {
+    log_debug(os, container)("subsystem_file_line_contents: CgroupController* is NULL");
+    return OSCONTAINER_ERROR;
+  }
+  if (c->subsystem_path() == NULL) {
+    log_debug(os, container)("subsystem_file_line_contents: subsystem path is NULL");
+    return OSCONTAINER_ERROR;
+  }
+
+  strncpy(file, c->subsystem_path(), MAXPATHLEN);
+  file[MAXPATHLEN-1] = '\0';
+  int filelen = strlen(file);
+  if ((filelen + strlen(filename)) > (MAXPATHLEN-1)) {
+    log_debug(os, container)("File path too long %s, %s", file, filename);
+    return OSCONTAINER_ERROR;
+  }
+  strncat(file, filename, MAXPATHLEN-filelen);
+  log_trace(os, container)("Path to %s is %s", filename, file);
+  fp = fopen(file, "r");
+  if (fp != NULL) {
+    int err = 0;
+    while ((p = fgets(buf, MAXPATHLEN, fp)) != NULL) {
+      found_match = false;
+      if (matchline == NULL) {
+        // single-line file case
+        int matched = sscanf(p, scan_fmt, returnval);
+        found_match = (matched == 1);
+      } else {
+        // multi-line file case
+        if (strstr(p, matchline) != NULL) {
+          // discard matchline string prefix
+          int matched = sscanf(p, scan_fmt, discard, returnval);
+          found_match = (matched == 2);
+        } else {
+          continue; // substring not found
+        }
+      }
+      if (found_match) {
+        fclose(fp);
+        return 0;
+      } else {
+        err = 1;
+        log_debug(os, container)("Type %s not found in file %s", scan_fmt, file);
+      }
+    }
+    if (err == 0) {
+      log_debug(os, container)("Empty file %s", file);
+    }
+  } else {
+    log_debug(os, container)("Open of file %s failed, %s", file, os::strerror(errno));
+  }
+  if (fp != NULL)
+    fclose(fp);
+  return OSCONTAINER_ERROR;
+}
+PRAGMA_DIAG_POP
+
+#define GET_CONTAINER_INFO(return_type, subsystem, filename,              \
+                           logstring, scan_fmt, variable)                 \
+  return_type variable;                                                   \
+{                                                                         \
+  int err;                                                                \
+  err = subsystem_file_line_contents(subsystem,                           \
+                                     filename,                            \
+                                     NULL,                                \
+                                     scan_fmt,                            \
+                                     &variable);                          \
+  if (err != 0)                                                           \
+    return (return_type) OSCONTAINER_ERROR;                               \
+                                                                          \
+  log_trace(os, container)(logstring, variable);                          \
+}
+
+#define GET_CONTAINER_INFO_CPTR(return_type, subsystem, filename,         \
+                               logstring, scan_fmt, variable, bufsize)    \
+  char variable[bufsize];                                                 \
+{                                                                         \
+  int err;                                                                \
+  err = subsystem_file_line_contents(subsystem,                           \
+                                     filename,                            \
+                                     NULL,                                \
+                                     scan_fmt,                            \
+                                     variable);                           \
+  if (err != 0)                                                           \
+    return (return_type) NULL;                                            \
+                                                                          \
+  log_trace(os, container)(logstring, variable);                          \
+}
+
+#define GET_CONTAINER_INFO_LINE(return_type, controller, filename,        \
+                           matchline, logstring, scan_fmt, variable)      \
+  return_type variable;                                                   \
+{                                                                         \
+  int err;                                                                \
+  err = subsystem_file_line_contents(controller,                          \
+                                filename,                                 \
+                                matchline,                                \
+                                scan_fmt,                                 \
+                                &variable);                               \
+  if (err != 0)                                                           \
+    return (return_type) OSCONTAINER_ERROR;                               \
+                                                                          \
+  log_trace(os, container)(logstring, variable);                          \
+}
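
To see what these macros buy, a typical invocation such as GET_CONTAINER_INFO(int, controller, "/cpu.shares", "CPU Shares is: %d", "%d", shares) expands to roughly the following (an illustration derived from the definition above, not standalone code):

int shares;
{
  int err = subsystem_file_line_contents(controller,
                                         "/cpu.shares",
                                         NULL,       // single-line file, no matchline
                                         "%d",
                                         &shares);
  if (err != 0)
    return (int) OSCONTAINER_ERROR;

  log_trace(os, container)("CPU Shares is: %d", shares);
}
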
+
+// Four controllers: cpu, cpuset, cpuacct, memory
+#define CG_INFO_LENGTH 4
+
+class CachedMetric : public CHeapObj<mtInternal> {
+  private:
+    volatile jlong _metric;
+    volatile jlong _next_check_counter;
+  public:
+    CachedMetric() {
+      _metric = -1;
+      _next_check_counter = min_jlong;
+    }
+    bool should_check_metric() {
+      return os::elapsed_counter() > _next_check_counter;
+    }
+    jlong value() { return _metric; }
+    void set_value(jlong value, jlong timeout) {
+      _metric = value;
+      // Metric is unlikely to change, but we want to remain
+      // responsive to configuration changes. A very short grace time
+      // between re-reads avoids excessive overhead during startup without
+      // significantly reducing the VM's ability to promptly react to changed
+      // metric config
+      _next_check_counter = os::elapsed_counter() + timeout;
+    }
+};
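
A standalone sketch of the same timed-cache pattern, substituting std::chrono::steady_clock for os::elapsed_counter() (that substitution, and the stand-in reader, are assumptions made to keep the example runnable):

#include <chrono>
#include <cstdio>

struct TimedCache {
  long long metric = -1;
  std::chrono::steady_clock::time_point next_check{};  // epoch, so first check is due

  bool should_check() const { return std::chrono::steady_clock::now() > next_check; }
  void set(long long v, std::chrono::milliseconds timeout) {
    metric = v;
    next_check = std::chrono::steady_clock::now() + timeout;
  }
};

static long long read_limit_expensive() { return 512; }  // stand-in for a /sys read

int main() {
  TimedCache cache;
  for (int i = 0; i < 3; i++) {
    if (cache.should_check()) {
      cache.set(read_limit_expensive(), std::chrono::milliseconds(20));  // slow path
    }
    printf("limit=%lld\n", cache.metric);  // later iterations hit the cache
  }
  return 0;
}
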
+
+class CachingCgroupController : public CHeapObj<mtInternal> {
+  private:
+    CgroupController* _controller;
+    CachedMetric* _metrics_cache;
+
+  public:
+    CachingCgroupController(CgroupController* cont) {
+      _controller = cont;
+      _metrics_cache = new CachedMetric();
+    }
+
+    CachedMetric* metrics_cache() { return _metrics_cache; }
+    CgroupController* controller() { return _controller; }
+};
+
+class CgroupSubsystem: public CHeapObj<mtInternal> {
+  public:
+    jlong memory_limit_in_bytes();
+    int active_processor_count();
+
+    virtual int cpu_quota() = 0;
+    virtual int cpu_period() = 0;
+    virtual int cpu_shares() = 0;
+    virtual jlong memory_usage_in_bytes() = 0;
+    virtual jlong memory_and_swap_limit_in_bytes() = 0;
+    virtual jlong memory_soft_limit_in_bytes() = 0;
+    virtual jlong memory_max_usage_in_bytes() = 0;
+    virtual char * cpu_cpuset_cpus() = 0;
+    virtual char * cpu_cpuset_memory_nodes() = 0;
+    virtual jlong read_memory_limit_in_bytes() = 0;
+    virtual const char * container_type() = 0;
+    virtual CachingCgroupController* memory_controller() = 0;
+    virtual CachingCgroupController* cpu_controller() = 0;
+};
+
+class CgroupSubsystemFactory: AllStatic {
+  public:
+    static CgroupSubsystem* create();
+};
+
+// Class representing info in /proc/self/cgroup.
+// See man 7 cgroups
+class CgroupInfo : public StackObj {
+  friend class CgroupSubsystemFactory;
+
+  private:
+  char* _name;
+  int _hierarchy_id;
+  bool _enabled;
+  char* _cgroup_path;
+
+};
+
+
+#endif // CGROUP_SUBSYSTEM_LINUX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <string.h>
+#include <math.h>
+#include <errno.h>
+#include "cgroupV1Subsystem_linux.hpp"
+#include "logging/log.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+/*
+ * Set the directory for subsystem-specific files based
+ * on the contents of the mountinfo and cgroup files.
+ */
+void CgroupV1Controller::set_subsystem_path(char *cgroup_path) {
+  char buf[MAXPATHLEN+1];
+  if (_root != NULL && cgroup_path != NULL) {
+    if (strcmp(_root, "/") == 0) {
+      int buflen;
+      strncpy(buf, _mount_point, MAXPATHLEN);
+      buf[MAXPATHLEN-1] = '\0';
+      if (strcmp(cgroup_path,"/") != 0) {
+        buflen = strlen(buf);
+        if ((buflen + strlen(cgroup_path)) > (MAXPATHLEN-1)) {
+          return;
+        }
+        strncat(buf, cgroup_path, MAXPATHLEN-buflen);
+        buf[MAXPATHLEN-1] = '\0';
+      }
+      _path = os::strdup(buf);
+    } else {
+      if (strcmp(_root, cgroup_path) == 0) {
+        strncpy(buf, _mount_point, MAXPATHLEN);
+        buf[MAXPATHLEN-1] = '\0';
+        _path = os::strdup(buf);
+      } else {
+        char *p = strstr(cgroup_path, _root);
+        if (p != NULL && p == _root) {
+          if (strlen(cgroup_path) > strlen(_root)) {
+            int buflen;
+            strncpy(buf, _mount_point, MAXPATHLEN);
+            buf[MAXPATHLEN-1] = '\0';
+            buflen = strlen(buf);
+            if ((buflen + strlen(cgroup_path) - strlen(_root)) > (MAXPATHLEN-1)) {
+              return;
+            }
+            strncat(buf, cgroup_path + strlen(_root), MAXPATHLEN-buflen);
+            buf[MAXPATHLEN-1] = '\0';
+            _path = os::strdup(buf);
+          }
+        }
+      }
+    }
+  }
+}
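
The three branches map the cgroup path read from /proc/self/cgroup onto the local mount point: host root, exact match, and root-as-prefix. A standalone walk-through with example paths (std::string in place of the fixed buffers):

#include <cstdio>
#include <cstring>
#include <string>

static std::string subsystem_path(const char* root, const char* mount,
                                  const char* cgroup_path) {
  if (strcmp(root, "/") == 0) {                 // host: append cgroup path to mount
    std::string p = mount;
    if (strcmp(cgroup_path, "/") != 0) p += cgroup_path;
    return p;
  }
  if (strcmp(root, cgroup_path) == 0) {         // container: paths match, use mount
    return mount;
  }
  if (strncmp(cgroup_path, root, strlen(root)) == 0) {  // root is a proper prefix
    return std::string(mount) + (cgroup_path + strlen(root));
  }
  return "";
}

int main() {
  // Host: -> /sys/fs/cgroup/memory/user.slice
  printf("%s\n", subsystem_path("/", "/sys/fs/cgroup/memory", "/user.slice").c_str());
  // Docker: root equals cgroup path -> /sys/fs/cgroup/memory
  printf("%s\n", subsystem_path("/docker/7208cebd00fa", "/sys/fs/cgroup/memory",
                                "/docker/7208cebd00fa").c_str());
  return 0;
}
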
+
+/* uses_mem_hierarchy
+ *
+ * Return whether or not hierarchical cgroup accounting is being
+ * done.
+ *
+ * return:
+ *    A number > 0 if true, or
+ *    OSCONTAINER_ERROR for not supported
+ */
+jlong CgroupV1MemoryController::uses_mem_hierarchy() {
+  GET_CONTAINER_INFO(jlong, this, "/memory.use_hierarchy",
+                    "Use Hierarchy is: " JLONG_FORMAT, JLONG_FORMAT, use_hierarchy);
+  return use_hierarchy;
+}
+
+void CgroupV1MemoryController::set_subsystem_path(char *cgroup_path) {
+  CgroupV1Controller::set_subsystem_path(cgroup_path);
+  jlong hierarchy = uses_mem_hierarchy();
+  if (hierarchy > 0) {
+    set_hierarchical(true);
+  }
+}
+
+jlong CgroupV1Subsystem::read_memory_limit_in_bytes() {
+  GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.limit_in_bytes",
+                     "Memory Limit is: " JULONG_FORMAT, JULONG_FORMAT, memlimit);
+
+  if (memlimit >= _unlimited_memory) {
+    log_trace(os, container)("Non-Hierarchical Memory Limit is: Unlimited");
+    CgroupV1MemoryController* mem_controller = reinterpret_cast<CgroupV1MemoryController*>(_memory->controller());
+    if (mem_controller->is_hierarchical()) {
+      const char* matchline = "hierarchical_memory_limit";
+      const char* format = "%s " JULONG_FORMAT;
+      GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", matchline,
+                             "Hierarchical Memory Limit is: " JULONG_FORMAT, format, hier_memlimit)
+      if (hier_memlimit >= _unlimited_memory) {
+        log_trace(os, container)("Hierarchical Memory Limit is: Unlimited");
+      } else {
+        return (jlong)hier_memlimit;
+      }
+    }
+    return (jlong)-1;
+  } else {
+    return (jlong)memlimit;
+  }
+}
+
+jlong CgroupV1Subsystem::memory_and_swap_limit_in_bytes() {
+  GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.memsw.limit_in_bytes",
+                     "Memory and Swap Limit is: " JULONG_FORMAT, JULONG_FORMAT, memswlimit);
+  if (memswlimit >= _unlimited_memory) {
+    log_trace(os, container)("Non-Hierarchical Memory and Swap Limit is: Unlimited");
+    CgroupV1MemoryController* mem_controller = reinterpret_cast<CgroupV1MemoryController*>(_memory->controller());
+    if (mem_controller->is_hierarchical()) {
+      const char* matchline = "hierarchical_memsw_limit";
+      const char* format = "%s " JULONG_FORMAT;
+      GET_CONTAINER_INFO_LINE(julong, _memory->controller(), "/memory.stat", matchline,
+                             "Hierarchical Memory and Swap Limit is : " JULONG_FORMAT, format, hier_memlimit)
+      if (hier_memlimit >= _unlimited_memory) {
+        log_trace(os, container)("Hierarchical Memory and Swap Limit is: Unlimited");
+      } else {
+        return (jlong)hier_memlimit;
+      }
+    }
+    return (jlong)-1;
+  } else {
+    return (jlong)memswlimit;
+  }
+}
+
+jlong CgroupV1Subsystem::memory_soft_limit_in_bytes() {
+  GET_CONTAINER_INFO(julong, _memory->controller(), "/memory.soft_limit_in_bytes",
+                     "Memory Soft Limit is: " JULONG_FORMAT, JULONG_FORMAT, memsoftlimit);
+  if (memsoftlimit >= _unlimited_memory) {
+    log_trace(os, container)("Memory Soft Limit is: Unlimited");
+    return (jlong)-1;
+  } else {
+    return (jlong)memsoftlimit;
+  }
+}
+
+/* memory_usage_in_bytes
+ *
+ * Return the amount of used memory for this process.
+ *
+ * return:
+ *    memory usage in bytes or
+ *    -1 for unlimited
+ *    OSCONTAINER_ERROR for not supported
+ */
+jlong CgroupV1Subsystem::memory_usage_in_bytes() {
+  GET_CONTAINER_INFO(jlong, _memory->controller(), "/memory.usage_in_bytes",
+                     "Memory Usage is: " JLONG_FORMAT, JLONG_FORMAT, memusage);
+  return memusage;
+}
+
+/* memory_max_usage_in_bytes
+ *
+ * Return the maximum amount of used memory for this process.
+ *
+ * return:
+ *    max memory usage in bytes or
+ *    OSCONTAINER_ERROR for not supported
+ */
+jlong CgroupV1Subsystem::memory_max_usage_in_bytes() {
+  GET_CONTAINER_INFO(jlong, _memory->controller(), "/memory.max_usage_in_bytes",
+                     "Maximum Memory Usage is: " JLONG_FORMAT, JLONG_FORMAT, memmaxusage);
+  return memmaxusage;
+}
+
+char * CgroupV1Subsystem::cpu_cpuset_cpus() {
+  GET_CONTAINER_INFO_CPTR(cptr, _cpuset, "/cpuset.cpus",
+                     "cpuset.cpus is: %s", "%1023s", cpus, 1024);
+  return os::strdup(cpus);
+}
+
+char * CgroupV1Subsystem::cpu_cpuset_memory_nodes() {
+  GET_CONTAINER_INFO_CPTR(cptr, _cpuset, "/cpuset.mems",
+                     "cpuset.mems is: %s", "%1023s", mems, 1024);
+  return os::strdup(mems);
+}
+
+/* cpu_quota
+ *
+ * Return the number of microseconds per period that the
+ * process is guaranteed to run (cpu.cfs_quota_us).
+ *
+ * return:
+ *    quota time in microseconds
+ *    -1 for no quota
+ *    OSCONTAINER_ERROR for not supported
+ */
+int CgroupV1Subsystem::cpu_quota() {
+  GET_CONTAINER_INFO(int, _cpu->controller(), "/cpu.cfs_quota_us",
+                     "CPU Quota is: %d", "%d", quota);
+  return quota;
+}
+
+int CgroupV1Subsystem::cpu_period() {
+  GET_CONTAINER_INFO(int, _cpu->controller(), "/cpu.cfs_period_us",
+                     "CPU Period is: %d", "%d", period);
+  return period;
+}
+
+/* cpu_shares
+ *
+ * Return the amount of cpu shares available to the process
+ *
+ * return:
+ *    Share number (typically a number relative to 1024)
+ *                 (2048 typically expresses 2 CPUs' worth of processing)
+ *    -1 for no share setup
+ *    OSCONTAINER_ERROR for not supported
+ */
+int CgroupV1Subsystem::cpu_shares() {
+  GET_CONTAINER_INFO(int, _cpu->controller(), "/cpu.shares",
+                     "CPU Shares is: %d", "%d", shares);
+  // Convert 1024 to no shares setup
+  if (shares == 1024) return -1;
+
+  return shares;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/cgroupV1Subsystem_linux.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CGROUP_V1_SUBSYSTEM_LINUX_HPP
+#define CGROUP_V1_SUBSYSTEM_LINUX_HPP
+
+#include "runtime/os.hpp"
+#include "memory/allocation.hpp"
+#include "cgroupSubsystem_linux.hpp"
+
+// Cgroups version 1 specific implementation
+
+class CgroupV1Controller: public CgroupController {
+  private:
+    /* mountinfo contents */
+    char *_root;
+    char *_mount_point;
+
+    /* Constructed subsystem directory */
+    char *_path;
+
+  public:
+    CgroupV1Controller(char *root, char *mountpoint) {
+      _root = os::strdup(root);
+      _mount_point = os::strdup(mountpoint);
+      _path = NULL;
+    }
+
+    virtual void set_subsystem_path(char *cgroup_path);
+    char *subsystem_path() { return _path; }
+};
+
+class CgroupV1MemoryController: public CgroupV1Controller {
+
+  public:
+    bool is_hierarchical() { return _uses_mem_hierarchy; }
+    void set_subsystem_path(char *cgroup_path);
+  private:
+    /* Some container runtimes set limits via the cgroup
+     * hierarchy. If true, also consult the memory.stat
+     * file when everything else seems unlimited */
+    bool _uses_mem_hierarchy;
+    jlong uses_mem_hierarchy();
+    void set_hierarchical(bool value) { _uses_mem_hierarchy = value; }
+
+  public:
+    CgroupV1MemoryController(char *root, char *mountpoint) : CgroupV1Controller(root, mountpoint) {
+      _uses_mem_hierarchy = false;
+    }
+
+};
+
+class CgroupV1Subsystem: public CgroupSubsystem {
+
+  public:
+    jlong read_memory_limit_in_bytes();
+    jlong memory_and_swap_limit_in_bytes();
+    jlong memory_soft_limit_in_bytes();
+    jlong memory_usage_in_bytes();
+    jlong memory_max_usage_in_bytes();
+    char * cpu_cpuset_cpus();
+    char * cpu_cpuset_memory_nodes();
+
+    int cpu_quota();
+    int cpu_period();
+
+    int cpu_shares();
+
+    const char * container_type() {
+      return "cgroupv1";
+    }
+    CachingCgroupController * memory_controller() { return _memory; }
+    CachingCgroupController * cpu_controller() { return _cpu; }
+
+  private:
+    julong _unlimited_memory;
+
+    /* controllers */
+    CachingCgroupController* _memory = NULL;
+    CgroupV1Controller* _cpuset = NULL;
+    CachingCgroupController* _cpu = NULL;
+    CgroupV1Controller* _cpuacct = NULL;
+
+  public:
+    CgroupV1Subsystem(CgroupV1Controller* cpuset,
+                      CgroupV1Controller* cpu,
+                      CgroupV1Controller* cpuacct,
+                      CgroupV1MemoryController* memory) {
+      _cpuset = cpuset;
+      _cpu = new CachingCgroupController(cpu);
+      _cpuacct = cpuacct;
+      _memory = new CachingCgroupController(memory);
+      _unlimited_memory = (LONG_MAX / os::vm_page_size()) * os::vm_page_size();
+    }
+};
+
+#endif // CGROUP_V1_SUBSYSTEM_LINUX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2020, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "cgroupV2Subsystem_linux.hpp"
+
+/* cpu_shares
+ *
+ * Return the amount of cpu shares available to the process
+ *
+ * return:
+ *    Share number (typically a number relative to 1024)
+ *                 (2048 typically expresses 2 CPUs' worth of processing)
+ *    -1 for no share setup
+ *    OSCONTAINER_ERROR for not supported
+ */
+int CgroupV2Subsystem::cpu_shares() {
+  GET_CONTAINER_INFO(int, _unified, "/cpu.weight",
+                     "Raw value for CPU shares is: %d", "%d", shares);
+  // Convert default value of 100 to no shares setup
+  if (shares == 100) {
+    log_debug(os, container)("CPU Shares is: %d", -1);
+    return -1;
+  }
+
+  // CPU shares (OCI) value needs to get translated into
+  // a proper Cgroups v2 value. See:
+  // https://github.com/containers/crun/blob/master/crun.1.md#cpu-controller
+  //
+  // Use the inverse of (x == OCI value, y == cgroupsv2 value):
+  // ((262142 * y - 1)/9999) + 2 = x
+  //
+  int x = 262142 * shares - 1;
+  double frac = x/9999.0;
+  x = ((int)frac) + 2;
+  log_trace(os, container)("Scaled CPU shares value is: %d", x);
+  // Since the scaled value is not precise, return the closest
+  // multiple of PER_CPU_SHARES for a more conservative mapping
+  if (x <= PER_CPU_SHARES) {
+    // will always map to 1 CPU
+    log_debug(os, container)("CPU Shares is: %d", x);
+    return x;
+  }
+  int f = x/PER_CPU_SHARES;
+  int lower_multiple = f * PER_CPU_SHARES;
+  int upper_multiple = (f + 1) * PER_CPU_SHARES;
+  int distance_lower = MAX2(lower_multiple, x) - MIN2(lower_multiple, x);
+  int distance_upper = MAX2(upper_multiple, x) - MIN2(upper_multiple, x);
+  x = distance_lower <= distance_upper ? lower_multiple : upper_multiple;
+  log_trace(os, container)("Closest multiple of %d of the CPU Shares value is: %d", PER_CPU_SHARES, x);
+  log_debug(os, container)("CPU Shares is: %d", x);
+  return x;
+}
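
As a worked example of that mapping: a hypothetical cpu.weight of 200 scales to (262142 * 200 - 1) / 9999 + 2 = 5245 OCI shares, which then snaps to the nearer multiple of PER_CPU_SHARES, 5120, i.e. five CPUs' worth:

#include <cstdio>

int main() {
  const int per_cpu_shares = 1024;               // mirrors PER_CPU_SHARES
  const int weight = 200;                        // hypothetical raw cpu.weight

  int x = 262142 * weight - 1;
  x = (int)(x / 9999.0) + 2;                     // 5245 OCI shares

  if (x > per_cpu_shares) {
    const int f = x / per_cpu_shares;
    const int lower = f * per_cpu_shares;        // 5120
    const int upper = (f + 1) * per_cpu_shares;  // 6144
    x = (x - lower <= upper - x) ? lower : upper;  // snap to nearer multiple
  }
  printf("%d shares (%d CPUs' worth)\n", x, x / per_cpu_shares);  // 5120, 5
  return 0;
}
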
+
+/* cpu_quota
+ *
+ * Return the number of microseconds per period that the
+ * process is guaranteed to run (read from cpu.max).
+ *
+ * return:
+ *    quota time in microseconds
+ *    -1 for no quota
+ *    OSCONTAINER_ERROR for not supported
+ */
+int CgroupV2Subsystem::cpu_quota() {
+  char * cpu_quota_str = cpu_quota_val();
+  int limit = (int)limit_from_str(cpu_quota_str);
+  log_trace(os, container)("CPU Quota is: %d", limit);
+  return limit;
+}
+
+char * CgroupV2Subsystem::cpu_cpuset_cpus() {
+  GET_CONTAINER_INFO_CPTR(cptr, _unified, "/cpuset.cpus",
+                     "cpuset.cpus is: %s", "%1023s", cpus, 1024);
+  if (cpus == NULL) {
+    return NULL;
+  }
+  return os::strdup(cpus);
+}
+
+char* CgroupV2Subsystem::cpu_quota_val() {
+  GET_CONTAINER_INFO_CPTR(cptr, _unified, "/cpu.max",
+                     "Raw value for CPU quota is: %s", "%s %*d", quota, 1024);
+  if (quota == NULL) {
+    return NULL;
+  }
+  return os::strdup(quota);
+}
+
+char * CgroupV2Subsystem::cpu_cpuset_memory_nodes() {
+  GET_CONTAINER_INFO_CPTR(cptr, _unified, "/cpuset.mems",
+                     "cpuset.mems is: %s", "%1023s", mems, 1024);
+  if (mems == NULL) {
+    return NULL;
+  }
+  return os::strdup(mems);
+}
+
+int CgroupV2Subsystem::cpu_period() {
+  GET_CONTAINER_INFO(int, _unified, "/cpu.max",
+                     "CPU Period is: %d", "%*s %d", period);
+  return period;
+}
+
+/* memory_usage_in_bytes
+ *
+ * Return the amount of memory used by this cgroup and its descendants
+ *
+ * return:
+ *    memory usage in bytes or
+ *    -1 for unlimited
+ *    OSCONTAINER_ERROR for not supported
+ */
+jlong CgroupV2Subsystem::memory_usage_in_bytes() {
+  GET_CONTAINER_INFO(jlong, _unified, "/memory.current",
+                     "Memory Usage is: " JLONG_FORMAT, JLONG_FORMAT, memusage);
+  return memusage;
+}
+
+jlong CgroupV2Subsystem::memory_soft_limit_in_bytes() {
+  char* mem_soft_limit_str = mem_soft_limit_val();
+  return limit_from_str(mem_soft_limit_str);
+}
+
+jlong CgroupV2Subsystem::memory_max_usage_in_bytes() {
+  // Log this string at trace level so as to make tests happy.
+  log_trace(os, container)("Maximum Memory Usage is not supported.");
+  return OSCONTAINER_ERROR; // not supported
+}
+
+char* CgroupV2Subsystem::mem_soft_limit_val() {
+  GET_CONTAINER_INFO_CPTR(cptr, _unified, "/memory.high",
+                         "Memory Soft Limit is: %s", "%s", mem_soft_limit_str, 1024);
+  if (mem_soft_limit_str == NULL) {
+    return NULL;
+  }
+  return os::strdup(mem_soft_limit_str);
+}
+
+jlong CgroupV2Subsystem::memory_and_swap_limit_in_bytes() {
+  char* mem_swp_limit_str = mem_swp_limit_val();
+  return limit_from_str(mem_swp_limit_str);
+}
+
+char* CgroupV2Subsystem::mem_swp_limit_val() {
+  GET_CONTAINER_INFO_CPTR(cptr, _unified, "/memory.swap.max",
+                         "Memory and Swap Limit is: %s", "%s", mem_swp_limit_str, 1024);
+  if (mem_swp_limit_str == NULL) {
+    return NULL;
+  }
+  return os::strdup(mem_swp_limit_str);
+}
+
+/* memory_limit_in_bytes
+ *
+ * Return the limit of available memory for this process.
+ *
+ * return:
+ *    memory limit in bytes or
+ *    -1 for unlimited, OSCONTAINER_ERROR for an error
+ */
+jlong CgroupV2Subsystem::read_memory_limit_in_bytes() {
+  char * mem_limit_str = mem_limit_val();
+  jlong limit = limit_from_str(mem_limit_str);
+  if (log_is_enabled(Trace, os, container)) {
+    if (limit == -1) {
+      log_trace(os, container)("Memory Limit is: Unlimited");
+    } else {
+      log_trace(os, container)("Memory Limit is: " JLONG_FORMAT, limit);
+    }
+  }
+  return limit;
+}
+
+jlong CgroupV2Subsystem::limit_from_str(char* limit_str) {
+  if (limit_str == NULL) {
+    return OSCONTAINER_ERROR;
+  }
+  // Unlimited memory in Cgroups V2 is the literal string 'max'
+  if (strcmp("max", limit_str) == 0) {
+    os::free(limit_str);
+    return (jlong)-1;
+  }
+  julong limit;
+  if (sscanf(limit_str, JULONG_FORMAT, &limit) != 1) {
+    os::free(limit_str);
+    return OSCONTAINER_ERROR;
+  }
+  os::free(limit_str);
+  return (jlong)limit;
+}
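
The same "max"-or-number parse is easy to exercise in isolation. A standalone sketch (plain C++; -2 stands in for OSCONTAINER_ERROR and the os::free bookkeeping is dropped):

#include <cstdio>
#include <cstdlib>
#include <cstring>

static long long limit_from_str(const char* s) {
  if (s == NULL) return -2;                  // error
  if (strcmp(s, "max") == 0) return -1;      // cgroups v2 spelling of "unlimited"
  char* end = NULL;
  unsigned long long v = strtoull(s, &end, 10);
  if (end == s) return -2;                   // not a number
  return (long long)v;
}

int main() {
  printf("%lld %lld %lld\n",
         limit_from_str("max"),              // -1  (unlimited)
         limit_from_str("4294967296"),       // 4 GiB limit
         limit_from_str("bogus"));           // -2  (error)
  return 0;
}
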
+
+char* CgroupV2Subsystem::mem_limit_val() {
+  GET_CONTAINER_INFO_CPTR(cptr, _unified, "/memory.max",
+                         "Raw value for memory limit is: %s", "%s", mem_limit_str, 1024);
+  if (mem_limit_str == NULL) {
+    return NULL;
+  }
+  return os::strdup(mem_limit_str);
+}
+
+char* CgroupV2Controller::construct_path(char* mount_path, char *cgroup_path) {
+  char buf[MAXPATHLEN+1];
+  int buflen;
+  strncpy(buf, mount_path, MAXPATHLEN);
+  buf[MAXPATHLEN] = '\0';
+  buflen = strlen(buf);
+  if ((buflen + strlen(cgroup_path)) > MAXPATHLEN) {
+    return NULL;
+  }
+  strncat(buf, cgroup_path, MAXPATHLEN-buflen);
+  buf[MAXPATHLEN] = '\0';
+  return os::strdup(buf);
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/cgroupV2Subsystem_linux.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2020, Red Hat Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CGROUP_V2_SUBSYSTEM_LINUX_HPP
+#define CGROUP_V2_SUBSYSTEM_LINUX_HPP
+
+#include "cgroupSubsystem_linux.hpp"
+
+class CgroupV2Controller: public CgroupController {
+  private:
+    /* the mount path of the cgroup v2 hierarchy */
+    char *_mount_path;
+    /* The cgroup path for the controller */
+    char *_cgroup_path;
+
+    /* Constructed full path to the subsystem directory */
+    char *_path;
+    static char* construct_path(char* mount_path, char *cgroup_path);
+
+  public:
+    CgroupV2Controller(char * mount_path, char *cgroup_path) {
+      _mount_path = mount_path;
+      _cgroup_path = os::strdup(cgroup_path);
+      _path = construct_path(mount_path, cgroup_path);
+    }
+
+    char *subsystem_path() { return _path; }
+};
+
+class CgroupV2Subsystem: public CgroupSubsystem {
+  private:
+    /* One unified controller */
+    CgroupController* _unified = NULL;
+    /* Caching wrappers for cpu/memory metrics */
+    CachingCgroupController* _memory = NULL;
+    CachingCgroupController* _cpu = NULL;
+
+    char *mem_limit_val();
+    char *mem_swp_limit_val();
+    char *mem_soft_limit_val();
+    char *cpu_quota_val();
+    jlong limit_from_str(char* limit_str);
+
+  public:
+    CgroupV2Subsystem(CgroupController * unified) {
+      _unified = unified;
+      _memory = new CachingCgroupController(unified);
+      _cpu = new CachingCgroupController(unified);
+    }
+
+    jlong read_memory_limit_in_bytes();
+    int cpu_quota();
+    int cpu_period();
+    int cpu_shares();
+    jlong memory_and_swap_limit_in_bytes();
+    jlong memory_soft_limit_in_bytes();
+    jlong memory_usage_in_bytes();
+    jlong memory_max_usage_in_bytes();
+    char * cpu_cpuset_cpus();
+    char * cpu_cpuset_memory_nodes();
+    const char * container_type() {
+      return "cgroupv2";
+    }
+    CachingCgroupController * memory_controller() { return _memory; }
+    CachingCgroupController * cpu_controller() { return _cpu; }
+};
+
+#endif // CGROUP_V2_SUBSYSTEM_LINUX_HPP
--- a/src/hotspot/os/linux/gc/z/zBackingFile_linux.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,574 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/z/zArray.inline.hpp"
-#include "gc/z/zBackingFile_linux.hpp"
-#include "gc/z/zBackingPath_linux.hpp"
-#include "gc/z/zErrno.hpp"
-#include "gc/z/zGlobals.hpp"
-#include "gc/z/zLargePages.inline.hpp"
-#include "gc/z/zSyscall_linux.hpp"
-#include "logging/log.hpp"
-#include "runtime/init.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
-#include "utilities/debug.hpp"
-
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/statfs.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-//
-// Support for building on older Linux systems
-//
-
-// memfd_create(2) flags
-#ifndef MFD_CLOEXEC
-#define MFD_CLOEXEC                      0x0001U
-#endif
-#ifndef MFD_HUGETLB
-#define MFD_HUGETLB                      0x0004U
-#endif
-
-// open(2) flags
-#ifndef O_CLOEXEC
-#define O_CLOEXEC                        02000000
-#endif
-#ifndef O_TMPFILE
-#define O_TMPFILE                        (020000000 | O_DIRECTORY)
-#endif
-
-// fallocate(2) flags
-#ifndef FALLOC_FL_KEEP_SIZE
-#define FALLOC_FL_KEEP_SIZE              0x01
-#endif
-#ifndef FALLOC_FL_PUNCH_HOLE
-#define FALLOC_FL_PUNCH_HOLE             0x02
-#endif
-
-// Filesystem types, see statfs(2)
-#ifndef TMPFS_MAGIC
-#define TMPFS_MAGIC                      0x01021994
-#endif
-#ifndef HUGETLBFS_MAGIC
-#define HUGETLBFS_MAGIC                  0x958458f6
-#endif
-
-// Filesystem names
-#define ZFILESYSTEM_TMPFS                "tmpfs"
-#define ZFILESYSTEM_HUGETLBFS            "hugetlbfs"
-
-// Sysfs file for transparent huge page on tmpfs
-#define ZFILENAME_SHMEM_ENABLED          "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
-
-// Java heap filename
-#define ZFILENAME_HEAP                   "java_heap"
-
-// Preferred tmpfs mount points, ordered by priority
-static const char* z_preferred_tmpfs_mountpoints[] = {
-  "/dev/shm",
-  "/run/shm",
-  NULL
-};
-
-// Preferred hugetlbfs mount points, ordered by priority
-static const char* z_preferred_hugetlbfs_mountpoints[] = {
-  "/dev/hugepages",
-  "/hugepages",
-  NULL
-};
-
-static int z_fallocate_hugetlbfs_attempts = 3;
-static bool z_fallocate_supported = true;
-
-ZBackingFile::ZBackingFile() :
-    _fd(-1),
-    _size(0),
-    _filesystem(0),
-    _block_size(0),
-    _available(0),
-    _initialized(false) {
-
-  // Create backing file
-  _fd = create_fd(ZFILENAME_HEAP);
-  if (_fd == -1) {
-    return;
-  }
-
-  // Get filesystem statistics
-  struct statfs buf;
-  if (fstatfs(_fd, &buf) == -1) {
-    ZErrno err;
-    log_error(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
-    return;
-  }
-
-  _filesystem = buf.f_type;
-  _block_size = buf.f_bsize;
-  _available = buf.f_bavail * _block_size;
-
-  // Make sure we're on a supported filesystem
-  if (!is_tmpfs() && !is_hugetlbfs()) {
-    log_error(gc)("Backing file must be located on a %s or a %s filesystem",
-                  ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
-    return;
-  }
-
-  // Make sure the filesystem type matches requested large page type
-  if (ZLargePages::is_transparent() && !is_tmpfs()) {
-    log_error(gc)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem",
-                  ZFILESYSTEM_TMPFS);
-    return;
-  }
-
-  if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
-    log_error(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
-                  ZFILESYSTEM_TMPFS);
-    return;
-  }
-
-  if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
-    log_error(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
-                  "when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
-    return;
-  }
-
-  if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
-    log_error(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
-                  ZFILESYSTEM_HUGETLBFS);
-    return;
-  }
-
-  const size_t expected_block_size = is_tmpfs() ? os::vm_page_size() : os::large_page_size();
-  if (expected_block_size != _block_size) {
-    log_error(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
-                  is_tmpfs() ? ZFILESYSTEM_TMPFS : ZFILESYSTEM_HUGETLBFS, _block_size, expected_block_size);
-    return;
-  }
-
-  // Successfully initialized
-  _initialized = true;
-}
-
-int ZBackingFile::create_mem_fd(const char* name) const {
-  // Create file name
-  char filename[PATH_MAX];
-  snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");
-
-  // Create file
-  const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
-  const int fd = ZSyscall::memfd_create(filename, MFD_CLOEXEC | extra_flags);
-  if (fd == -1) {
-    ZErrno err;
-    log_debug(gc, init)("Failed to create memfd file (%s)",
-                        ((ZLargePages::is_explicit() && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
-    return -1;
-  }
-
-  log_info(gc, init)("Heap backed by file: /memfd:%s", filename);
-
-  return fd;
-}
-
-int ZBackingFile::create_file_fd(const char* name) const {
-  const char* const filesystem = ZLargePages::is_explicit()
-                                 ? ZFILESYSTEM_HUGETLBFS
-                                 : ZFILESYSTEM_TMPFS;
-  const char** const preferred_mountpoints = ZLargePages::is_explicit()
-                                             ? z_preferred_hugetlbfs_mountpoints
-                                             : z_preferred_tmpfs_mountpoints;
-
-  // Find mountpoint
-  ZBackingPath path(filesystem, preferred_mountpoints);
-  if (path.get() == NULL) {
-    log_error(gc)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
-    return -1;
-  }
-
-  // Try to create an anonymous file using the O_TMPFILE flag. Note that this
-  // flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
-  const int fd_anon = os::open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
-  if (fd_anon == -1) {
-    ZErrno err;
-    log_debug(gc, init)("Failed to create anonymous file in %s (%s)", path.get(),
-                        (err == EINVAL ? "Not supported" : err.to_string()));
-  } else {
-    // Get inode number for anonymous file
-    struct stat stat_buf;
-    if (fstat(fd_anon, &stat_buf) == -1) {
-      ZErrno err;
-      log_error(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
-      return -1;
-    }
-
-    log_info(gc, init)("Heap backed by file: %s/#" UINT64_FORMAT, path.get(), (uint64_t)stat_buf.st_ino);
-
-    return fd_anon;
-  }
-
-  log_debug(gc, init)("Falling back to open/unlink");
-
-  // Create file name
-  char filename[PATH_MAX];
-  snprintf(filename, sizeof(filename), "%s/%s.%d", path.get(), name, os::current_process_id());
-
-  // Create file
-  const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
-  if (fd == -1) {
-    ZErrno err;
-    log_error(gc)("Failed to create file %s (%s)", filename, err.to_string());
-    return -1;
-  }
-
-  // Unlink file
-  if (unlink(filename) == -1) {
-    ZErrno err;
-    log_error(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
-    return -1;
-  }
-
-  log_info(gc, init)("Heap backed by file: %s", filename);
-
-  return fd;
-}
-
-int ZBackingFile::create_fd(const char* name) const {
-  if (ZPath == NULL) {
-    // If the path is not explicitly specified, then we first try to create a memfd file
-    // instead of looking for a tmpfd/hugetlbfs mount point. Note that memfd_create() might
-    // not be supported at all (requires kernel >= 3.17), or it might not support large
-    // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
-    // file on an accessible tmpfs or hugetlbfs mount point.
-    const int fd = create_mem_fd(name);
-    if (fd != -1) {
-      return fd;
-    }
-
-    log_debug(gc, init)("Falling back to searching for an accessible mount point");
-  }
-
-  return create_file_fd(name);
-}
-
-bool ZBackingFile::is_initialized() const {
-  return _initialized;
-}
-
-int ZBackingFile::fd() const {
-  return _fd;
-}
-
-size_t ZBackingFile::size() const {
-  return _size;
-}
-
-size_t ZBackingFile::available() const {
-  return _available;
-}
-
-bool ZBackingFile::is_tmpfs() const {
-  return _filesystem == TMPFS_MAGIC;
-}
-
-bool ZBackingFile::is_hugetlbfs() const {
-  return _filesystem == HUGETLBFS_MAGIC;
-}
-
-bool ZBackingFile::tmpfs_supports_transparent_huge_pages() const {
-  // If the shmem_enabled file exists and is readable then we
-  // know the kernel supports transparent huge pages for tmpfs.
-  return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
-}
-
-ZErrno ZBackingFile::fallocate_compat_ftruncate(size_t size) const {
-  while (ftruncate(_fd, size) == -1) {
-    if (errno != EINTR) {
-      // Failed
-      return errno;
-    }
-  }
-
-  // Success
-  return 0;
-}
-
-ZErrno ZBackingFile::fallocate_compat_mmap(size_t offset, size_t length, bool touch) const {
-  // On hugetlbfs, mapping a file segment will fail immediately, without
-  // the need to touch the mapped pages first, if there aren't enough huge
-  // pages available to back the mapping.
-  void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
-  if (addr == MAP_FAILED) {
-    // Failed
-    return errno;
-  }
-
-  // Once mapped, the huge pages are only reserved. We need to touch them
-  // to associate them with the file segment. Note that we can not punch
-  // hole in file segments which only have reserved pages.
-  if (touch) {
-    char* const start = (char*)addr;
-    char* const end = start + length;
-    os::pretouch_memory(start, end, _block_size);
-  }
-
-  // Unmap again. From now on, the huge pages that were mapped are allocated
-  // to this file. There's no risk in getting SIGBUS when touching them.
-  if (munmap(addr, length) == -1) {
-    // Failed
-    return errno;
-  }
-
-  // Success
-  return 0;
-}
-
-ZErrno ZBackingFile::fallocate_compat_pwrite(size_t offset, size_t length) const {
-  uint8_t data = 0;
-
-  // Allocate backing memory by writing to each block
-  for (size_t pos = offset; pos < offset + length; pos += _block_size) {
-    if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
-      // Failed
-      return errno;
-    }
-  }
-
-  // Success
-  return 0;
-}
-
-ZErrno ZBackingFile::fallocate_fill_hole_compat(size_t offset, size_t length) {
-  // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
-  // since Linux 4.3. When fallocate(2) is not supported we emulate it using
-  // ftruncate/pwrite (for tmpfs) or ftruncate/mmap/munmap (for hugetlbfs).
-
-  const size_t end = offset + length;
-  if (end > _size) {
-    // Increase file size
-    const ZErrno err = fallocate_compat_ftruncate(end);
-    if (err) {
-      // Failed
-      return err;
-    }
-  }
-
-  // Allocate backing memory
-  const ZErrno err = is_hugetlbfs() ? fallocate_compat_mmap(offset, length, false /* touch */)
-                                    : fallocate_compat_pwrite(offset, length);
-  if (err) {
-    if (end > _size) {
-      // Restore file size
-      fallocate_compat_ftruncate(_size);
-    }
-
-    // Failed
-    return err;
-  }
-
-  if (end > _size) {
-    // Record new file size
-    _size = end;
-  }
-
-  // Success
-  return 0;
-}
-
-ZErrno ZBackingFile::fallocate_fill_hole_syscall(size_t offset, size_t length) {
-  const int mode = 0; // Allocate
-  const int res = ZSyscall::fallocate(_fd, mode, offset, length);
-  if (res == -1) {
-    // Failed
-    return errno;
-  }
-
-  const size_t end = offset + length;
-  if (end > _size) {
-    // Record new file size
-    _size = end;
-  }
-
-  // Success
-  return 0;
-}
-
-ZErrno ZBackingFile::fallocate_fill_hole(size_t offset, size_t length) {
-  // Using compat mode is more efficient when allocating space on hugetlbfs.
-  // Note that allocating huge pages this way will only reserve them, and not
-  // associate them with segments of the file. We must guarantee that we at
-  // some point touch these segments, otherwise we can not punch hole in them.
-  if (z_fallocate_supported && !is_hugetlbfs()) {
-     const ZErrno err = fallocate_fill_hole_syscall(offset, length);
-     if (!err) {
-       // Success
-       return 0;
-     }
-
-     if (err != ENOSYS && err != EOPNOTSUPP) {
-       // Failed
-       return err;
-     }
-
-     // Not supported
-     log_debug(gc)("Falling back to fallocate() compatibility mode");
-     z_fallocate_supported = false;
-  }
-
-  return fallocate_fill_hole_compat(offset, length);
-}
-
-ZErrno ZBackingFile::fallocate_punch_hole(size_t offset, size_t length) {
-  if (is_hugetlbfs()) {
-    // We can only punch hole in pages that have been touched. Non-touched
-    // pages are only reserved, and not associated with any specific file
-    // segment. We don't know which pages have been previously touched, so
-    // we always touch them here to guarantee that we can punch hole.
-    const ZErrno err = fallocate_compat_mmap(offset, length, true /* touch */);
-    if (err) {
-      // Failed
-      return err;
-    }
-  }
-
-  const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
-  if (ZSyscall::fallocate(_fd, mode, offset, length) == -1) {
-    // Failed
-    return errno;
-  }
-
-  // Success
-  return 0;
-}
-
-ZErrno ZBackingFile::split_and_fallocate(bool punch_hole, size_t offset, size_t length) {
-  // Try first half
-  const size_t offset0 = offset;
-  const size_t length0 = align_up(length / 2, _block_size);
-  const ZErrno err0 = fallocate(punch_hole, offset0, length0);
-  if (err0) {
-    return err0;
-  }
-
-  // Try second half
-  const size_t offset1 = offset0 + length0;
-  const size_t length1 = length - length0;
-  const ZErrno err1 = fallocate(punch_hole, offset1, length1);
-  if (err1) {
-    return err1;
-  }
-
-  // Success
-  return 0;
-}
-
-ZErrno ZBackingFile::fallocate(bool punch_hole, size_t offset, size_t length) {
-  assert(is_aligned(offset, _block_size), "Invalid offset");
-  assert(is_aligned(length, _block_size), "Invalid length");
-
-  const ZErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
-  if (err == EINTR && length > _block_size) {
-    // Calling fallocate(2) with a large length can take a long time to
-    // complete. When running profilers, such as VTune, this syscall will
-    // be constantly interrupted by signals. Expanding the file in smaller
-    // steps avoids this problem.
-    return split_and_fallocate(punch_hole, offset, length);
-  }
-
-  return err;
-}
-
-bool ZBackingFile::commit_inner(size_t offset, size_t length) {
-  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
-                      offset / M, (offset + length) / M, length / M);
-
-retry:
-  const ZErrno err = fallocate(false /* punch_hole */, offset, length);
-  if (err) {
-    if (err == ENOSPC && !is_init_completed() && is_hugetlbfs() && z_fallocate_hugetlbfs_attempts-- > 0) {
-      // If we fail to allocate during initialization, due to lack of space on
-      // the hugetlbfs filesystem, then we wait and retry a few times before
-      // giving up. Otherwise there is a risk that running JVMs back-to-back
-      // will fail, since there is a delay between process termination and the
-      // huge pages owned by that process being returned to the huge page pool
-      // and made available for new allocations.
-      log_debug(gc, init)("Failed to commit memory (%s), retrying", err.to_string());
-
-      // Wait and retry in one second, in the hope that huge pages will be
-      // available by then.
-      sleep(1);
-      goto retry;
-    }
-
-    // Failed
-    log_error(gc)("Failed to commit memory (%s)", err.to_string());
-    return false;
-  }
-
-  // Success
-  return true;
-}
-
-size_t ZBackingFile::commit(size_t offset, size_t length) {
-  // Try to commit the whole region
-  if (commit_inner(offset, length)) {
-    // Success
-    return length;
-  }
-
-  // Failed, try to commit as much as possible
-  size_t start = offset;
-  size_t end = offset + length;
-
-  for (;;) {
-    length = align_down((end - start) / 2, ZGranuleSize);
-    if (length < ZGranuleSize) {
-      // Done, don't commit more
-      return start - offset;
-    }
-
-    if (commit_inner(start, length)) {
-      // Success, try commit more
-      start += length;
-    } else {
-      // Failed, try commit less
-      end -= length;
-    }
-  }
-}
-
-size_t ZBackingFile::uncommit(size_t offset, size_t length) {
-  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
-                      offset / M, (offset + length) / M, length / M);
-
-  const ZErrno err = fallocate(true /* punch_hole */, offset, length);
-  if (err) {
-    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
-    return 0;
-  }
-
-  return length;
-}
--- a/src/hotspot/os/linux/gc/z/zBackingFile_linux.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_LINUX_GC_Z_ZBACKINGFILE_LINUX_HPP
-#define OS_LINUX_GC_Z_ZBACKINGFILE_LINUX_HPP
-
-#include "memory/allocation.hpp"
-
-class ZErrno;
-
-class ZBackingFile {
-private:
-  int      _fd;
-  size_t   _size;
-  uint64_t _filesystem;
-  size_t   _block_size;
-  size_t   _available;
-  bool     _initialized;
-
-  int create_mem_fd(const char* name) const;
-  int create_file_fd(const char* name) const;
-  int create_fd(const char* name) const;
-
-  bool is_tmpfs() const;
-  bool is_hugetlbfs() const;
-  bool tmpfs_supports_transparent_huge_pages() const;
-
-  ZErrno fallocate_compat_ftruncate(size_t size) const;
-  ZErrno fallocate_compat_mmap(size_t offset, size_t length, bool reserve_only) const;
-  ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
-  ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
-  ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
-  ZErrno fallocate_fill_hole(size_t offset, size_t length);
-  ZErrno fallocate_punch_hole(size_t offset, size_t length);
-  ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length);
-  ZErrno fallocate(bool punch_hole, size_t offset, size_t length);
-
-  bool commit_inner(size_t offset, size_t length);
-
-public:
-  ZBackingFile();
-
-  bool is_initialized() const;
-
-  int fd() const;
-  size_t size() const;
-  size_t available() const;
-
-  size_t commit(size_t offset, size_t length);
-  size_t uncommit(size_t offset, size_t length);
-};
-
-#endif // OS_LINUX_GC_Z_ZBACKINGFILE_LINUX_HPP
--- a/src/hotspot/os/linux/gc/z/zBackingPath_linux.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,149 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/z/zArray.inline.hpp"
-#include "gc/z/zBackingPath_linux.hpp"
-#include "gc/z/zErrno.hpp"
-#include "logging/log.hpp"
-
-#include <stdio.h>
-#include <unistd.h>
-
-// Mount information, see proc(5) for more details.
-#define PROC_SELF_MOUNTINFO        "/proc/self/mountinfo"
-
-ZBackingPath::ZBackingPath(const char* filesystem, const char** preferred_mountpoints) {
-  if (ZPath != NULL) {
-    // Use specified path
-    _path = strdup(ZPath);
-  } else {
-    // Find suitable path
-    _path = find_mountpoint(filesystem, preferred_mountpoints);
-  }
-}
-
-ZBackingPath::~ZBackingPath() {
-  free(_path);
-  _path = NULL;
-}
-
-char* ZBackingPath::get_mountpoint(const char* line, const char* filesystem) const {
-  char* line_mountpoint = NULL;
-  char* line_filesystem = NULL;
-
-  // Parse line and return a newly allocated string containing the mount point if
-  // the line contains a matching filesystem and the mount point is accessible by
-  // the current user.
-  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
-      strcmp(line_filesystem, filesystem) != 0 ||
-      access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
-    // Not a matching or accessible filesystem
-    free(line_mountpoint);
-    line_mountpoint = NULL;
-  }
-
-  free(line_filesystem);
-
-  return line_mountpoint;
-}
-
-void ZBackingPath::get_mountpoints(const char* filesystem, ZArray<char*>* mountpoints) const {
-  FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
-  if (fd == NULL) {
-    ZErrno err;
-    log_error(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
-    return;
-  }
-
-  char* line = NULL;
-  size_t length = 0;
-
-  while (getline(&line, &length, fd) != -1) {
-    char* const mountpoint = get_mountpoint(line, filesystem);
-    if (mountpoint != NULL) {
-      mountpoints->add(mountpoint);
-    }
-  }
-
-  free(line);
-  fclose(fd);
-}
-
-void ZBackingPath::free_mountpoints(ZArray<char*>* mountpoints) const {
-  ZArrayIterator<char*> iter(mountpoints);
-  for (char* mountpoint; iter.next(&mountpoint);) {
-    free(mountpoint);
-  }
-  mountpoints->clear();
-}
-
-char* ZBackingPath::find_preferred_mountpoint(const char* filesystem,
-                                              ZArray<char*>* mountpoints,
-                                              const char** preferred_mountpoints) const {
-  // Find preferred mount point
-  ZArrayIterator<char*> iter1(mountpoints);
-  for (char* mountpoint; iter1.next(&mountpoint);) {
-    for (const char** preferred = preferred_mountpoints; *preferred != NULL; preferred++) {
-      if (!strcmp(mountpoint, *preferred)) {
-        // Preferred mount point found
-        return strdup(mountpoint);
-      }
-    }
-  }
-
-  // Preferred mount point not found
-  log_error(gc)("More than one %s filesystem found:", filesystem);
-  ZArrayIterator<char*> iter2(mountpoints);
-  for (char* mountpoint; iter2.next(&mountpoint);) {
-    log_error(gc)("  %s", mountpoint);
-  }
-
-  return NULL;
-}
-
-char* ZBackingPath::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const {
-  char* path = NULL;
-  ZArray<char*> mountpoints;
-
-  get_mountpoints(filesystem, &mountpoints);
-
-  if (mountpoints.size() == 0) {
-    // No mount point found
-    log_error(gc)("Failed to find an accessible %s filesystem", filesystem);
-  } else if (mountpoints.size() == 1) {
-    // One mount point found
-    path = strdup(mountpoints.at(0));
-  } else {
-    // More than one mount point found
-    path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints);
-  }
-
-  free_mountpoints(&mountpoints);
-
-  return path;
-}
-
-const char* ZBackingPath::get() const {
-  return _path;
-}
--- a/src/hotspot/os/linux/gc/z/zBackingPath_linux.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_LINUX_GC_Z_ZBACKINGPATH_LINUX_HPP
-#define OS_LINUX_GC_Z_ZBACKINGPATH_LINUX_HPP
-
-#include "gc/z/zArray.hpp"
-#include "memory/allocation.hpp"
-
-class ZBackingPath : public StackObj {
-private:
-  char* _path;
-
-  char* get_mountpoint(const char* line,
-                       const char* filesystem) const;
-  void get_mountpoints(const char* filesystem,
-                       ZArray<char*>* mountpoints) const;
-  void free_mountpoints(ZArray<char*>* mountpoints) const;
-  char* find_preferred_mountpoint(const char* filesystem,
-                                  ZArray<char*>* mountpoints,
-                                  const char** preferred_mountpoints) const;
-  char* find_mountpoint(const char* filesystem,
-                        const char** preferred_mountpoints) const;
-
-public:
-  ZBackingPath(const char* filesystem, const char** preferred_mountpoints);
-  ~ZBackingPath();
-
-  const char* get() const;
-};
-
-#endif // OS_LINUX_GC_Z_ZBACKINGPATH_LINUX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/gc/z/zMountPoint_linux.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zMountPoint_linux.hpp"
+#include "logging/log.hpp"
+
+#include <stdio.h>
+#include <unistd.h>
+
+// Mount information, see proc(5) for more details.
+#define PROC_SELF_MOUNTINFO        "/proc/self/mountinfo"
+
+ZMountPoint::ZMountPoint(const char* filesystem, const char** preferred_mountpoints) {
+  if (ZPath != NULL) {
+    // Use specified path
+    _path = strdup(ZPath);
+  } else {
+    // Find suitable path
+    _path = find_mountpoint(filesystem, preferred_mountpoints);
+  }
+}
+
+ZMountPoint::~ZMountPoint() {
+  free(_path);
+  _path = NULL;
+}
+
+char* ZMountPoint::get_mountpoint(const char* line, const char* filesystem) const {
+  char* line_mountpoint = NULL;
+  char* line_filesystem = NULL;
+
+  // Parse line and return a newly allocated string containing the mount point if
+  // the line contains a matching filesystem and the mount point is accessible by
+  // the current user.
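+  // A mountinfo line has the form (see proc(5)):
+  //   <id> <parent> <major:minor> <root> <mount point> <options>... - <fstype> <source> ...
+  // The scan below skips to, and captures, the mount point and the filesystem type.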
+  if (sscanf(line, "%*u %*u %*u:%*u %*s %ms %*[^-]- %ms", &line_mountpoint, &line_filesystem) != 2 ||
+      strcmp(line_filesystem, filesystem) != 0 ||
+      access(line_mountpoint, R_OK|W_OK|X_OK) != 0) {
+    // Not a matching or accessible filesystem
+    free(line_mountpoint);
+    line_mountpoint = NULL;
+  }
+
+  free(line_filesystem);
+
+  return line_mountpoint;
+}
+
+void ZMountPoint::get_mountpoints(const char* filesystem, ZArray<char*>* mountpoints) const {
+  FILE* fd = fopen(PROC_SELF_MOUNTINFO, "r");
+  if (fd == NULL) {
+    ZErrno err;
+    log_error(gc)("Failed to open %s: %s", PROC_SELF_MOUNTINFO, err.to_string());
+    return;
+  }
+
+  char* line = NULL;
+  size_t length = 0;
+
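+  // getline(3) allocates the line buffer and grows it between iterations as
+  // needed; it is freed once after the loop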
+  while (getline(&line, &length, fd) != -1) {
+    char* const mountpoint = get_mountpoint(line, filesystem);
+    if (mountpoint != NULL) {
+      mountpoints->add(mountpoint);
+    }
+  }
+
+  free(line);
+  fclose(fd);
+}
+
+void ZMountPoint::free_mountpoints(ZArray<char*>* mountpoints) const {
+  ZArrayIterator<char*> iter(mountpoints);
+  for (char* mountpoint; iter.next(&mountpoint);) {
+    free(mountpoint);
+  }
+  mountpoints->clear();
+}
+
+char* ZMountPoint::find_preferred_mountpoint(const char* filesystem,
+                                             ZArray<char*>* mountpoints,
+                                             const char** preferred_mountpoints) const {
+  // Find preferred mount point
+  ZArrayIterator<char*> iter1(mountpoints);
+  for (char* mountpoint; iter1.next(&mountpoint);) {
+    for (const char** preferred = preferred_mountpoints; *preferred != NULL; preferred++) {
+      if (!strcmp(mountpoint, *preferred)) {
+        // Preferred mount point found
+        return strdup(mountpoint);
+      }
+    }
+  }
+
+  // Preferred mount point not found
+  log_error(gc)("More than one %s filesystem found:", filesystem);
+  ZArrayIterator<char*> iter2(mountpoints);
+  for (char* mountpoint; iter2.next(&mountpoint);) {
+    log_error(gc)("  %s", mountpoint);
+  }
+
+  return NULL;
+}
+
+char* ZMountPoint::find_mountpoint(const char* filesystem, const char** preferred_mountpoints) const {
+  char* path = NULL;
+  ZArray<char*> mountpoints;
+
+  get_mountpoints(filesystem, &mountpoints);
+
+  if (mountpoints.size() == 0) {
+    // No mount point found
+    log_error(gc)("Failed to find an accessible %s filesystem", filesystem);
+  } else if (mountpoints.size() == 1) {
+    // One mount point found
+    path = strdup(mountpoints.at(0));
+  } else {
+    // More than one mount point found
+    path = find_preferred_mountpoint(filesystem, &mountpoints, preferred_mountpoints);
+  }
+
+  free_mountpoints(&mountpoints);
+
+  return path;
+}
+
+const char* ZMountPoint::get() const {
+  return _path;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/linux/gc/z/zMountPoint_linux.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_LINUX_GC_Z_ZMOUNTPOINT_LINUX_HPP
+#define OS_LINUX_GC_Z_ZMOUNTPOINT_LINUX_HPP
+
+#include "gc/z/zArray.hpp"
+#include "memory/allocation.hpp"
+
+class ZMountPoint : public StackObj {
+private:
+  char* _path;
+
+  char* get_mountpoint(const char* line,
+                       const char* filesystem) const;
+  void get_mountpoints(const char* filesystem,
+                       ZArray<char*>* mountpoints) const;
+  void free_mountpoints(ZArray<char*>* mountpoints) const;
+  char* find_preferred_mountpoint(const char* filesystem,
+                                  ZArray<char*>* mountpoints,
+                                  const char** preferred_mountpoints) const;
+  char* find_mountpoint(const char* filesystem,
+                        const char** preferred_mountpoints) const;
+
+public:
+  ZMountPoint(const char* filesystem, const char** preferred_mountpoints);
+  ~ZMountPoint();
+
+  const char* get() const;
+};
+
+#endif // OS_LINUX_GC_Z_ZMOUNTPOINT_LINUX_HPP
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,63 +22,287 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zArray.inline.hpp"
 #include "gc/z/zErrno.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zLargePages.inline.hpp"
-#include "gc/z/zMemory.hpp"
-#include "gc/z/zNUMA.hpp"
-#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zMountPoint_linux.hpp"
 #include "gc/z/zPhysicalMemoryBacking_linux.hpp"
+#include "gc/z/zSyscall_linux.hpp"
 #include "logging/log.hpp"
-#include "runtime/globals.hpp"
 #include "runtime/init.hpp"
 #include "runtime/os.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 
+#include <fcntl.h>
 #include <stdio.h>
 #include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
 #include <sys/types.h>
+#include <unistd.h>
 
 //
 // Support for building on older Linux systems
 //
 
-// madvise(2) flags
-#ifndef MADV_HUGEPAGE
-#define MADV_HUGEPAGE                        14
+// memfd_create(2) flags
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC                      0x0001U
+#endif
+#ifndef MFD_HUGETLB
+#define MFD_HUGETLB                      0x0004U
+#endif
+
+// open(2) flags
+#ifndef O_CLOEXEC
+#define O_CLOEXEC                        02000000
+#endif
+#ifndef O_TMPFILE
+#define O_TMPFILE                        (020000000 | O_DIRECTORY)
+#endif
+
+// fallocate(2) flags
+#ifndef FALLOC_FL_KEEP_SIZE
+#define FALLOC_FL_KEEP_SIZE              0x01
+#endif
+#ifndef FALLOC_FL_PUNCH_HOLE
+#define FALLOC_FL_PUNCH_HOLE             0x02
+#endif
+
+// Filesystem types, see statfs(2)
+#ifndef TMPFS_MAGIC
+#define TMPFS_MAGIC                      0x01021994
+#endif
+#ifndef HUGETLBFS_MAGIC
+#define HUGETLBFS_MAGIC                  0x958458f6
 #endif
 
+// Filesystem names
+#define ZFILESYSTEM_TMPFS                "tmpfs"
+#define ZFILESYSTEM_HUGETLBFS            "hugetlbfs"
+
 // Proc file entry for max map count
-#define ZFILENAME_PROC_MAX_MAP_COUNT         "/proc/sys/vm/max_map_count"
+#define ZFILENAME_PROC_MAX_MAP_COUNT     "/proc/sys/vm/max_map_count"
+
+// Sysfs file for transparent huge page on tmpfs
+#define ZFILENAME_SHMEM_ENABLED          "/sys/kernel/mm/transparent_hugepage/shmem_enabled"
+
+// Java heap filename
+#define ZFILENAME_HEAP                   "java_heap"
+
+// Preferred tmpfs mount points, ordered by priority
+static const char* z_preferred_tmpfs_mountpoints[] = {
+  "/dev/shm",
+  "/run/shm",
+  NULL
+};
+
+// Preferred hugetlbfs mount points, ordered by priority
+static const char* z_preferred_hugetlbfs_mountpoints[] = {
+  "/dev/hugepages",
+  "/hugepages",
+  NULL
+};
+
+static int z_fallocate_hugetlbfs_attempts = 3;
+static bool z_fallocate_supported = true;
+
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking() :
+    _fd(-1),
+    _size(0),
+    _filesystem(0),
+    _block_size(0),
+    _available(0),
+    _initialized(false) {
+
+  // Create backing file
+  _fd = create_fd(ZFILENAME_HEAP);
+  if (_fd == -1) {
+    return;
+  }
+
+  // Get filesystem statistics
+  struct statfs buf;
+  if (fstatfs(_fd, &buf) == -1) {
+    ZErrno err;
+    log_error(gc)("Failed to determine filesystem type for backing file (%s)", err.to_string());
+    return;
+  }
+
+  _filesystem = buf.f_type;
+  _block_size = buf.f_bsize;
+  _available = buf.f_bavail * _block_size;
+
+  // Make sure we're on a supported filesystem
+  if (!is_tmpfs() && !is_hugetlbfs()) {
+    log_error(gc)("Backing file must be located on a %s or a %s filesystem",
+                  ZFILESYSTEM_TMPFS, ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  // Make sure the filesystem type matches requested large page type
+  if (ZLargePages::is_transparent() && !is_tmpfs()) {
+    log_error(gc)("-XX:+UseTransparentHugePages can only be enable when using a %s filesystem",
+                  ZFILESYSTEM_TMPFS);
+    return;
+  }
+
+  if (ZLargePages::is_transparent() && !tmpfs_supports_transparent_huge_pages()) {
+    log_error(gc)("-XX:+UseTransparentHugePages on a %s filesystem not supported by kernel",
+                  ZFILESYSTEM_TMPFS);
+    return;
+  }
+
+  if (ZLargePages::is_explicit() && !is_hugetlbfs()) {
+    log_error(gc)("-XX:+UseLargePages (without -XX:+UseTransparentHugePages) can only be enabled "
+                  "when using a %s filesystem", ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  if (!ZLargePages::is_explicit() && is_hugetlbfs()) {
+    log_error(gc)("-XX:+UseLargePages must be enabled when using a %s filesystem",
+                  ZFILESYSTEM_HUGETLBFS);
+    return;
+  }
+
+  const size_t expected_block_size = is_tmpfs() ? os::vm_page_size() : os::large_page_size();
+  if (expected_block_size != _block_size) {
+    log_error(gc)("%s filesystem has unexpected block size " SIZE_FORMAT " (expected " SIZE_FORMAT ")",
+                  is_tmpfs() ? ZFILESYSTEM_TMPFS : ZFILESYSTEM_HUGETLBFS, _block_size, expected_block_size);
+    return;
+  }
+
+  // Successfully initialized
+  _initialized = true;
+}
+
+int ZPhysicalMemoryBacking::create_mem_fd(const char* name) const {
+  // Create file name
+  char filename[PATH_MAX];
+  snprintf(filename, sizeof(filename), "%s%s", name, ZLargePages::is_explicit() ? ".hugetlb" : "");
+
+  // Create file
+  const int extra_flags = ZLargePages::is_explicit() ? MFD_HUGETLB : 0;
+  const int fd = ZSyscall::memfd_create(filename, MFD_CLOEXEC | extra_flags);
+  if (fd == -1) {
+    ZErrno err;
+    log_debug(gc, init)("Failed to create memfd file (%s)",
+                        ((ZLargePages::is_explicit() && err == EINVAL) ? "Hugepages not supported" : err.to_string()));
+    return -1;
+  }
+
+  log_info(gc, init)("Heap backed by file: /memfd:%s", filename);
+
+  return fd;
+}
+
+int ZPhysicalMemoryBacking::create_file_fd(const char* name) const {
+  const char* const filesystem = ZLargePages::is_explicit()
+                                 ? ZFILESYSTEM_HUGETLBFS
+                                 : ZFILESYSTEM_TMPFS;
+  const char** const preferred_mountpoints = ZLargePages::is_explicit()
+                                             ? z_preferred_hugetlbfs_mountpoints
+                                             : z_preferred_tmpfs_mountpoints;
+
+  // Find mountpoint
+  ZMountPoint mountpoint(filesystem, preferred_mountpoints);
+  if (mountpoint.get() == NULL) {
+    log_error(gc)("Use -XX:ZPath to specify the path to a %s filesystem", filesystem);
+    return -1;
+  }
+
+  // Try to create an anonymous file using the O_TMPFILE flag. Note that this
+  // flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
+  const int fd_anon = os::open(mountpoint.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  if (fd_anon == -1) {
+    ZErrno err;
+    log_debug(gc, init)("Failed to create anonymous file in %s (%s)", mountpoint.get(),
+                        (err == EINVAL ? "Not supported" : err.to_string()));
+  } else {
+    // Get inode number for anonymous file
+    struct stat stat_buf;
+    if (fstat(fd_anon, &stat_buf) == -1) {
+      ZErrno err;
+      log_error(gc)("Failed to determine inode number for anonymous file (%s)", err.to_string());
+      return -1;
+    }
+
+    log_info(gc, init)("Heap backed by file: %s/#" UINT64_FORMAT, mountpoint.get(), (uint64_t)stat_buf.st_ino);
+
+    return fd_anon;
+  }
+
+  log_debug(gc, init)("Falling back to open/unlink");
+
+  // Create file name
+  char filename[PATH_MAX];
+  snprintf(filename, sizeof(filename), "%s/%s.%d", mountpoint.get(), name, os::current_process_id());
+
+  // Create file
+  const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  if (fd == -1) {
+    ZErrno err;
+    log_error(gc)("Failed to create file %s (%s)", filename, err.to_string());
+    return -1;
+  }
+
+  // Unlink file
+  if (unlink(filename) == -1) {
+    ZErrno err;
+    log_error(gc)("Failed to unlink file %s (%s)", filename, err.to_string());
+    return -1;
+  }
+
+  log_info(gc, init)("Heap backed by file: %s", filename);
+
+  return fd;
+}
+
+int ZPhysicalMemoryBacking::create_fd(const char* name) const {
+  if (ZPath == NULL) {
+    // If the path is not explicitly specified, then we first try to create a memfd file
+    // instead of looking for a tmpfs/hugetlbfs mount point. Note that memfd_create() might
+    // not be supported at all (requires kernel >= 3.17), or it might not support large
+    // pages (requires kernel >= 4.14). If memfd_create() fails, then we try to create a
+    // file on an accessible tmpfs or hugetlbfs mount point.
+    const int fd = create_mem_fd(name);
+    if (fd != -1) {
+      return fd;
+    }
+
+    log_debug(gc, init)("Falling back to searching for an accessible mount point");
+  }
+
+  return create_file_fd(name);
+}
 
 bool ZPhysicalMemoryBacking::is_initialized() const {
-  return _file.is_initialized();
+  return _initialized;
 }
 
 void ZPhysicalMemoryBacking::warn_available_space(size_t max) const {
   // Note that the available space on a tmpfs or a hugetlbfs filesystem
   // will be zero if no size limit was specified when it was mounted.
-  const size_t available = _file.available();
-  if (available == 0) {
+  if (_available == 0) {
     // No size limit set, skip check
     log_info(gc, init)("Available space on backing filesystem: N/A");
     return;
   }
 
-  log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", available / M);
+  log_info(gc, init)("Available space on backing filesystem: " SIZE_FORMAT "M", _available / M);
 
   // Warn if the filesystem doesn't currently have enough space available to hold
   // the max heap size. The max heap size will be capped if we later hit this limit
   // when trying to expand the heap.
-  if (available < max) {
+  if (_available < max) {
     log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
     log_warning(gc)("Not enough space available on the backing filesystem to hold the current max Java heap");
     log_warning(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly "
                     "(available", max / M);
     log_warning(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem "
-                    "size could", available / M);
+                    "size could", _available / M);
     log_warning(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory.");
   }
 }
@@ -126,215 +350,308 @@
   warn_max_map_count(max);
 }
 
-bool ZPhysicalMemoryBacking::supports_uncommit() {
-  assert(!is_init_completed(), "Invalid state");
-  assert(_file.size() >= ZGranuleSize, "Invalid size");
+size_t ZPhysicalMemoryBacking::size() const {
+  return _size;
+}
 
-  // Test if uncommit is supported by uncommitting and then re-committing a granule
-  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
+bool ZPhysicalMemoryBacking::is_tmpfs() const {
+  return _filesystem == TMPFS_MAGIC;
+}
+
+bool ZPhysicalMemoryBacking::is_hugetlbfs() const {
+  return _filesystem == HUGETLBFS_MAGIC;
 }
 
-size_t ZPhysicalMemoryBacking::commit(size_t size) {
-  size_t committed = 0;
+bool ZPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const {
+  // If the shmem_enabled file exists and is readable then we
+  // know the kernel supports transparent huge pages for tmpfs.
+  return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
+}
 
-  // Fill holes in the backing file
-  while (committed < size) {
-    size_t allocated = 0;
-    const size_t remaining = size - committed;
-    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
-    if (start == UINTPTR_MAX) {
-      // No holes to commit
-      break;
-    }
-
-    // Try commit hole
-    const size_t filled = _file.commit(start, allocated);
-    if (filled > 0) {
-      // Successful or partialy successful
-      _committed.free(start, filled);
-      committed += filled;
-    }
-    if (filled < allocated) {
-      // Failed or partialy failed
-      _uncommitted.free(start + filled, allocated - filled);
-      return committed;
+ZErrno ZPhysicalMemoryBacking::fallocate_compat_ftruncate(size_t size) const {
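+  // Resize the file, retrying if interrupted by a signal (EINTR)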
+  while (ftruncate(_fd, size) == -1) {
+    if (errno != EINTR) {
+      // Failed
+      return errno;
     }
   }
 
-  // Expand backing file
-  if (committed < size) {
-    const size_t remaining = size - committed;
-    const uintptr_t start = _file.size();
-    const size_t expanded = _file.commit(start, remaining);
-    if (expanded > 0) {
-      // Successful or partialy successful
-      _committed.free(start, expanded);
-      committed += expanded;
+  // Success
+  return 0;
+}
+
+ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap(size_t offset, size_t length, bool touch) const {
+  // On hugetlbfs, mapping a file segment will fail immediately, without
+  // the need to touch the mapped pages first, if there aren't enough huge
+  // pages available to back the mapping.
+  void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
+  if (addr == MAP_FAILED) {
+    // Failed
+    return errno;
+  }
+
+  // Once mapped, the huge pages are only reserved. We need to touch them
+  // to associate them with the file segment. Note that we cannot punch
+  // holes in file segments which only have reserved pages.
+  if (touch) {
+    char* const start = (char*)addr;
+    char* const end = start + length;
+    os::pretouch_memory(start, end, _block_size);
+  }
+
+  // Unmap again. From now on, the huge pages that were mapped are allocated
+  // to this file. There's no risk in getting SIGBUS when touching them.
+  if (munmap(addr, length) == -1) {
+    // Failed
+    return errno;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const {
+  uint8_t data = 0;
+
+  // Allocate backing memory by writing to each block
+  for (size_t pos = offset; pos < offset + length; pos += _block_size) {
+    if (pwrite(_fd, &data, sizeof(data), pos) == -1) {
+      // Failed
+      return errno;
+    }
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) {
+  // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
+  // since Linux 4.3. When fallocate(2) is not supported we emulate it using
+  // ftruncate/pwrite (for tmpfs) or ftruncate/mmap/munmap (for hugetlbfs).
+
+  const size_t end = offset + length;
+  if (end > _size) {
+    // Increase file size
+    const ZErrno err = fallocate_compat_ftruncate(end);
+    if (err) {
+      // Failed
+      return err;
     }
   }
 
-  return committed;
+  // Allocate backing memory
+  const ZErrno err = is_hugetlbfs() ? fallocate_compat_mmap(offset, length, false /* touch */)
+                                    : fallocate_compat_pwrite(offset, length);
+  if (err) {
+    if (end > _size) {
+      // Restore file size
+      fallocate_compat_ftruncate(_size);
+    }
+
+    // Failed
+    return err;
+  }
+
+  if (end > _size) {
+    // Record new file size
+    _size = end;
+  }
+
+  // Success
+  return 0;
 }
 
-size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
-  size_t uncommitted = 0;
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) {
+  const int mode = 0; // Allocate
+  const int res = ZSyscall::fallocate(_fd, mode, offset, length);
+  if (res == -1) {
+    // Failed
+    return errno;
+  }
 
-  // Punch holes in backing file
-  while (uncommitted < size) {
-    size_t allocated = 0;
-    const size_t remaining = size - uncommitted;
-    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
-    assert(start != UINTPTR_MAX, "Allocation should never fail");
+  const size_t end = offset + length;
+  if (end > _size) {
+    // Record new file size
+    _size = end;
+  }
+
+  // Success
+  return 0;
+}
 
-    // Try punch hole
-    const size_t punched = _file.uncommit(start, allocated);
-    if (punched > 0) {
-      // Successful or partialy successful
-      _uncommitted.free(start, punched);
-      uncommitted += punched;
-    }
-    if (punched < allocated) {
-      // Failed or partialy failed
-      _committed.free(start + punched, allocated - punched);
-      return uncommitted;
+ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) {
+  // Using compat mode is more efficient when allocating space on hugetlbfs.
+  // Note that allocating huge pages this way will only reserve them, and not
+  // associate them with segments of the file. We must guarantee that we at
+  // some point touch these segments, otherwise we cannot punch holes in them.
+  if (z_fallocate_supported && !is_hugetlbfs()) {
+    const ZErrno err = fallocate_fill_hole_syscall(offset, length);
+    if (!err) {
+      // Success
+      return 0;
+    }
+
+    if (err != ENOSYS && err != EOPNOTSUPP) {
+      // Failed
+      return err;
+    }
+
+    // Not supported
+    log_debug(gc)("Falling back to fallocate() compatibility mode");
+    z_fallocate_supported = false;
+  }
+
+  return fallocate_fill_hole_compat(offset, length);
+}
+
+ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) {
+  if (is_hugetlbfs()) {
+    // We can only punch holes in pages that have been touched. Non-touched
+    // pages are only reserved, and not associated with any specific file
+    // segment. We don't know which pages have been previously touched, so
+    // we always touch them here to guarantee that we can punch hole.
+    const ZErrno err = fallocate_compat_mmap(offset, length, true /* touch */);
+    if (err) {
+      // Failed
+      return err;
     }
   }
 
-  return uncommitted;
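+  // Deallocate the range while keeping the file size unchanged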
+  const int mode = FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE;
+  if (ZSyscall::fallocate(_fd, mode, offset, length) == -1) {
+    // Failed
+    return errno;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) {
+  // Try first half
+  const size_t offset0 = offset;
+  const size_t length0 = align_up(length / 2, _block_size);
+  const ZErrno err0 = fallocate(punch_hole, offset0, length0);
+  if (err0) {
+    return err0;
+  }
+
+  // Try second half
+  const size_t offset1 = offset0 + length0;
+  const size_t length1 = length - length0;
+  const ZErrno err1 = fallocate(punch_hole, offset1, length1);
+  if (err1) {
+    return err1;
+  }
+
+  // Success
+  return 0;
+}
+
+ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) {
+  assert(is_aligned(offset, _block_size), "Invalid offset");
+  assert(is_aligned(length, _block_size), "Invalid length");
+
+  const ZErrno err = punch_hole ? fallocate_punch_hole(offset, length) : fallocate_fill_hole(offset, length);
+  if (err == EINTR && length > _block_size) {
+    // Calling fallocate(2) with a large length can take a long time to
+    // complete. When running profilers, such as VTune, this syscall will
+    // be constantly interrupted by signals. Expanding the file in smaller
+    // steps avoids this problem.
+    return split_and_fallocate(punch_hole, offset, length);
+  }
+
+  return err;
 }
 
-ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
-  assert(is_aligned(size, ZGranuleSize), "Invalid size");
-
-  ZPhysicalMemory pmem;
+bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) {
+  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
 
-  // Allocate segments
-  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
-    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
-    assert(start != UINTPTR_MAX, "Allocation should never fail");
-    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
+retry:
+  const ZErrno err = fallocate(false /* punch_hole */, offset, length);
+  if (err) {
+    if (err == ENOSPC && !is_init_completed() && is_hugetlbfs() && z_fallocate_hugetlbfs_attempts-- > 0) {
+      // If we fail to allocate during initialization, due to lack of space on
+      // the hugetlbfs filesystem, then we wait and retry a few times before
+      // giving up. Otherwise there is a risk that running JVMs back-to-back
+      // will fail, since there is a delay between process termination and the
+      // huge pages owned by that process being returned to the huge page pool
+      // and made available for new allocations.
+      log_debug(gc, init)("Failed to commit memory (%s), retrying", err.to_string());
+
+      // Wait and retry in one second, in the hope that huge pages will be
+      // available by then.
+      sleep(1);
+      goto retry;
+    }
+
+    // Failed
+    log_error(gc)("Failed to commit memory (%s)", err.to_string());
+    return false;
   }
 
-  return pmem;
+  // Success
+  return true;
 }
 
-void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
-  const size_t nsegments = pmem.nsegments();
+size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
+  // Try to commit the whole region
+  if (commit_inner(offset, length)) {
+    // Success
+    return length;
+  }
+
+  // Failed, try to commit as much as possible
+  size_t start = offset;
+  size_t end = offset + length;
 
-  // Free segments
-  for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment& segment = pmem.segment(i);
-    _committed.free(segment.start(), segment.size());
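+  // Bisect the region: grow the committed prefix from the front on success,
+  // shrink the attempted range from the back on failure, until less than one
+  // granule remains to try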
+  for (;;) {
+    length = align_down((end - start) / 2, ZGranuleSize);
+    if (length < ZGranuleSize) {
+      // Done, don't commit more
+      return start - offset;
+    }
+
+    if (commit_inner(start, length)) {
+      // Success, try commit more
+      start += length;
+    } else {
+      // Failed, try commit less
+      end -= length;
+    }
   }
 }
 
-void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
-  if (err == ENOMEM) {
-    fatal("Failed to map memory. Please check the system limit on number of "
-          "memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
-  } else {
+size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
+  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
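+  // Punching a hole deallocates the file's backing pages: the mapping and
+  // the file's logical size stay intact, while the freed pages are returned
+  // to the OS.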
+  const ZErrno err = fallocate(true /* punch_hole */, offset, length);
+  if (err) {
+    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
+    return 0;
+  }
+
+  return length;
+}
+
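+// Map size bytes of the backing file, starting at offset, at the fixed
+// virtual address addr. MAP_FIXED is safe here because the target range is
+// part of ZGC's own address space reservation (see unmap() below).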
+void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const {
+  const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset);
+  if (res == MAP_FAILED) {
+    ZErrno err;
     fatal("Failed to map memory (%s)", err.to_string());
   }
 }
 
-void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size, int advice) const {
-  if (madvise((void*)addr, size, advice) == -1) {
-    ZErrno err;
-    log_error(gc)("Failed to advise on memory (advice %d, %s)", advice, err.to_string());
-  }
-}
-
-void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
-  const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
-  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
-}
-
-void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
-  const size_t nsegments = pmem.nsegments();
-  size_t size = 0;
-
-  // Map segments
-  for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment& segment = pmem.segment(i);
-    const uintptr_t segment_addr = addr + size;
-    const void* const res = mmap((void*)segment_addr, segment.size(), PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
-    if (res == MAP_FAILED) {
-      ZErrno err;
-      map_failed(err);
-    }
-
-    size += segment.size();
-  }
-
-  // Advise on use of transparent huge pages before touching it
-  if (ZLargePages::is_transparent()) {
-    advise_view(addr, size, MADV_HUGEPAGE);
-  }
-
-  // NUMA interleave memory before touching it
-  ZNUMA::memory_interleave(addr, size);
-}
-
-void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
+void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
   // Note that we must keep the address space reservation intact and just detach
   // the backing memory. For this reason we map a new anonymous, non-accessible
   // and non-reserved page over the mapping instead of actually unmapping.
-  const void* const res = mmap((void*)addr, pmem.size(), PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
   if (res == MAP_FAILED) {
     ZErrno err;
-    map_failed(err);
-  }
-}
-
-uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
-  // From an NMT point of view we treat the first heap view (marked0) as committed
-  return ZAddress::marked0(offset);
-}
-
-void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const {
-  if (ZVerifyViews) {
-    // Pre-touch good view
-    pretouch_view(ZAddress::good(offset), size);
-  } else {
-    // Pre-touch all views
-    pretouch_view(ZAddress::marked0(offset), size);
-    pretouch_view(ZAddress::marked1(offset), size);
-    pretouch_view(ZAddress::remapped(offset), size);
+    fatal("Failed to map memory (%s)", err.to_string());
   }
 }
-
-void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  if (ZVerifyViews) {
-    // Map good view
-    map_view(pmem, ZAddress::good(offset));
-  } else {
-    // Map all views
-    map_view(pmem, ZAddress::marked0(offset));
-    map_view(pmem, ZAddress::marked1(offset));
-    map_view(pmem, ZAddress::remapped(offset));
-  }
-}
-
-void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  if (ZVerifyViews) {
-    // Unmap good view
-    unmap_view(pmem, ZAddress::good(offset));
-  } else {
-    // Unmap all views
-    unmap_view(pmem, ZAddress::marked0(offset));
-    unmap_view(pmem, ZAddress::marked1(offset));
-    unmap_view(pmem, ZAddress::remapped(offset));
-  }
-}
-
-void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  // Map good view
-  assert(ZVerifyViews, "Should be enabled");
-  map_view(pmem, ZAddress::good(offset));
-}
-
-void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  // Unmap good view
-  assert(ZVerifyViews, "Should be enabled");
-  unmap_view(pmem, ZAddress::good(offset));
-}
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,49 +24,54 @@
 #ifndef OS_LINUX_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_HPP
 #define OS_LINUX_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_HPP
 
-#include "gc/z/zBackingFile_linux.hpp"
-#include "gc/z/zMemory.hpp"
-
 class ZErrno;
-class ZPhysicalMemory;
 
 class ZPhysicalMemoryBacking {
 private:
-  ZBackingFile   _file;
-  ZMemoryManager _committed;
-  ZMemoryManager _uncommitted;
+  int      _fd;
+  size_t   _size;
+  uint64_t _filesystem;
+  size_t   _block_size;
+  size_t   _available;
+  bool     _initialized;
 
   void warn_available_space(size_t max) const;
   void warn_max_map_count(size_t max) const;
 
-  void map_failed(ZErrno err) const;
+  int create_mem_fd(const char* name) const;
+  int create_file_fd(const char* name) const;
+  int create_fd(const char* name) const;
+
+  bool is_tmpfs() const;
+  bool is_hugetlbfs() const;
+  bool tmpfs_supports_transparent_huge_pages() const;
 
-  void advise_view(uintptr_t addr, size_t size, int advice) const;
-  void pretouch_view(uintptr_t addr, size_t size) const;
-  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
-  void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+  ZErrno fallocate_compat_ftruncate(size_t size) const;
+  ZErrno fallocate_compat_mmap(size_t offset, size_t length, bool reserve_only) const;
+  ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
+  ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
+  ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
+  ZErrno fallocate_fill_hole(size_t offset, size_t length);
+  ZErrno fallocate_punch_hole(size_t offset, size_t length);
+  ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length);
+  ZErrno fallocate(bool punch_hole, size_t offset, size_t length);
+
+  bool commit_inner(size_t offset, size_t length);
 
 public:
+  ZPhysicalMemoryBacking();
+
   bool is_initialized() const;
 
   void warn_commit_limits(size_t max) const;
-  bool supports_uncommit();
 
-  size_t commit(size_t size);
-  size_t uncommit(size_t size);
-
-  ZPhysicalMemory alloc(size_t size);
-  void free(const ZPhysicalMemory& pmem);
+  size_t size() const;
 
-  uintptr_t nmt_address(uintptr_t offset) const;
-
-  void pretouch(uintptr_t offset, size_t size) const;
+  size_t commit(size_t offset, size_t length);
+  size_t uncommit(size_t offset, size_t length);
 
-  void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-
-  void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void map(uintptr_t addr, size_t size, uintptr_t offset) const;
+  void unmap(uintptr_t addr, size_t size) const;
 };
 
 #endif // OS_LINUX_GC_Z_ZPHYSICALMEMORYBACKING_LINUX_HPP
--- a/src/hotspot/os/linux/osContainer_linux.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/os/linux/osContainer_linux.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,275 +25,16 @@
 #include <string.h>
 #include <math.h>
 #include <errno.h>
-#include "utilities/globalDefinitions.hpp"
-#include "memory/allocation.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
 #include "logging/log.hpp"
 #include "osContainer_linux.hpp"
+#include "cgroupSubsystem_linux.hpp"
 
-/*
- * PER_CPU_SHARES has been set to 1024 because CPU shares' quota
- * is commonly used in cloud frameworks like Kubernetes[1],
- * AWS[2] and Mesos[3] in a similar way. They spawn containers with
- * --cpu-shares option values scaled by PER_CPU_SHARES. Thus, we do
- * the inverse for determining the number of possible available
- * CPUs to the JVM inside a container. See JDK-8216366.
- *
- * [1] https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu
- *     In particular:
- *        When using Docker:
- *          The spec.containers[].resources.requests.cpu is converted to its core value, which is potentially
- *          fractional, and multiplied by 1024. The greater of this number or 2 is used as the value of the
- *          --cpu-shares flag in the docker run command.
- * [2] https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html
- * [3] https://github.com/apache/mesos/blob/3478e344fb77d931f6122980c6e94cd3913c441d/src/docker/docker.cpp#L648
- *     https://github.com/apache/mesos/blob/3478e344fb77d931f6122980c6e94cd3913c441d/src/slave/containerizer/mesos/isolators/cgroups/constants.hpp#L30
- */
-#define PER_CPU_SHARES 1024
 
 bool  OSContainer::_is_initialized   = false;
 bool  OSContainer::_is_containerized = false;
-int   OSContainer::_active_processor_count = 1;
-julong _unlimited_memory;
-
-class CgroupSubsystem: CHeapObj<mtInternal> {
- friend class OSContainer;
-
-
- private:
-    volatile jlong _next_check_counter;
-
-    /* mountinfo contents */
-    char *_root;
-    char *_mount_point;
-
-    /* Constructed subsystem directory */
-    char *_path;
-
- public:
-    CgroupSubsystem(char *root, char *mountpoint) {
-      _root = os::strdup(root);
-      _mount_point = os::strdup(mountpoint);
-      _path = NULL;
-      _next_check_counter = min_jlong;
-    }
-
-    /*
-     * Set directory to subsystem specific files based
-     * on the contents of the mountinfo and cgroup files.
-     */
-    void set_subsystem_path(char *cgroup_path) {
-      char buf[MAXPATHLEN+1];
-      if (_root != NULL && cgroup_path != NULL) {
-        if (strcmp(_root, "/") == 0) {
-          int buflen;
-          strncpy(buf, _mount_point, MAXPATHLEN);
-          buf[MAXPATHLEN-1] = '\0';
-          if (strcmp(cgroup_path,"/") != 0) {
-            buflen = strlen(buf);
-            if ((buflen + strlen(cgroup_path)) > (MAXPATHLEN-1)) {
-              return;
-            }
-            strncat(buf, cgroup_path, MAXPATHLEN-buflen);
-            buf[MAXPATHLEN-1] = '\0';
-          }
-          _path = os::strdup(buf);
-        } else {
-          if (strcmp(_root, cgroup_path) == 0) {
-            strncpy(buf, _mount_point, MAXPATHLEN);
-            buf[MAXPATHLEN-1] = '\0';
-            _path = os::strdup(buf);
-          } else {
-            char *p = strstr(cgroup_path, _root);
-            if (p != NULL && p == _root) {
-              if (strlen(cgroup_path) > strlen(_root)) {
-                int buflen;
-                strncpy(buf, _mount_point, MAXPATHLEN);
-                buf[MAXPATHLEN-1] = '\0';
-                buflen = strlen(buf);
-                if ((buflen + strlen(cgroup_path) - strlen(_root)) > (MAXPATHLEN-1)) {
-                  return;
-                }
-                strncat(buf, cgroup_path + strlen(_root), MAXPATHLEN-buflen);
-                buf[MAXPATHLEN-1] = '\0';
-                _path = os::strdup(buf);
-              }
-            }
-          }
-        }
-      }
-    }
-
-    char *subsystem_path() { return _path; }
-
-    bool cache_has_expired() {
-      return os::elapsed_counter() > _next_check_counter;
-    }
-
-    void set_cache_expiry_time(jlong timeout) {
-      _next_check_counter = os::elapsed_counter() + timeout;
-    }
-};
-
-class CgroupMemorySubsystem: CgroupSubsystem {
- friend class OSContainer;
-
- private:
-    /* Some container runtimes set limits via cgroup
-     * hierarchy. If set to true consider also memory.stat
-     * file if everything else seems unlimited */
-    bool _uses_mem_hierarchy;
-    volatile jlong _memory_limit_in_bytes;
-
- public:
-    CgroupMemorySubsystem(char *root, char *mountpoint) : CgroupSubsystem::CgroupSubsystem(root, mountpoint) {
-      _uses_mem_hierarchy = false;
-      _memory_limit_in_bytes = -1;
-
-    }
-
-    bool is_hierarchical() { return _uses_mem_hierarchy; }
-    void set_hierarchical(bool value) { _uses_mem_hierarchy = value; }
-
-    jlong memory_limit_in_bytes() { return _memory_limit_in_bytes; }
-    void set_memory_limit_in_bytes(jlong value) {
-      _memory_limit_in_bytes = value;
-      // max memory limit is unlikely to change, but we want to remain
-      // responsive to configuration changes. A very short grace time
-      // between re-read avoids excessive overhead during startup without
-      // significantly reducing the VMs ability to promptly react to reduced
-      // memory availability
-      set_cache_expiry_time(OSCONTAINER_CACHE_TIMEOUT);
-    }
-
-};
-
-CgroupMemorySubsystem* memory = NULL;
-CgroupSubsystem* cpuset = NULL;
-CgroupSubsystem* cpu = NULL;
-CgroupSubsystem* cpuacct = NULL;
-
-typedef char * cptr;
-
-PRAGMA_DIAG_PUSH
-PRAGMA_FORMAT_NONLITERAL_IGNORED
-template <typename T> int subsystem_file_line_contents(CgroupSubsystem* c,
-                                              const char *filename,
-                                              const char *matchline,
-                                              const char *scan_fmt,
-                                              T returnval) {
-  FILE *fp = NULL;
-  char *p;
-  char file[MAXPATHLEN+1];
-  char buf[MAXPATHLEN+1];
-  char discard[MAXPATHLEN+1];
-  bool found_match = false;
-
-  if (c == NULL) {
-    log_debug(os, container)("subsystem_file_line_contents: CgroupSubsytem* is NULL");
-    return OSCONTAINER_ERROR;
-  }
-  if (c->subsystem_path() == NULL) {
-    log_debug(os, container)("subsystem_file_line_contents: subsystem path is NULL");
-    return OSCONTAINER_ERROR;
-  }
-
-  strncpy(file, c->subsystem_path(), MAXPATHLEN);
-  file[MAXPATHLEN-1] = '\0';
-  int filelen = strlen(file);
-  if ((filelen + strlen(filename)) > (MAXPATHLEN-1)) {
-    log_debug(os, container)("File path too long %s, %s", file, filename);
-    return OSCONTAINER_ERROR;
-  }
-  strncat(file, filename, MAXPATHLEN-filelen);
-  log_trace(os, container)("Path to %s is %s", filename, file);
-  fp = fopen(file, "r");
-  if (fp != NULL) {
-    int err = 0;
-    while ((p = fgets(buf, MAXPATHLEN, fp)) != NULL) {
-      found_match = false;
-      if (matchline == NULL) {
-        // single-line file case
-        int matched = sscanf(p, scan_fmt, returnval);
-        found_match = (matched == 1);
-      } else {
-        // multi-line file case
-        if (strstr(p, matchline) != NULL) {
-          // discard matchline string prefix
-          int matched = sscanf(p, scan_fmt, discard, returnval);
-          found_match = (matched == 2);
-        } else {
-          continue; // substring not found
-        }
-      }
-      if (found_match) {
-        fclose(fp);
-        return 0;
-      } else {
-        err = 1;
-        log_debug(os, container)("Type %s not found in file %s", scan_fmt, file);
-      }
-    }
-    if (err == 0) {
-      log_debug(os, container)("Empty file %s", file);
-    }
-  } else {
-    log_debug(os, container)("Open of file %s failed, %s", file, os::strerror(errno));
-  }
-  if (fp != NULL)
-    fclose(fp);
-  return OSCONTAINER_ERROR;
-}
-PRAGMA_DIAG_POP
-
-#define GET_CONTAINER_INFO(return_type, subsystem, filename,              \
-                           logstring, scan_fmt, variable)                 \
-  return_type variable;                                                   \
-{                                                                         \
-  int err;                                                                \
-  err = subsystem_file_line_contents(subsystem,                           \
-                                     filename,                            \
-                                     NULL,                                \
-                                     scan_fmt,                            \
-                                     &variable);                          \
-  if (err != 0)                                                           \
-    return (return_type) OSCONTAINER_ERROR;                               \
-                                                                          \
-  log_trace(os, container)(logstring, variable);                          \
-}
-
-#define GET_CONTAINER_INFO_CPTR(return_type, subsystem, filename,         \
-                               logstring, scan_fmt, variable, bufsize)    \
-  char variable[bufsize];                                                 \
-{                                                                         \
-  int err;                                                                \
-  err = subsystem_file_line_contents(subsystem,                           \
-                                     filename,                            \
-                                     NULL,                                \
-                                     scan_fmt,                            \
-                                     variable);                           \
-  if (err != 0)                                                           \
-    return (return_type) NULL;                                            \
-                                                                          \
-  log_trace(os, container)(logstring, variable);                          \
-}
-
-#define GET_CONTAINER_INFO_LINE(return_type, subsystem, filename,         \
-                           matchline, logstring, scan_fmt, variable)      \
-  return_type variable;                                                   \
-{                                                                         \
-  int err;                                                                \
-  err = subsystem_file_line_contents(subsystem,                           \
-                                filename,                                 \
-                                matchline,                                \
-                                scan_fmt,                                 \
-                                &variable);                               \
-  if (err != 0)                                                           \
-    return (return_type) OSCONTAINER_ERROR;                               \
-                                                                          \
-  log_trace(os, container)(logstring, variable);                          \
-}
+CgroupSubsystem* cgroup_subsystem;
 
 /* init
  *
@@ -301,12 +42,6 @@
  * we are running under cgroup control.
  */
 void OSContainer::init() {
-  FILE *mntinfo = NULL;
-  FILE *cgroup = NULL;
-  char buf[MAXPATHLEN+1];
-  char tmproot[MAXPATHLEN+1];
-  char tmpmount[MAXPATHLEN+1];
-  char *p;
   jlong mem_limit;
 
   assert(!_is_initialized, "Initializing OSContainer more than once");
@@ -314,139 +49,19 @@
   _is_initialized = true;
   _is_containerized = false;
 
-  _unlimited_memory = (LONG_MAX / os::vm_page_size()) * os::vm_page_size();
-
   log_trace(os, container)("OSContainer::init: Initializing Container Support");
   if (!UseContainerSupport) {
     log_trace(os, container)("Container Support not enabled");
     return;
   }
 
-  /*
-   * Find the cgroup mount point for memory and cpuset
-   * by reading /proc/self/mountinfo
-   *
-   * Example for docker:
-   * 219 214 0:29 /docker/7208cebd00fa5f2e342b1094f7bed87fa25661471a4637118e65f1c995be8a34 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory
-   *
-   * Example for host:
-   * 34 28 0:29 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,memory
-   */
-  mntinfo = fopen("/proc/self/mountinfo", "r");
-  if (mntinfo == NULL) {
-      log_debug(os, container)("Can't open /proc/self/mountinfo, %s",
-                               os::strerror(errno));
-      return;
-  }
-
-  while ((p = fgets(buf, MAXPATHLEN, mntinfo)) != NULL) {
-    char tmpcgroups[MAXPATHLEN+1];
-    char *cptr = tmpcgroups;
-    char *token;
-
-    // mountinfo format is documented at https://www.kernel.org/doc/Documentation/filesystems/proc.txt
-    if (sscanf(p, "%*d %*d %*d:%*d %s %s %*[^-]- cgroup %*s %s", tmproot, tmpmount, tmpcgroups) != 3) {
-      continue;
-    }
-    while ((token = strsep(&cptr, ",")) != NULL) {
-      if (strcmp(token, "memory") == 0) {
-        memory = new CgroupMemorySubsystem(tmproot, tmpmount);
-      } else if (strcmp(token, "cpuset") == 0) {
-        cpuset = new CgroupSubsystem(tmproot, tmpmount);
-      } else if (strcmp(token, "cpu") == 0) {
-        cpu = new CgroupSubsystem(tmproot, tmpmount);
-      } else if (strcmp(token, "cpuacct") == 0) {
-        cpuacct= new CgroupSubsystem(tmproot, tmpmount);
-      }
-    }
-  }
-
-  fclose(mntinfo);
-
-  if (memory == NULL) {
-    log_debug(os, container)("Required cgroup memory subsystem not found");
-    return;
-  }
-  if (cpuset == NULL) {
-    log_debug(os, container)("Required cgroup cpuset subsystem not found");
-    return;
-  }
-  if (cpu == NULL) {
-    log_debug(os, container)("Required cgroup cpu subsystem not found");
-    return;
-  }
-  if (cpuacct == NULL) {
-    log_debug(os, container)("Required cgroup cpuacct subsystem not found");
-    return;
+  cgroup_subsystem = CgroupSubsystemFactory::create();
+  if (cgroup_subsystem == NULL) {
+    return; // Required subsystem files not found or other error
   }
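+
+  // The factory presumably probes /proc/self/mountinfo and /proc/self/cgroup
+  // (as the inline parsing removed below did) and returns a concrete
+  // CgroupSubsystem, or NULL when the required controller files are missing.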
-
-  /*
-   * Read /proc/self/cgroup and map host mount point to
-   * local one via /proc/self/mountinfo content above
-   *
-   * Docker example:
-   * 5:memory:/docker/6558aed8fc662b194323ceab5b964f69cf36b3e8af877a14b80256e93aecb044
-   *
-   * Host example:
-   * 5:memory:/user.slice
-   *
-   * Construct a path to the process specific memory and cpuset
-   * cgroup directory.
-   *
-   * For a container running under Docker from memory example above
-   * the paths would be:
-   *
-   * /sys/fs/cgroup/memory
-   *
-   * For a Host from memory example above the path would be:
-   *
-   * /sys/fs/cgroup/memory/user.slice
-   *
-   */
-  cgroup = fopen("/proc/self/cgroup", "r");
-  if (cgroup == NULL) {
-    log_debug(os, container)("Can't open /proc/self/cgroup, %s",
-                             os::strerror(errno));
-    return;
-  }
-
-  while ((p = fgets(buf, MAXPATHLEN, cgroup)) != NULL) {
-    char *controllers;
-    char *token;
-    char *base;
-
-    /* Skip cgroup number */
-    strsep(&p, ":");
-    /* Get controllers and base */
-    controllers = strsep(&p, ":");
-    base = strsep(&p, "\n");
-
-    if (controllers == NULL) {
-      continue;
-    }
-
-    while ((token = strsep(&controllers, ",")) != NULL) {
-      if (strcmp(token, "memory") == 0) {
-        memory->set_subsystem_path(base);
-        jlong hierarchy = uses_mem_hierarchy();
-        if (hierarchy > 0) {
-          memory->set_hierarchical(true);
-        }
-      } else if (strcmp(token, "cpuset") == 0) {
-        cpuset->set_subsystem_path(base);
-      } else if (strcmp(token, "cpu") == 0) {
-        cpu->set_subsystem_path(base);
-      } else if (strcmp(token, "cpuacct") == 0) {
-        cpuacct->set_subsystem_path(base);
-      }
-    }
-  }
-
-  fclose(cgroup);
-
   // We need to update the amount of physical memory now that
-  // command line arguments have been processed.
-  if ((mem_limit = memory_limit_in_bytes()) > 0) {
+  // cgroup subsystem files have been processed.
+  if ((mem_limit = cgroup_subsystem->memory_limit_in_bytes()) > 0) {
     os::Linux::set_physical_memory(mem_limit);
     log_info(os, container)("Memory Limit is: " JLONG_FORMAT, mem_limit);
   }
@@ -456,272 +71,61 @@
 }
 
 const char * OSContainer::container_type() {
-  if (is_containerized()) {
-    return "cgroupv1";
-  } else {
-    return NULL;
-  }
-}
-
-/* uses_mem_hierarchy
- *
- * Return whether or not hierarchical cgroup accounting is being
- * done.
- *
- * return:
- *    A number > 0 if true, or
- *    OSCONTAINER_ERROR for not supported
- */
-jlong OSContainer::uses_mem_hierarchy() {
-  GET_CONTAINER_INFO(jlong, memory, "/memory.use_hierarchy",
-                    "Use Hierarchy is: " JLONG_FORMAT, JLONG_FORMAT, use_hierarchy);
-  return use_hierarchy;
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->container_type();
 }
 
-
-/* memory_limit_in_bytes
- *
- * Return the limit of available memory for this process.
- *
- * return:
- *    memory limit in bytes or
- *    -1 for unlimited
- *    OSCONTAINER_ERROR for not supported
- */
 jlong OSContainer::memory_limit_in_bytes() {
-  if (!memory->cache_has_expired()) {
-    return memory->memory_limit_in_bytes();
-  }
-  jlong memory_limit = read_memory_limit_in_bytes();
-  // Update CgroupMemorySubsystem to avoid re-reading container settings too often
-  memory->set_memory_limit_in_bytes(memory_limit);
-  return memory_limit;
-}
-
-jlong OSContainer::read_memory_limit_in_bytes() {
-  GET_CONTAINER_INFO(julong, memory, "/memory.limit_in_bytes",
-                     "Memory Limit is: " JULONG_FORMAT, JULONG_FORMAT, memlimit);
-
-  if (memlimit >= _unlimited_memory) {
-    log_trace(os, container)("Non-Hierarchical Memory Limit is: Unlimited");
-    if (memory->is_hierarchical()) {
-      const char* matchline = "hierarchical_memory_limit";
-      const char* format = "%s " JULONG_FORMAT;
-      GET_CONTAINER_INFO_LINE(julong, memory, "/memory.stat", matchline,
-                             "Hierarchical Memory Limit is: " JULONG_FORMAT, format, hier_memlimit)
-      if (hier_memlimit >= _unlimited_memory) {
-        log_trace(os, container)("Hierarchical Memory Limit is: Unlimited");
-      } else {
-        return (jlong)hier_memlimit;
-      }
-    }
-    return (jlong)-1;
-  }
-  else {
-    return (jlong)memlimit;
-  }
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->memory_limit_in_bytes();
 }
 
 jlong OSContainer::memory_and_swap_limit_in_bytes() {
-  GET_CONTAINER_INFO(julong, memory, "/memory.memsw.limit_in_bytes",
-                     "Memory and Swap Limit is: " JULONG_FORMAT, JULONG_FORMAT, memswlimit);
-  if (memswlimit >= _unlimited_memory) {
-    log_trace(os, container)("Non-Hierarchical Memory and Swap Limit is: Unlimited");
-    if (memory->is_hierarchical()) {
-      const char* matchline = "hierarchical_memsw_limit";
-      const char* format = "%s " JULONG_FORMAT;
-      GET_CONTAINER_INFO_LINE(julong, memory, "/memory.stat", matchline,
-                             "Hierarchical Memory and Swap Limit is : " JULONG_FORMAT, format, hier_memlimit)
-      if (hier_memlimit >= _unlimited_memory) {
-        log_trace(os, container)("Hierarchical Memory and Swap Limit is: Unlimited");
-      } else {
-        return (jlong)hier_memlimit;
-      }
-    }
-    return (jlong)-1;
-  } else {
-    return (jlong)memswlimit;
-  }
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->memory_and_swap_limit_in_bytes();
 }
 
 jlong OSContainer::memory_soft_limit_in_bytes() {
-  GET_CONTAINER_INFO(julong, memory, "/memory.soft_limit_in_bytes",
-                     "Memory Soft Limit is: " JULONG_FORMAT, JULONG_FORMAT, memsoftlimit);
-  if (memsoftlimit >= _unlimited_memory) {
-    log_trace(os, container)("Memory Soft Limit is: Unlimited");
-    return (jlong)-1;
-  } else {
-    return (jlong)memsoftlimit;
-  }
-}
-
-/* memory_usage_in_bytes
- *
- * Return the amount of used memory for this process.
- *
- * return:
- *    memory usage in bytes or
- *    -1 for unlimited
- *    OSCONTAINER_ERROR for not supported
- */
-jlong OSContainer::memory_usage_in_bytes() {
-  GET_CONTAINER_INFO(jlong, memory, "/memory.usage_in_bytes",
-                     "Memory Usage is: " JLONG_FORMAT, JLONG_FORMAT, memusage);
-  return memusage;
-}
-
-/* memory_max_usage_in_bytes
- *
- * Return the maximum amount of used memory for this process.
- *
- * return:
- *    max memory usage in bytes or
- *    OSCONTAINER_ERROR for not supported
- */
-jlong OSContainer::memory_max_usage_in_bytes() {
-  GET_CONTAINER_INFO(jlong, memory, "/memory.max_usage_in_bytes",
-                     "Maximum Memory Usage is: " JLONG_FORMAT, JLONG_FORMAT, memmaxusage);
-  return memmaxusage;
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->memory_soft_limit_in_bytes();
 }
 
-/* active_processor_count
- *
- * Calculate an appropriate number of active processors for the
- * VM to use based on these three inputs.
- *
- * cpu affinity
- * cgroup cpu quota & cpu period
- * cgroup cpu shares
- *
- * Algorithm:
- *
- * Determine the number of available CPUs from sched_getaffinity
- *
- * If user specified a quota (quota != -1), calculate the number of
- * required CPUs by dividing quota by period.
- *
- * If shares are in effect (shares != -1), calculate the number
- * of CPUs required for the shares by dividing the share value
- * by PER_CPU_SHARES.
- *
- * All results of division are rounded up to the next whole number.
- *
- * If neither shares or quotas have been specified, return the
- * number of active processors in the system.
- *
- * If both shares and quotas have been specified, the results are
- * based on the flag PreferContainerQuotaForCPUCount.  If true,
- * return the quota value.  If false return the smallest value
- * between shares or quotas.
- *
- * If shares and/or quotas have been specified, the resulting number
- * returned will never exceed the number of active processors.
- *
- * return:
- *    number of CPUs
- */
-int OSContainer::active_processor_count() {
-  int quota_count = 0, share_count = 0;
-  int cpu_count, limit_count;
-  int result;
+jlong OSContainer::memory_usage_in_bytes() {
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->memory_usage_in_bytes();
+}
 
-  // We use a cache with a timeout to avoid performing expensive
-  // computations in the event this function is called frequently.
-  // [See 8227006].
-  if (!cpu->cache_has_expired()) {
-    log_trace(os, container)("OSContainer::active_processor_count (cached): %d", OSContainer::_active_processor_count);
-    return OSContainer::_active_processor_count;
-  }
-
-  cpu_count = limit_count = os::Linux::active_processor_count();
-  int quota  = cpu_quota();
-  int period = cpu_period();
-  int share  = cpu_shares();
-
-  if (quota > -1 && period > 0) {
-    quota_count = ceilf((float)quota / (float)period);
-    log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
-  }
-  if (share > -1) {
-    share_count = ceilf((float)share / (float)PER_CPU_SHARES);
-    log_trace(os, container)("CPU Share count based on shares: %d", share_count);
-  }
-
-  // If both shares and quotas are setup results depend
-  // on flag PreferContainerQuotaForCPUCount.
-  // If true, limit CPU count to quota
-  // If false, use minimum of shares and quotas
-  if (quota_count !=0 && share_count != 0) {
-    if (PreferContainerQuotaForCPUCount) {
-      limit_count = quota_count;
-    } else {
-      limit_count = MIN2(quota_count, share_count);
-    }
-  } else if (quota_count != 0) {
-    limit_count = quota_count;
-  } else if (share_count != 0) {
-    limit_count = share_count;
-  }
-
-  result = MIN2(cpu_count, limit_count);
-  log_trace(os, container)("OSContainer::active_processor_count: %d", result);
-
-  // Update the value and reset the cache timeout
-  OSContainer::_active_processor_count = result;
-  cpu->set_cache_expiry_time(OSCONTAINER_CACHE_TIMEOUT);
-
-  return result;
+jlong OSContainer::memory_max_usage_in_bytes() {
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->memory_max_usage_in_bytes();
 }
 
 char * OSContainer::cpu_cpuset_cpus() {
-  GET_CONTAINER_INFO_CPTR(cptr, cpuset, "/cpuset.cpus",
-                     "cpuset.cpus is: %s", "%1023s", cpus, 1024);
-  return os::strdup(cpus);
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->cpu_cpuset_cpus();
 }
 
 char * OSContainer::cpu_cpuset_memory_nodes() {
-  GET_CONTAINER_INFO_CPTR(cptr, cpuset, "/cpuset.mems",
-                     "cpuset.mems is: %s", "%1023s", mems, 1024);
-  return os::strdup(mems);
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->cpu_cpuset_memory_nodes();
 }
 
-/* cpu_quota
- *
- * Return the number of milliseconds per period
- * process is guaranteed to run.
- *
- * return:
- *    quota time in milliseconds
- *    -1 for no quota
- *    OSCONTAINER_ERROR for not supported
- */
+int OSContainer::active_processor_count() {
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->active_processor_count();
+}
+
 int OSContainer::cpu_quota() {
-  GET_CONTAINER_INFO(int, cpu, "/cpu.cfs_quota_us",
-                     "CPU Quota is: %d", "%d", quota);
-  return quota;
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->cpu_quota();
 }
 
 int OSContainer::cpu_period() {
-  GET_CONTAINER_INFO(int, cpu, "/cpu.cfs_period_us",
-                     "CPU Period is: %d", "%d", period);
-  return period;
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
+  return cgroup_subsystem->cpu_period();
 }
 
-/* cpu_shares
- *
- * Return the amount of cpu shares available to the process
- *
- * return:
- *    Share number (typically a number relative to 1024)
- *                 (2048 typically expresses 2 CPUs worth of processing)
- *    -1 for no share setup
- *    OSCONTAINER_ERROR for not supported
- */
 int OSContainer::cpu_shares() {
-  GET_CONTAINER_INFO(int, cpu, "/cpu.shares",
-                     "CPU Shares is: %d", "%d", shares);
-  // Convert 1024 to no shares setup
-  if (shares == 1024) return -1;
-
-  return shares;
+  assert(cgroup_subsystem != NULL, "cgroup subsystem not available");
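+  // The conversion of the 1024 default to "no shares configured" (see the
+  // removed lines above) presumably now happens inside the subsystem
+  // implementation.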
+  return cgroup_subsystem->cpu_shares();
 }
--- a/src/hotspot/os/linux/osContainer_linux.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/os/linux/osContainer_linux.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,6 @@
   static inline bool is_containerized();
   static const char * container_type();
 
-  static jlong uses_mem_hierarchy();
   static jlong memory_limit_in_bytes();
   static jlong memory_and_swap_limit_in_bytes();
   static jlong memory_soft_limit_in_bytes();
--- a/src/hotspot/os/linux/os_linux.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/os/linux/os_linux.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
 static bool zero_page_read_protected() { return true; }
 
 class Linux {
+  friend class CgroupSubsystem;
   friend class os;
   friend class OSContainer;
   friend class TestReserveMemorySpecial;
--- a/src/hotspot/os/windows/gc/z/zBackingFile_windows.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/z/zBackingFile_windows.hpp"
-#include "gc/z/zGlobals.hpp"
-#include "gc/z/zGranuleMap.inline.hpp"
-#include "gc/z/zMapper_windows.hpp"
-#include "logging/log.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/debug.hpp"
-
-// The backing file commits and uncommits physical memory, that can be
-// multi-mapped into the virtual address space. To support fine-graned
-// committing and uncommitting, each ZGranuleSize chunked is mapped to
-// a separate paging file mapping.
-
-ZBackingFile::ZBackingFile() :
-    _handles(MaxHeapSize),
-    _size(0) {}
-
-size_t ZBackingFile::size() const {
-  return _size;
-}
-
-HANDLE ZBackingFile::get_handle(uintptr_t offset) const {
-  HANDLE const handle = _handles.get(offset);
-  assert(handle != 0, "Should be set");
-  return handle;
-}
-
-void ZBackingFile::put_handle(uintptr_t offset, HANDLE handle) {
-  assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
-  assert(_handles.get(offset) == 0, "Should be cleared");
-  _handles.put(offset, handle);
-}
-
-void ZBackingFile::clear_handle(uintptr_t offset) {
-  assert(_handles.get(offset) != 0, "Should be set");
-  _handles.put(offset, 0);
-}
-
-size_t ZBackingFile::commit_from_paging_file(size_t offset, size_t size) {
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
-    if (handle == 0) {
-      return i;
-    }
-
-    put_handle(offset + i, handle);
-  }
-
-  return size;
-}
-
-size_t ZBackingFile::uncommit_from_paging_file(size_t offset, size_t size) {
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    HANDLE const handle = get_handle(offset + i);
-    clear_handle(offset + i);
-    ZMapper::close_paging_file_mapping(handle);
-  }
-
-  return size;
-}
-
-size_t ZBackingFile::commit(size_t offset, size_t length) {
-  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
-                      offset / M, (offset + length) / M, length / M);
-
-  const size_t committed = commit_from_paging_file(offset, length);
-
-  const size_t end = offset + committed;
-  if (end > _size) {
-    // Update size
-    _size = end;
-  }
-
-  return committed;
-}
-
-size_t ZBackingFile::uncommit(size_t offset, size_t length) {
-  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
-                      offset / M, (offset + length) / M, length / M);
-
-  return uncommit_from_paging_file(offset, length);
-}
-
-void ZBackingFile::map(uintptr_t addr, size_t size, size_t offset) const {
-  assert(is_aligned(offset, ZGranuleSize), "Misaligned");
-  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
-  assert(is_aligned(size, ZGranuleSize), "Misaligned");
-
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    HANDLE const handle = get_handle(offset + i);
-    ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize);
-  }
-}
-
-void ZBackingFile::unmap(uintptr_t addr, size_t size) const {
-  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
-  assert(is_aligned(size, ZGranuleSize), "Misaligned");
-
-  for (size_t i = 0; i < size; i += ZGranuleSize) {
-    ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize);
-  }
-}
--- a/src/hotspot/os/windows/gc/z/zBackingFile_windows.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef OS_WINDOWS_GC_Z_ZBACKINGFILE_WINDOWS_HPP
-#define OS_WINDOWS_GC_Z_ZBACKINGFILE_WINDOWS_HPP
-
-#include "gc/z/zGranuleMap.hpp"
-#include "memory/allocation.hpp"
-
-#include <Windows.h>
-
-class ZBackingFile {
-private:
-  ZGranuleMap<HANDLE> _handles;
-  size_t              _size;
-
-  HANDLE get_handle(uintptr_t offset) const;
-  void put_handle(uintptr_t offset, HANDLE handle);
-  void clear_handle(uintptr_t offset);
-
-  size_t commit_from_paging_file(size_t offset, size_t size);
-  size_t uncommit_from_paging_file(size_t offset, size_t size);
-
-public:
-  ZBackingFile();
-
-  size_t size() const;
-
-  size_t commit(size_t offset, size_t length);
-  size_t uncommit(size_t offset, size_t length);
-
-  void map(uintptr_t addr, size_t size, size_t offset) const;
-  void unmap(uintptr_t addr, size_t size) const;
-};
-
-#endif // OS_WINDOWS_GC_Z_ZBACKINGFILE_WINDOWS_HPP
--- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,18 +22,23 @@
  */
 
 #include "precompiled.hpp"
-#include "gc/z/zAddress.inline.hpp"
 #include "gc/z/zGlobals.hpp"
-#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zGranuleMap.inline.hpp"
 #include "gc/z/zMapper_windows.hpp"
-#include "gc/z/zPhysicalMemory.inline.hpp"
 #include "gc/z/zPhysicalMemoryBacking_windows.hpp"
+#include "logging/log.hpp"
 #include "runtime/globals.hpp"
-#include "runtime/init.hpp"
-#include "runtime/os.hpp"
-#include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 
+// The backing commits and uncommits physical memory that can be
+// multi-mapped into the virtual address space. To support fine-grained
+// committing and uncommitting, each ZGranuleSize'd chunk is mapped to
+// a separate paging file mapping.
+
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking() :
+    _handles(MaxHeapSize),
+    _size(0) {}
+
 bool ZPhysicalMemoryBacking::is_initialized() const {
   return true;
 }
@@ -42,178 +47,88 @@
   // Does nothing
 }
 
-bool ZPhysicalMemoryBacking::supports_uncommit() {
-  assert(!is_init_completed(), "Invalid state");
-  assert(_file.size() >= ZGranuleSize, "Invalid size");
+size_t ZPhysicalMemoryBacking::size() const {
+  return _size;
+}
 
-  // Test if uncommit is supported by uncommitting and then re-committing a granule
-  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
+HANDLE ZPhysicalMemoryBacking::get_handle(uintptr_t offset) const {
+  HANDLE const handle = _handles.get(offset);
+  assert(handle != 0, "Should be set");
+  return handle;
 }
 
-size_t ZPhysicalMemoryBacking::commit(size_t size) {
-  size_t committed = 0;
+void ZPhysicalMemoryBacking::put_handle(uintptr_t offset, HANDLE handle) {
+  assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
+  assert(_handles.get(offset) == 0, "Should be cleared");
+  _handles.put(offset, handle);
+}
 
-  // Fill holes in the backing file
-  while (committed < size) {
-    size_t allocated = 0;
-    const size_t remaining = size - committed;
-    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
-    if (start == UINTPTR_MAX) {
-      // No holes to commit
-      break;
+void ZPhysicalMemoryBacking::clear_handle(uintptr_t offset) {
+  assert(_handles.get(offset) != 0, "Should be set");
+  _handles.put(offset, 0);
+}
+
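+// Commit granule by granule by creating one paging-file-backed mapping per
+// granule. On failure this stops at the failing granule and returns the
+// number of bytes committed so far, so a short return signals partial success.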
+size_t ZPhysicalMemoryBacking::commit_from_paging_file(size_t offset, size_t size) {
+  for (size_t i = 0; i < size; i += ZGranuleSize) {
+    HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
+    if (handle == 0) {
+      return i;
     }
 
-    // Try commit hole
-    const size_t filled = _file.commit(start, allocated);
-    if (filled > 0) {
-      // Successful or partialy successful
-      _committed.free(start, filled);
-      committed += filled;
-    }
-    if (filled < allocated) {
-      // Failed or partialy failed
-      _uncommitted.free(start + filled, allocated - filled);
-      return committed;
-    }
+    put_handle(offset + i, handle);
+  }
+
+  return size;
+}
+
+size_t ZPhysicalMemoryBacking::uncommit_from_paging_file(size_t offset, size_t size) {
+  for (size_t i = 0; i < size; i += ZGranuleSize) {
+    HANDLE const handle = get_handle(offset + i);
+    clear_handle(offset + i);
+    ZMapper::close_paging_file_mapping(handle);
   }
 
-  // Expand backing file
-  if (committed < size) {
-    const size_t remaining = size - committed;
-    const uintptr_t start = _file.size();
-    const size_t expanded = _file.commit(start, remaining);
-    if (expanded > 0) {
-      // Successful or partialy successful
-      _committed.free(start, expanded);
-      committed += expanded;
-    }
+  return size;
+}
+
+size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) {
+  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  const size_t committed = commit_from_paging_file(offset, length);
+
+  const size_t end = offset + committed;
+  if (end > _size) {
+    // Update size
+    _size = end;
   }
 
   return committed;
 }
 
-size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
-  size_t uncommitted = 0;
-
-  // Punch holes in backing file
-  while (uncommitted < size) {
-    size_t allocated = 0;
-    const size_t remaining = size - uncommitted;
-    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
-    assert(start != UINTPTR_MAX, "Allocation should never fail");
+size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) {
+  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
 
-    // Try punch hole
-    const size_t punched = _file.uncommit(start, allocated);
-    if (punched > 0) {
-      // Successful or partialy successful
-      _uncommitted.free(start, punched);
-      uncommitted += punched;
-    }
-    if (punched < allocated) {
-      // Failed or partialy failed
-      _committed.free(start + punched, allocated - punched);
-      return uncommitted;
-    }
-  }
-
-  return uncommitted;
+  return uncommit_from_paging_file(offset, length);
 }
 
-ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
-  assert(is_aligned(size, ZGranuleSize), "Invalid size");
-
-  ZPhysicalMemory pmem;
+void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, size_t offset) const {
+  assert(is_aligned(offset, ZGranuleSize), "Misaligned");
+  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
+  assert(is_aligned(size, ZGranuleSize), "Misaligned");
 
-  // Allocate segments
-  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
-    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
-    assert(start != UINTPTR_MAX, "Allocation should never fail");
-    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
-  }
-
-  return pmem;
-}
-
-void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
-  const size_t nsegments = pmem.nsegments();
-
-  // Free segments
-  for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment& segment = pmem.segment(i);
-    _committed.free(segment.start(), segment.size());
+  for (size_t i = 0; i < size; i += ZGranuleSize) {
+    HANDLE const handle = get_handle(offset + i);
+    ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize);
   }
 }
 
-void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
-  const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
-  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
-}
-
-void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
-  const size_t nsegments = pmem.nsegments();
-  size_t size = 0;
-
-  // Map segments
-  for (size_t i = 0; i < nsegments; i++) {
-    const ZPhysicalMemorySegment& segment = pmem.segment(i);
-    _file.map(addr + size, segment.size(), segment.start());
-    size += segment.size();
-  }
-}
+void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const {
+  assert(is_aligned(addr, ZGranuleSize), "Misaligned");
+  assert(is_aligned(size, ZGranuleSize), "Misaligned");
 
-void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
-  _file.unmap(addr, pmem.size());
-}
-
-uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
-  // From an NMT point of view we treat the first heap view (marked0) as committed
-  return ZAddress::marked0(offset);
-}
-
-void ZPhysicalMemoryBacking::pretouch(uintptr_t offset, size_t size) const {
-  if (ZVerifyViews) {
-    // Pre-touch good view
-    pretouch_view(ZAddress::good(offset), size);
-  } else {
-    // Pre-touch all views
-    pretouch_view(ZAddress::marked0(offset), size);
-    pretouch_view(ZAddress::marked1(offset), size);
-    pretouch_view(ZAddress::remapped(offset), size);
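+  // Unmap each granule's view but leave a placeholder reservation in its
+  // place, keeping the virtual range reserved for later remapping (the
+  // Windows counterpart of the PROT_NONE remapping used on Linux).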
+  for (size_t i = 0; i < size; i += ZGranuleSize) {
+    ZMapper::unmap_view_preserve_placeholder(addr + i, ZGranuleSize);
   }
 }
-
-void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  if (ZVerifyViews) {
-    // Map good view
-    map_view(pmem, ZAddress::good(offset));
-  } else {
-    // Map all views
-    map_view(pmem, ZAddress::marked0(offset));
-    map_view(pmem, ZAddress::marked1(offset));
-    map_view(pmem, ZAddress::remapped(offset));
-  }
-}
-
-void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  if (ZVerifyViews) {
-    // Unmap good view
-    unmap_view(pmem, ZAddress::good(offset));
-  } else {
-    // Unmap all views
-    unmap_view(pmem, ZAddress::marked0(offset));
-    unmap_view(pmem, ZAddress::marked1(offset));
-    unmap_view(pmem, ZAddress::remapped(offset));
-  }
-}
-
-void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  // Map good view
-  assert(ZVerifyViews, "Should be enabled");
-  map_view(pmem, ZAddress::good(offset));
-}
-
-void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  // Unmap good view
-  assert(ZVerifyViews, "Should be enabled");
-  unmap_view(pmem, ZAddress::good(offset));
-}
--- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,42 +24,36 @@
 #ifndef OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP
 #define OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP
 
-#include "gc/z/zBackingFile_windows.hpp"
-#include "gc/z/zMemory.hpp"
+#include "gc/z/zGranuleMap.hpp"
 
-class ZPhysicalMemory;
+#include <Windows.h>
 
 class ZPhysicalMemoryBacking {
 private:
-  ZBackingFile   _file;
-  ZMemoryManager _committed;
-  ZMemoryManager _uncommitted;
+  ZGranuleMap<HANDLE> _handles;
+  size_t              _size;
 
-  void pretouch_view(uintptr_t addr, size_t size) const;
-  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
-  void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+  HANDLE get_handle(uintptr_t offset) const;
+  void put_handle(uintptr_t offset, HANDLE handle);
+  void clear_handle(uintptr_t offset);
+
+  size_t commit_from_paging_file(size_t offset, size_t size);
+  size_t uncommit_from_paging_file(size_t offset, size_t size);
 
 public:
+  ZPhysicalMemoryBacking();
+
   bool is_initialized() const;
 
   void warn_commit_limits(size_t max) const;
-  bool supports_uncommit();
 
-  size_t commit(size_t size);
-  size_t uncommit(size_t size);
-
-  ZPhysicalMemory alloc(size_t size);
-  void free(const ZPhysicalMemory& pmem);
+  size_t size() const;
 
-  uintptr_t nmt_address(uintptr_t offset) const;
-
-  void pretouch(uintptr_t offset, size_t size) const;
+  size_t commit(size_t offset, size_t length);
+  size_t uncommit(size_t offset, size_t length);
 
-  void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-
-  void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void map(uintptr_t addr, size_t size, size_t offset) const;
+  void unmap(uintptr_t addr, size_t size) const;
 };
 
 #endif // OS_WINDOWS_GC_Z_ZPHYSICALMEMORYBACKING_WINDOWS_HPP
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1040,7 +1040,7 @@
   // Now copy code back
 
   {
-    MutexLocker ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker ml_patch (THREAD, Patching_lock, Mutex::_no_safepoint_check_flag);
     //
     // Deoptimization may have happened while we waited for the lock.
     // In that case we don't bother to do any patching we just return
@@ -1259,7 +1259,7 @@
   // If we are patching in a non-perm oop, make sure the nmethod
   // is on the right list.
   {
-    MutexLocker ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker ml_code (THREAD, CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
 
--- a/src/hotspot/share/ci/ciEnv.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/ci/ciEnv.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -971,7 +971,7 @@
   nmethod* nm = NULL;
   {
     // To prevent compile queue updates.
-    MutexLocker locker(MethodCompileQueue_lock, THREAD);
+    MutexLocker locker(THREAD, MethodCompileQueue_lock);
 
     // Prevent SystemDictionary::add_to_hierarchy from running
     // and invalidating our dependencies until we install this method.
--- a/src/hotspot/share/ci/ciReplay.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/ci/ciReplay.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -631,7 +631,7 @@
     {
       // Grab a lock here to prevent multiple
       // MethodData*s from being created.
-      MutexLocker ml(MethodData_lock, THREAD);
+      MutexLocker ml(THREAD, MethodData_lock);
       if (method->method_data() == NULL) {
         ClassLoaderData* loader_data = method->method_holder()->class_loader_data();
         MethodData* method_data = MethodData::allocate(loader_data, methodHandle(THREAD, method), CHECK);
--- a/src/hotspot/share/classfile/classLoader.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/classfile/classLoader.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -726,7 +726,7 @@
       ModuleClassPathList* module_cpl = new ModuleClassPathList(module_sym);
       module_cpl->add_to_list(new_entry);
       {
-        MutexLocker ml(Module_lock, THREAD);
+        MutexLocker ml(THREAD, Module_lock);
         _exploded_entries->push(module_cpl);
       }
       log_info(class, load)("path: %s", path);
@@ -1090,7 +1090,7 @@
   // List of pointers to PackageEntrys that have loaded classes.
   GrowableArray<PackageEntry*>* loaded_class_pkgs = new GrowableArray<PackageEntry*>(50);
   {
-    MutexLocker ml(Module_lock, THREAD);
+    MutexLocker ml(THREAD, Module_lock);
 
     PackageEntryTable* pe_table =
       ClassLoaderData::the_null_class_loader_data()->packages();
@@ -1187,7 +1187,7 @@
       // The exploded build entries can be added to at any time so a lock is
       // needed when searching them.
       assert(!ClassLoader::has_jrt_entry(), "Must be exploded build");
-      MutexLocker ml(Module_lock, THREAD);
+      MutexLocker ml(THREAD, Module_lock);
       e = find_first_module_cpe(mod_entry, module_list);
     } else {
       e = find_first_module_cpe(mod_entry, module_list);
@@ -1670,7 +1670,7 @@
   }
 
   {
-    MutexLocker ml(Module_lock, THREAD);
+    MutexLocker ml(THREAD, Module_lock);
     ModuleEntry* jb_module = null_cld_modules->locked_create_entry(Handle(),
                                false, vmSymbols::java_base(), NULL, NULL, null_cld);
     if (jb_module == NULL) {
--- a/src/hotspot/share/classfile/javaClasses.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -882,7 +882,7 @@
 
     bool javabase_was_defined = false;
     {
-      MutexLocker m1(Module_lock, THREAD);
+      MutexLocker m1(THREAD, Module_lock);
       // Keep list of classes needing java.base module fixup
       if (!ModuleEntryTable::javabase_defined()) {
         assert(k->java_mirror() != NULL, "Class's mirror is null");
--- a/src/hotspot/share/classfile/modules.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/classfile/modules.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -194,7 +194,7 @@
 
   bool duplicate_javabase = false;
   {
-    MutexLocker m1(Module_lock, THREAD);
+    MutexLocker m1(THREAD, Module_lock);
 
     if (ModuleEntryTable::javabase_defined()) {
       duplicate_javabase = true;
@@ -379,7 +379,7 @@
   PackageEntryTable* package_table = NULL;
   PackageEntry* existing_pkg = NULL;
   {
-    MutexLocker ml(Module_lock, THREAD);
+    MutexLocker ml(THREAD, Module_lock);
 
     if (num_packages > 0) {
       package_table = get_package_entry_table(h_loader);
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -378,7 +378,7 @@
   bool child_already_loaded = false;
   bool throw_circularity_error = false;
   {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     InstanceKlass* childk = find_class(d_hash, child_name, dictionary);
     InstanceKlass* quicksuperk;
     // to support parallel loading: if child done loading, just return superclass
@@ -424,7 +424,7 @@
   // which keeps the loader_data alive, as well as all instanceKlasses in
   // the loader_data. parseClassFile adds the instanceKlass to loader_data.
   {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     placeholders()->find_and_remove(p_index, p_hash, child_name, loader_data, PlaceholderTable::LOAD_SUPER, THREAD);
     SystemDictionary_lock->notify_all();
   }
@@ -500,7 +500,7 @@
     Symbol*  kn = klass->name();
     unsigned int d_hash = dictionary->compute_hash(kn);
 
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     int d_index = dictionary->hash_to_index(d_hash);
     dictionary->add_protection_domain(d_index, d_hash, klass,
                                       protection_domain, THREAD);
@@ -587,7 +587,7 @@
   // parallelCapable class loaders do NOT wait for parallel superclass loads to complete
   // Serial class loaders and bootstrap classloader do wait for superclass loads
  if (!class_loader.is_null() && is_parallelCapable(class_loader)) {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     // Check if classloading completed while we were loading superclass or waiting
     return find_class(d_hash, name, dictionary);
   }
@@ -597,7 +597,7 @@
   bool super_load_in_progress = true;
   PlaceholderEntry* placeholder;
   while (super_load_in_progress) {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     // Check if classloading completed while we were loading superclass or waiting
     InstanceKlass* check = find_class(d_hash, name, dictionary);
     if (check != NULL) {
@@ -714,7 +714,7 @@
          name->as_C_string(),
          class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string());
   {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     InstanceKlass* check = find_class(d_hash, name, dictionary);
     if (check != NULL) {
       // InstanceKlass is already loaded, so just return it
@@ -774,7 +774,7 @@
     //    Allow parallel classloading of a class/classloader pair
 
     {
-      MutexLocker mu(SystemDictionary_lock, THREAD);
+      MutexLocker mu(THREAD, SystemDictionary_lock);
       if (class_loader.is_null() || !is_parallelCapable(class_loader)) {
         PlaceholderEntry* oldprobe = placeholders()->get_entry(p_index, p_hash, name, loader_data);
         if (oldprobe) {
@@ -861,7 +861,7 @@
 
           { // Grabbing the Compile_lock prevents systemDictionary updates
             // during compilations.
-            MutexLocker mu(Compile_lock, THREAD);
+            MutexLocker mu(THREAD, Compile_lock);
             update_dictionary(d_hash, p_index, p_hash,
               k, class_loader, THREAD);
           }
@@ -879,7 +879,7 @@
       // clean up placeholder entries for LOAD_INSTANCE success or error
       // This brackets the SystemDictionary updates for both defining
       // and initiating loaders
-      MutexLocker mu(SystemDictionary_lock, THREAD);
+      MutexLocker mu(THREAD, SystemDictionary_lock);
       placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD);
       SystemDictionary_lock->notify_all();
     }
@@ -894,7 +894,7 @@
 #ifdef ASSERT
   {
     ClassLoaderData* loader_data = k->class_loader_data();
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     InstanceKlass* kk = find_class(name, loader_data);
     assert(kk == k, "should be present in dictionary");
   }
@@ -1026,7 +1026,7 @@
     k->class_loader_data()->initialize_holder(Handle(THREAD, k->java_mirror()));
 
     {
-      MutexLocker mu_r(Compile_lock, THREAD);
+      MutexLocker mu_r(THREAD, Compile_lock);
 
       // Add to class hierarchy, initialize vtables, and do possible
       // deoptimizations.
@@ -1144,7 +1144,7 @@
 
   // Make sure we have an entry in the SystemDictionary on success
   debug_only( {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
 
     Klass* check = find_class(h_name, k->class_loader_data());
     assert(check == k, "should be present in the dictionary");
@@ -1582,7 +1582,7 @@
     unsigned int p_hash = placeholders()->compute_hash(name_h);
     int p_index = placeholders()->hash_to_index(p_hash);
 
-    MutexLocker mu_r(Compile_lock, THREAD);
+    MutexLocker mu_r(THREAD, Compile_lock);
 
     // Add to class hierarchy, initialize vtables, and do possible
     // deoptimizations.
@@ -1639,7 +1639,7 @@
   PlaceholderEntry* probe;
 
   {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     // First check if class already defined
     if (is_parallelDefine(class_loader)) {
       InstanceKlass* check = find_class(d_hash, name_h, dictionary);
@@ -1680,7 +1680,7 @@
 
   // definer must notify any waiting threads
   {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, name_h, loader_data);
     assert(probe != NULL, "DEFINE_CLASS placeholder lost?");
     if (probe != NULL) {
@@ -2066,7 +2066,7 @@
     Symbol *name = k->name();
     ClassLoaderData *loader_data = class_loader_data(class_loader);
 
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
 
     InstanceKlass* check = find_class(d_hash, name, loader_data->dictionary());
     if (check != NULL) {
@@ -2131,7 +2131,7 @@
   ClassLoaderData *loader_data = class_loader_data(class_loader);
 
   {
-    MutexLocker mu1(SystemDictionary_lock, THREAD);
+    MutexLocker mu1(THREAD, SystemDictionary_lock);
 
     // Make a new dictionary entry.
     Dictionary* dictionary = loader_data->dictionary();
@@ -2175,7 +2175,7 @@
     if (t != T_OBJECT) {
       klass = Universe::typeArrayKlassObj(t);
     } else {
-      MutexLocker mu(SystemDictionary_lock, THREAD);
+      MutexLocker mu(THREAD, SystemDictionary_lock);
       klass = constraints()->find_constrained_klass(fd.object_key(), class_loader);
     }
     // If element class already loaded, allocate array klass
@@ -2183,7 +2183,7 @@
       klass = klass->array_klass_or_null(fd.dimension());
     }
   } else {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SystemDictionary_lock);
     // Non-array classes are easy: simply check the constraint table.
     klass = constraints()->find_constrained_klass(class_name, class_loader);
   }
@@ -2224,7 +2224,7 @@
   unsigned int d_hash2 = dictionary2->compute_hash(constraint_name);
 
   {
-    MutexLocker mu_s(SystemDictionary_lock, THREAD);
+    MutexLocker mu_s(THREAD, SystemDictionary_lock);
     InstanceKlass* klass1 = find_class(d_hash1, constraint_name, dictionary1);
     InstanceKlass* klass2 = find_class(d_hash2, constraint_name, dictionary2);
     return constraints()->add_entry(constraint_name, klass1, class_loader1,
@@ -2239,7 +2239,7 @@
   unsigned int hash = resolution_errors()->compute_hash(pool, which);
   int index = resolution_errors()->hash_to_index(hash);
   {
-    MutexLocker ml(SystemDictionary_lock, Thread::current());
+    MutexLocker ml(Thread::current(), SystemDictionary_lock);
     resolution_errors()->add_entry(index, hash, pool, which, error, message);
   }
 }
@@ -2255,7 +2255,7 @@
   unsigned int hash = resolution_errors()->compute_hash(pool, which);
   int index = resolution_errors()->hash_to_index(hash);
   {
-    MutexLocker ml(SystemDictionary_lock, Thread::current());
+    MutexLocker ml(Thread::current(), SystemDictionary_lock);
     ResolutionErrorEntry* entry = resolution_errors()->find_entry(index, hash, pool, which);
     if (entry != NULL) {
       *message = entry->message();
@@ -2368,7 +2368,7 @@
     // Now grab the lock.  We might have to throw away the new method,
     // if a racing thread has managed to install one at the same time.
     {
-      MutexLocker ml(SystemDictionary_lock, THREAD);
+      MutexLocker ml(THREAD, SystemDictionary_lock);
       spe = invoke_method_table()->find_entry(index, hash, signature, iid);
       if (spe == NULL)
         spe = invoke_method_table()->add_entry(index, hash, signature, iid);
@@ -2627,7 +2627,7 @@
 
   if (can_be_cached) {
     // We can cache this MethodType inside the JVM.
-    MutexLocker ml(SystemDictionary_lock, THREAD);
+    MutexLocker ml(THREAD, SystemDictionary_lock);
     spe = invoke_method_table()->find_entry(index, hash, signature, null_iid);
     if (spe == NULL)
       spe = invoke_method_table()->add_entry(index, hash, signature, null_iid);
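
Several of the hunks above sit inside the same concurrency idiom: build the expensive object without the lock, then recheck the table under SystemDictionary_lock and keep whichever copy a racing thread installed first. A generic sketch of that idiom (std::mutex and std::map standing in for the HotSpot hashtable machinery):

    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    static std::mutex table_lock;
    static std::map<std::string, std::shared_ptr<int>> table;

    // Build the value outside the lock (it may be expensive), then publish
    // it under the lock; if a racing thread installed one first, discard
    // ours and return the winner.
    std::shared_ptr<int> find_or_install(const std::string& key) {
      {
        std::lock_guard<std::mutex> g(table_lock);
        auto it = table.find(key);
        if (it != table.end()) {
          return it->second;                  // fast path: already installed
        }
      }
      auto fresh = std::make_shared<int>(42); // expensive work, unlocked
      std::lock_guard<std::mutex> g(table_lock);
      auto it = table.find(key);
      if (it != table.end()) {
        return it->second;                    // racer won; drop our copy
      }
      table[key] = fresh;
      return fresh;
    }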
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -823,7 +823,7 @@
       ObjectLocker ol(lockObject, THREAD, DoObjectLock);
 
       {
-        MutexLocker mu(SystemDictionary_lock, THREAD);
+        MutexLocker mu(THREAD, SystemDictionary_lock);
         InstanceKlass* check = find_class(d_hash, name, dictionary);
         if (check != NULL) {
           return check;
@@ -935,7 +935,7 @@
   ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
 
   {
-    MutexLocker mu(SharedDictionary_lock, THREAD);
+    MutexLocker mu(THREAD, SharedDictionary_lock);
     if (ik->class_loader_data() != NULL) {
       //    ik is already loaded (by this loader or by a different loader)
       // or ik is being loaded by a different thread (by this loader or by a different loader)
@@ -978,7 +978,7 @@
   } else {
     bool isnew = _loaded_unregistered_classes.put(name, true);
     assert(isnew, "sanity");
-    MutexLocker mu_r(Compile_lock, THREAD); // add_to_hierarchy asserts this.
+    MutexLocker mu_r(THREAD, Compile_lock); // add_to_hierarchy asserts this.
     SystemDictionary::add_to_hierarchy(k, CHECK_0);
     return true;
   }
--- a/src/hotspot/share/compiler/compileBroker.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -266,7 +266,7 @@
   if (task->is_blocking()) {
     bool free_task = false;
     {
-      MutexLocker notifier(task->lock(), thread);
+      MutexLocker notifier(thread, task->lock());
       task->mark_complete();
 #if INCLUDE_JVMCI
       if (CompileBroker::compiler(task->comp_level())->is_jvmci()) {
@@ -765,7 +765,7 @@
 JavaThread* CompileBroker::make_thread(jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, Thread* THREAD) {
   JavaThread* new_thread = NULL;
   {
-    MutexLocker mu(Threads_lock, THREAD);
+    MutexLocker mu(THREAD, Threads_lock);
     if (comp != NULL) {
       if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
         CompilerCounters* counters = new CompilerCounters();
@@ -1102,7 +1102,7 @@
 
   // Acquire our lock.
   {
-    MutexLocker locker(MethodCompileQueue_lock, thread);
+    MutexLocker locker(thread, MethodCompileQueue_lock);
 
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
@@ -1507,7 +1507,7 @@
 //
 // Public wrapper for assign_compile_id that acquires the needed locks
 uint CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) {
-  MutexLocker locker(MethodCompileQueue_lock, thread);
+  MutexLocker locker(thread, MethodCompileQueue_lock);
   return assign_compile_id(method, osr_bci);
 }
 
@@ -1552,7 +1552,7 @@
  * @return true if this thread needs to free/recycle the task
  */
 bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) {
-  MonitorLocker ml(task->lock(), thread);
+  MonitorLocker ml(thread, task->lock());
   int progress_wait_attempts = 0;
   int methods_compiled = jvmci->methods_compiled();
   while (!task->is_complete() && !is_compilation_disabled_forever() &&
@@ -1613,7 +1613,7 @@
   } else
 #endif
   {
-    MonitorLocker ml(task->lock(), thread);
+    MonitorLocker ml(thread, task->lock());
     free_task = true;
     while (!task->is_complete() && !is_compilation_disabled_forever()) {
       ml.wait();
@@ -1783,7 +1783,7 @@
 
   {
     ASSERT_IN_VM;
-    MutexLocker only_one (CompileThread_lock, thread);
+    MutexLocker only_one (thread, CompileThread_lock);
     if (!ciObjectFactory::is_initialized()) {
       ciObjectFactory::initialize();
     }
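
The wait loops touched above follow the standard monitor pattern: take the task's lock, then wait in a loop, rechecking the predicate after every wakeup. A condition-variable rendering of the blocking-compile handshake (hypothetical slimmed-down types; the real CompileTask monitor differs):

    #include <condition_variable>
    #include <mutex>

    struct CompileTask {
      std::mutex lock;
      std::condition_variable cond;
      bool complete = false;

      // Compiler thread: mark the task done and wake blocked requesters.
      void mark_complete() {
        {
          std::lock_guard<std::mutex> g(lock);
          complete = true;
        }
        cond.notify_all();
      }

      // Requesting thread: wait in a loop, because wakeups can be spurious
      // and the predicate must be rechecked while holding the lock.
      void wait_for_completion() {
        std::unique_lock<std::mutex> ml(lock);
        while (!complete) {
          cond.wait(ml);
        }
      }
    };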
--- a/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/epsilon/epsilonBarrierSet.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/share/gc/epsilon/epsilonMemoryPool.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/epsilon/epsilonMemoryPool.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/share/gc/epsilon/epsilonThreadLocalData.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/epsilon/epsilonThreadLocalData.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -190,6 +190,10 @@
 // For logging zone values, ensuring consistency of level and tags.
 #define LOG_ZONES(...) log_debug( CTRL_TAGS )(__VA_ARGS__)
 
+static size_t buffers_to_cards(size_t value) {
+  return value * G1UpdateBufferSize;
+}
+
 // Package for pair of refinement thread activation and deactivation
 // thresholds.  The activation and deactivation levels are resp. the first
 // and second values of the pair.
@@ -207,8 +211,9 @@
     // available buffers near green_zone value.  When yellow_size is
     // large we don't want to allow a full step to accumulate before
     // doing any processing, as that might lead to significantly more
-    // than green_zone buffers to be processed during scanning.
-    step = MIN2(step, ParallelGCThreads / 2.0);
+    // than green_zone buffers to be processed during pause.  So limit
+    // to an extra half buffer per pause-time processing thread.
+    step = MIN2(step, buffers_to_cards(ParallelGCThreads) / 2.0);
   }
   size_t activate_offset = static_cast<size_t>(ceil(step * (worker_id + 1)));
   size_t deactivate_offset = static_cast<size_t>(floor(step * worker_id));
@@ -233,10 +238,6 @@
   return _thread_control.initialize(this, max_num_threads());
 }
 
-static size_t buffers_to_cards(size_t value) {
-  return value * G1UpdateBufferSize;
-}
-
 static size_t calc_min_yellow_zone_size() {
   size_t step = buffers_to_cards(G1ConcRefinementThresholdStep);
   uint n_workers = G1ConcurrentRefine::max_num_threads();
@@ -443,8 +444,8 @@
   return G1DirtyCardQueueSet::num_par_ids();
 }
 
-void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers) {
-  if (num_cur_buffers > activation_threshold(worker_id + 1)) {
+void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_cards) {
+  if (num_cur_cards > activation_threshold(worker_id + 1)) {
     _thread_control.maybe_activate_next(worker_id);
   }
 }
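
The fix here caps the threshold step in cards rather than buffers, i.e. at an extra half buffer of cards per pause-time worker. A self-contained sketch of how the per-worker activation and deactivation thresholds fall out of the capped step; the zone sizes and buffer size are made-up values for illustration:

    #include <cmath>
    #include <cstdio>

    static size_t buffers_to_cards(size_t buffers, size_t buffer_size) {
      return buffers * buffer_size; // buffer_size plays G1UpdateBufferSize
    }

    int main() {
      const size_t parallel_gc_threads = 4;
      const size_t buffer_size = 256;  // cards per buffer (made up)
      const size_t green_zone = 1024;  // illustrative zone values, in cards
      const size_t yellow_zone = 9216;
      const unsigned num_workers = 3;

      // Spread workers evenly across green..yellow, but cap the step so at
      // most an extra half buffer per pause-time thread can accumulate
      // before the next refinement worker activates (the change above).
      double step = (yellow_zone - green_zone) / (num_workers + 1.0);
      const double cap = buffers_to_cards(parallel_gc_threads, buffer_size) / 2.0;
      if (step > cap) {
        step = cap;
      }

      for (unsigned worker_id = 0; worker_id < num_workers; worker_id++) {
        size_t activate   = green_zone + (size_t)std::ceil(step * (worker_id + 1));
        size_t deactivate = green_zone + (size_t)std::floor(step * worker_id);
        std::printf("worker %u: activate at %zu cards, deactivate at %zu\n",
                    worker_id, activate, deactivate);
      }
      return 0;
    }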
--- a/src/hotspot/share/gc/parallel/asPSOldGen.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/parallel/asPSOldGen.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #include "gc/parallel/asPSOldGen.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/genArguments.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "gc/parallel/asPSYoungGen.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psScavenge.inline.hpp"
 #include "gc/parallel/psYoungGen.hpp"
 #include "gc/shared/gcUtil.hpp"
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -44,12 +44,7 @@
 
 void ParallelArguments::initialize() {
   GCArguments::initialize();
-  assert(UseParallelGC || UseParallelOldGC, "Error");
-  // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
-  if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
-    FLAG_SET_DEFAULT(UseParallelOldGC, true);
-  }
-  FLAG_SET_DEFAULT(UseParallelGC, true);
+  assert(UseParallelGC, "Error");
 
   // If no heap maximum was requested explicitly, use some reasonable fraction
   // of the physical memory, up to a maximum of 1GB.
@@ -85,13 +80,11 @@
     }
   }
 
-  if (UseParallelOldGC) {
-    // Par compact uses lower default values since they are treated as
-    // minimums.  These are different defaults because of the different
-    // interpretation and are not ergonomically set.
-    if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
-      FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
-    }
+  // Par compact uses lower default values since they are treated as
+  // minimums.  These are different defaults because of the different
+  // interpretation and are not ergonomically set.
+  if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
+    FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
   }
 }
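
With UseParallelOldGC gone, the MarkSweepDeadRatio default is now lowered unconditionally, but still only through the usual ergonomics idiom: a default is installed only when the user has not set the flag explicitly. A toy rendering of that idiom (a plain struct stands in for the real flag machinery):

    #include <cstddef>
    #include <cstdio>

    // Toy stand-in for a HotSpot flag: remembers whether the user set it.
    struct SizeFlag {
      size_t value = 0;
      bool is_default = true;

      void set_by_user(size_t v) { value = v; is_default = false; }
      void set_default_ergonomically(size_t v) { if (is_default) value = v; }
    };

    int main() {
      SizeFlag mark_sweep_dead_ratio;
      mark_sweep_dead_ratio.value = 5; // shipped default
      // Par compact treats the ratio as a minimum, so ergonomics lowers the
      // default to 1 -- but only if the user left the flag untouched.
      mark_sweep_dead_ratio.set_default_ergonomically(1);
      std::printf("MarkSweepDeadRatio = %zu\n", mark_sweep_dead_ratio.value);
      return 0;
    }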
 
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psMemoryPool.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.hpp"
@@ -116,7 +115,7 @@
   _gc_policy_counters =
     new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
 
-  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
+  if (!PSParallelCompact::initialize()) {
     return JNI_ENOMEM;
   }
 
@@ -165,11 +164,7 @@
   CollectedHeap::post_initialize();
   // Need to init the tenuring threshold
   PSScavenge::initialize();
-  if (UseParallelOldGC) {
-    PSParallelCompact::post_initialize();
-  } else {
-    PSMarkSweepProxy::initialize();
-  }
+  PSParallelCompact::post_initialize();
   PSPromotionManager::initialize();
 
   ScavengableNMethods::initialize(&_is_scavengable);
@@ -414,15 +409,11 @@
 }
 
 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
-  if (UseParallelOldGC) {
-    // The do_full_collection() parameter clear_all_soft_refs
-    // is interpreted here as maximum_compaction which will
-    // cause SoftRefs to be cleared.
-    bool maximum_compaction = clear_all_soft_refs;
-    PSParallelCompact::invoke(maximum_compaction);
-  } else {
-    PSMarkSweepProxy::invoke(clear_all_soft_refs);
-  }
+  // The do_full_collection() parameter clear_all_soft_refs
+  // is interpreted here as maximum_compaction which will
+  // cause SoftRefs to be cleared.
+  bool maximum_compaction = clear_all_soft_refs;
+  PSParallelCompact::invoke(maximum_compaction);
 }
 
 // Failed allocation policy. Must be called from the VM thread, and
@@ -554,9 +545,7 @@
 }
 
 jlong ParallelScavengeHeap::millis_since_last_gc() {
-  return UseParallelOldGC ?
-    PSParallelCompact::millis_since_last_gc() :
-    PSMarkSweepProxy::millis_since_last_gc();
+  return PSParallelCompact::millis_since_last_gc();
 }
 
 void ParallelScavengeHeap::prepare_for_verify() {
@@ -599,10 +588,8 @@
 void ParallelScavengeHeap::print_on_error(outputStream* st) const {
   this->CollectedHeap::print_on_error(st);
 
-  if (UseParallelOldGC) {
-    st->cr();
-    PSParallelCompact::print_on_error(st);
-  }
+  st->cr();
+  PSParallelCompact::print_on_error(st);
 }
 
 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
@@ -616,8 +603,7 @@
 void ParallelScavengeHeap::print_tracing_info() const {
   AdaptiveSizePolicyOutput::print();
   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
-  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
-      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
+  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
 }
 
 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Tue Jan 21 07:29:48 2020 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,13 +26,11 @@
 #define SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_INLINE_HPP
 
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psScavenge.hpp"
 
 inline size_t ParallelScavengeHeap::total_invocations() {
-  return UseParallelOldGC ? PSParallelCompact::total_invocations() :
-    PSMarkSweepProxy::total_invocations();
+  return PSParallelCompact::total_invocations();
 }
 
 inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,660 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
-#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/stringTable.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
-#include "gc/parallel/psOldGen.hpp"
-#include "gc/parallel/psScavenge.hpp"
-#include "gc/parallel/psYoungGen.hpp"
-#include "gc/serial/markSweep.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-#include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "gc/shared/weakProcessor.hpp"
-#include "memory/universe.hpp"
-#include "logging/log.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/biasedLocking.hpp"
-#include "runtime/flags/flagSetting.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/management.hpp"
-#include "services/memoryService.hpp"
-#include "utilities/align.hpp"
-#include "utilities/events.hpp"
-#include "utilities/stack.inline.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif
-
-elapsedTimer        PSMarkSweep::_accumulated_time;
-jlong               PSMarkSweep::_time_of_last_gc   = 0;
-CollectorCounters*  PSMarkSweep::_counters = NULL;
-
-SpanSubjectToDiscoveryClosure PSMarkSweep::_span_based_discoverer;
-
-void PSMarkSweep::initialize() {
-  _span_based_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region());
-  set_ref_processor(new ReferenceProcessor(&_span_based_discoverer));     // a vanilla ref proc
-  _counters = new CollectorCounters("Serial full collection pauses", 1);
-  MarkSweep::initialize();
-}
-
-// This method contains all heap specific policy for invoking mark sweep.
-// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
-// the heap. It will do nothing further. If we need to bail out for policy
-// reasons, scavenge before full gc, or any other specialized behavior, it
-// needs to be added here.
-//
-// Note that this method should only be called from the vm_thread while
-// at a safepoint!
-//
-// Note that the all_soft_refs_clear flag in the soft ref policy
-// may be true because this method can be called without intervening
- * activity.  For example, when the heap space is tight and full measures
-// are being taken to free space.
-
-void PSMarkSweep::invoke(bool maximum_heap_compaction) {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  GCCause::Cause gc_cause = heap->gc_cause();
-  PSAdaptiveSizePolicy* policy = heap->size_policy();
-  IsGCActiveMark mark;
-
-  if (ScavengeBeforeFullGC) {
-    PSScavenge::invoke_no_policy();
-  }
-
-  const bool clear_all_soft_refs =
-    heap->soft_ref_policy()->should_clear_all_soft_refs();
-
-  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
-  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
-  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
-}
-
-// This method contains no policy. You should probably
-// be calling invoke() instead.
-bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  assert(ref_processor() != NULL, "Sanity");
-
-  if (GCLocker::check_active_before_gc()) {
-    return false;
-  }
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  GCCause::Cause gc_cause = heap->gc_cause();
-
-  GCIdMark gc_id_mark;
-  _gc_timer->register_gc_start();
-  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
-
-  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
-
-  // The scope of casr should end after code that can change
-  // SoftRefPolicy::_should_clear_all_soft_refs.
-  ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());
-
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  // Increment the invocation count
-  heap->increment_total_collections(true /* full */);
-
-  // Save information needed to minimize mangling
-  heap->record_gen_tops_before_GC();
-
-  // We need to track unique mark sweep invocations as well.
-  _total_invocations++;
-
-  heap->print_heap_before_gc();
-  heap->trace_heap_before_gc(_gc_tracer);
-
-  // Fill in TLABs
-  heap->ensure_parsability(true);  // retire TLABs
-
-  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
-    HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify("Before GC");
-  }
-
-  // Verify object start arrays
-  if (VerifyObjectStartArray &&
-      VerifyBeforeGC) {
-    old_gen->verify_object_start_array();
-  }
-
-  // Filled in below to track the state of the young gen after the collection.
-  bool eden_empty;
-  bool survivors_empty;
-  bool young_gen_empty;
-
-  {
-    HandleMark hm;
-
-    GCTraceCPUTime tcpu;
-    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);
-
-    heap->pre_full_gc_dump(_gc_timer);
-
-    TraceCollectorStats tcs(counters());
-    TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause);
-
-    if (log_is_enabled(Debug, gc, heap, exit)) {
-      accumulated_time()->start();
-    }
-
-    // Let the size policy know we're starting
-    size_policy->major_collection_begin();
-
-    BiasedLocking::preserve_marks();
-
-    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
-
-    allocate_stacks();
-
-#if COMPILER2_OR_JVMCI
-    DerivedPointerTable::clear();
-#endif
-
-    ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(clear_all_softrefs);
-
-    mark_sweep_phase1(clear_all_softrefs);
-
-    mark_sweep_phase2();
-
-#if COMPILER2_OR_JVMCI
-    // Don't add any more derived pointers during phase3
-    assert(DerivedPointerTable::is_active(), "Sanity");
-    DerivedPointerTable::set_active(false);
-#endif
-
-    mark_sweep_phase3();
-
-    mark_sweep_phase4();
-
-    restore_marks();
-
-    deallocate_stacks();
-
-    if (ZapUnusedHeapArea) {
-      // Do a complete mangle (top to end) because the usage for
-      // scratch does not maintain a top pointer.
-      young_gen->to_space()->mangle_unused_area_complete();
-    }
-
-    eden_empty = young_gen->eden_space()->is_empty();
-    if (!eden_empty) {
-      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
-    }
-
-    // Update heap occupancy information which is used as
-    // input to soft ref clearing policy at the next gc.
-    Universe::update_heap_info_at_gc();
-
-    survivors_empty = young_gen->from_space()->is_empty() &&
-                      young_gen->to_space()->is_empty();
-    young_gen_empty = eden_empty && survivors_empty;
-
-    PSCardTable* card_table = heap->card_table();
-    MemRegion old_mr = heap->old_gen()->reserved();
-    if (young_gen_empty) {
-      card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
-    } else {
-      card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
-    }
-
-    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
-    ClassLoaderDataGraph::purge();
-    MetaspaceUtils::verify_metrics();
-
-    BiasedLocking::restore_marks();
-    heap->prune_scavengable_nmethods();
-
-#if COMPILER2_OR_JVMCI
-    DerivedPointerTable::update_pointers();
-#endif
-
-    assert(!ref_processor()->discovery_enabled(), "Should have been disabled earlier");
-
-    // Update time of last GC
-    reset_millis_since_last_gc();
-
-    // Let the size policy know we're done
-    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
-
-    if (UseAdaptiveSizePolicy) {
-
-     log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
-     log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
-                         old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
-
-      // Don't check if the size_policy is ready here.  Let
-      // the size_policy check that internally.
-      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
-          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
-        // Swap the survivor spaces if from_space is empty. The
-        // resize_young_gen() called below is normally used after
-        // a successful young GC and swapping of survivor spaces;
-        // otherwise, it will fail to resize the young gen with
-        // the current implementation.
-        if (young_gen->from_space()->is_empty()) {
-          young_gen->from_space()->clear(SpaceDecorator::Mangle);
-          young_gen->swap_spaces();
-        }
-
-        // Calculate optimal free space amounts
-        assert(young_gen->max_size() >
-          young_gen->from_space()->capacity_in_bytes() +
-          young_gen->to_space()->capacity_in_bytes(),
-          "Sizes of space in young gen are out of bounds");
-
-        size_t young_live = young_gen->used_in_bytes();
-        size_t eden_live = young_gen->eden_space()->used_in_bytes();
-        size_t old_live = old_gen->used_in_bytes();
-        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
-        size_t max_old_gen_size = old_gen->max_gen_size();
-        size_t max_eden_size = young_gen->max_size() -
-          young_gen->from_space()->capacity_in_bytes() -
-          young_gen->to_space()->capacity_in_bytes();
-
-        // Used for diagnostics
-        size_policy->clear_generation_free_space_flags();
-
-        size_policy->compute_generations_free_space(young_live,
-                                                    eden_live,
-                                                    old_live,
-                                                    cur_eden,
-                                                    max_old_gen_size,
-                                                    max_eden_size,
-                                                    true /* full gc*/);
-
-        size_policy->check_gc_overhead_limit(eden_live,
-                                             max_old_gen_size,
-                                             max_eden_size,
-                                             true /* full gc*/,
-                                             gc_cause,
-                                             heap->soft_ref_policy());
-
-        size_policy->decay_supplemental_growth(true /* full gc*/);
-
-        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
-
-        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
-                               size_policy->calculated_survivor_size_in_bytes());
-      }
-      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
-    }
-
-    if (UsePerfData) {
-      heap->gc_policy_counters()->update_counters();
-      heap->gc_policy_counters()->update_old_capacity(
-        old_gen->capacity_in_bytes());
-      heap->gc_policy_counters()->update_young_capacity(
-        young_gen->capacity_in_bytes());
-    }
-
-    heap->resize_all_tlabs();
-
-    // We collected the heap, recalculate the metaspace capacity
-    MetaspaceGC::compute_new_size();
-
-    if (log_is_enabled(Debug, gc, heap, exit)) {
-      accumulated_time()->stop();
-    }
-
-    heap->print_heap_change(pre_gc_values);
-
-    // Track memory usage and detect low memory
-    MemoryService::track_memory_usage();
-    heap->update_counters();
-
-    heap->post_full_gc_dump(_gc_timer);
-  }
-
-  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
-    HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify("After GC");
-  }
-
-  // Re-verify object start arrays
-  if (VerifyObjectStartArray &&
-      VerifyAfterGC) {
-    old_gen->verify_object_start_array();
-  }
-
-  if (ZapUnusedHeapArea) {
-    old_gen->object_space()->check_mangled_unused_area_complete();
-  }
-
-  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
-
-  heap->print_heap_after_gc();
-  heap->trace_heap_after_gc(_gc_tracer);
-
-#ifdef TRACESPINNING
-  ParallelTaskTerminator::print_termination_counts();
-#endif
-
-  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
-
-  _gc_timer->register_gc_end();
-
-  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
-
-  return true;
-}
-
-bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
-                                             PSYoungGen* young_gen,
-                                             PSOldGen* old_gen) {
-  MutableSpace* const eden_space = young_gen->eden_space();
-  assert(!eden_space->is_empty(), "eden must be non-empty");
-  assert(young_gen->virtual_space()->alignment() ==
-         old_gen->virtual_space()->alignment(), "alignments do not match");
-
-  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
-    return false;
-  }
-
-  // Both generations must be completely committed.
-  if (young_gen->virtual_space()->uncommitted_size() != 0) {
-    return false;
-  }
-  if (old_gen->virtual_space()->uncommitted_size() != 0) {
-    return false;
-  }
-
-  // Figure out how much to take from eden.  Include the average amount promoted
-  // in the total; otherwise the next young gen GC will simply bail out to a
-  // full GC.
-  const size_t alignment = old_gen->virtual_space()->alignment();
-  const size_t eden_used = eden_space->used_in_bytes();
-  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
-  const size_t absorb_size = align_up(eden_used + promoted, alignment);
-  const size_t eden_capacity = eden_space->capacity_in_bytes();
-
-  if (absorb_size >= eden_capacity) {
-    return false; // Must leave some space in eden.
-  }
-
-  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
-  if (new_young_size < young_gen->min_gen_size()) {
-    return false; // Respect young gen minimum size.
-  }
-
-  log_trace(gc, ergo, heap)(" absorbing " SIZE_FORMAT "K:  "
-                            "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
-                            "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
-                            "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
-                            absorb_size / K,
-                            eden_capacity / K, (eden_capacity - absorb_size) / K,
-                            young_gen->from_space()->used_in_bytes() / K,
-                            young_gen->to_space()->used_in_bytes() / K,
-                            young_gen->capacity_in_bytes() / K, new_young_size / K);
-
-  // Fill the unused part of the old gen.
-  MutableSpace* const old_space = old_gen->object_space();
-  HeapWord* const unused_start = old_space->top();
-  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
-
-  if (unused_words > 0) {
-    if (unused_words < CollectedHeap::min_fill_size()) {
-      return false;  // If the old gen cannot be filled, must give up.
-    }
-    CollectedHeap::fill_with_objects(unused_start, unused_words);
-  }
-
-  // Take the live data from eden and set both top and end in the old gen to
-  // eden top.  (Need to set end because reset_after_change() mangles the region
-  // from end to virtual_space->high() in debug builds).
-  HeapWord* const new_top = eden_space->top();
-  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
-                                        absorb_size);
-  young_gen->reset_after_change();
-  old_space->set_top(new_top);
-  old_space->set_end(new_top);
-  old_gen->reset_after_change();
-
-  // Update the object start array for the filler object and the data from eden.
-  ObjectStartArray* const start_array = old_gen->start_array();
-  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
-    start_array->allocate_block(p);
-  }
-
-  // Could update the promoted average here, but it is not typically updated at
-  // full GCs and the value to use is unclear.  Something like
-  //
-  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
-
-  size_policy->set_bytes_absorbed_from_eden(absorb_size);
-  return true;
-}
-
-void PSMarkSweep::allocate_stacks() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSYoungGen* young_gen = heap->young_gen();
-
-  MutableSpace* to_space = young_gen->to_space();
-  _preserved_marks = (PreservedMark*)to_space->top();
-  _preserved_count = 0;
-
-  // We want to calculate the size in bytes first.
-  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
-  // Now divide by the size of a PreservedMark
-  _preserved_count_max /= sizeof(PreservedMark);
-}
-
-
-void PSMarkSweep::deallocate_stacks() {
-  _preserved_mark_stack.clear(true);
-  _preserved_oop_stack.clear(true);
-  _marking_stack.clear();
-  _objarray_stack.clear(true);
-}
-
-void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
-  // Recursively traverse all live objects and mark them
-  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
-  // Need to clear claim bits before the tracing starts.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  // General strong roots.
-  {
-    ParallelScavengeHeap::ParStrongRootsScope psrs;
-    Universe::oops_do(mark_and_push_closure());
-    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
-    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
-    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
-    ObjectSynchronizer::oops_do(mark_and_push_closure());
-    Management::oops_do(mark_and_push_closure());
-    JvmtiExport::oops_do(mark_and_push_closure());
-    SystemDictionary::oops_do(mark_and_push_closure());
-    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
-    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
-    //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
-    AOT_ONLY(AOTLoader::oops_do(mark_and_push_closure());)
-  }
-
-  // Flush marking stack.
-  follow_stack();
-
-  // Process reference objects found during marking
-  {
-    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
-
-    ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
-    const ReferenceProcessorStats& stats =
-      ref_processor()->process_discovered_references(
-        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
-    gc_tracer()->report_gc_reference_stats(stats);
-    pt.print_all_references();
-  }
-
-  // This is the point where the entire marking should have completed.
-  assert(_marking_stack.is_empty(), "Marking should have completed");
-
-  {
-    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
-    WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
-  }
-
-  {
-    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);
-
-    // Unload classes and purge the SystemDictionary.
-    bool purged_class = SystemDictionary::do_unloading(_gc_timer);
-
-    // Unload nmethods.
-    CodeCache::do_unloading(is_alive_closure(), purged_class);
-
-    // Prune dead klasses from subklass/sibling/implementor lists.
-    Klass::clean_weak_klass_links(purged_class);
-
-    // Clean JVMCI metadata handles.
-    JVMCI_ONLY(JVMCI::do_unloading(purged_class));
-  }
-
-  _gc_tracer->report_object_count_after_gc(is_alive_closure());
-}
-
-
-void PSMarkSweep::mark_sweep_phase2() {
-  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
-
-  // Now all live objects are marked, compute the new object addresses.
-
-  // It is not required that we traverse spaces in the same order in
-  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
-  // tracking expects us to do so. See comment under phase4.
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSOldGen* old_gen = heap->old_gen();
-
-  // Begin compacting into the old gen
-  PSMarkSweepDecorator::set_destination_decorator_tenured();
-
-  // This will also compact the young gen spaces.
-  old_gen->precompact();
-}
-
-void PSMarkSweep::mark_sweep_phase3() {
-  // Adjust the pointers to reflect the new locations
-  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  // Need to clear claim bits before the tracing starts.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  // General strong roots.
-  Universe::oops_do(adjust_pointer_closure());
-  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
-  Threads::oops_do(adjust_pointer_closure(), NULL);
-  ObjectSynchronizer::oops_do(adjust_pointer_closure());
-  Management::oops_do(adjust_pointer_closure());
-  JvmtiExport::oops_do(adjust_pointer_closure());
-  SystemDictionary::oops_do(adjust_pointer_closure());
-  ClassLoaderDataGraph::cld_do(adjust_cld_closure());
-
-  // Now adjust pointers in remaining weak roots.  (All of which should
-  // have been cleared if they pointed to non-surviving objects.)
-  // Global (weak) JNI handles
-  WeakProcessor::oops_do(adjust_pointer_closure());
-
-  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
-  CodeCache::blobs_do(&adjust_from_blobs);
-  AOT_ONLY(AOTLoader::oops_do(adjust_pointer_closure());)
-
-  ref_processor()->weak_oops_do(adjust_pointer_closure());
-  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
-
-  adjust_marks();
-
-  young_gen->adjust_pointers();
-  old_gen->adjust_pointers();
-}
-
-void PSMarkSweep::mark_sweep_phase4() {
-  EventMark m("4 compact heap");
-  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
-
-  // All pointers are now adjusted, move objects accordingly
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  old_gen->compact();
-  young_gen->compact();
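-  // compact() slides each live object to the address recorded in its mark
-  // word during phase 2 and reinitializes the mark words afterwards.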
-}
-
-jlong PSMarkSweep::millis_since_last_gc() {
-  // We need a monotonically non-decreasing time in ms but
-  // os::javaTimeMillis() does not guarantee monotonicity.
-  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-  jlong ret_val = now - _time_of_last_gc;
-  // XXX See note in genCollectedHeap::millis_since_last_gc().
-  if (ret_val < 0) {
-    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
-    return 0;
-  }
-  return ret_val;
-}
-
-void PSMarkSweep::reset_millis_since_last_gc() {
-  // We need a monotonically non-decreasing time in ms but
-  // os::javaTimeMillis() does not guarantee monotonicity.
-  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-}
--- a/src/hotspot/share/gc/parallel/psMarkSweep.hpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_PSMARKSWEEP_HPP
-#define SHARE_GC_PARALLEL_PSMARKSWEEP_HPP
-
-#include "gc/serial/markSweep.hpp"
-#include "gc/shared/collectorCounters.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-#include "utilities/stack.hpp"
-
-class PSAdaptiveSizePolicy;
-class PSYoungGen;
-class PSOldGen;
-
-class PSMarkSweep : public MarkSweep {
- private:
-  static elapsedTimer        _accumulated_time;
-  static jlong               _time_of_last_gc;   // ms
-  static CollectorCounters*  _counters;
-
-  static SpanSubjectToDiscoveryClosure _span_based_discoverer;
-
-  // Closure accessors
-  static OopClosure* mark_and_push_closure()   { return &MarkSweep::mark_and_push_closure; }
-  static VoidClosure* follow_stack_closure()   { return &MarkSweep::follow_stack_closure; }
-  static CLDClosure* follow_cld_closure()      { return &MarkSweep::follow_cld_closure; }
-  static OopClosure* adjust_pointer_closure()  { return &MarkSweep::adjust_pointer_closure; }
-  static CLDClosure* adjust_cld_closure()      { return &MarkSweep::adjust_cld_closure; }
-  static BoolObjectClosure* is_alive_closure() { return &MarkSweep::is_alive; }
-
-  // Mark live objects
-  static void mark_sweep_phase1(bool clear_all_softrefs);
-  // Calculate new addresses
-  static void mark_sweep_phase2();
-  // Update pointers
-  static void mark_sweep_phase3();
-  // Move objects to new positions
-  static void mark_sweep_phase4();
-
-  // Temporary data structures for traversal and storing/restoring marks
-  static void allocate_stacks();
-  static void deallocate_stacks();
-
-  // If objects are left in eden after a collection, try to move the boundary
-  // and absorb them into the old gen.  Returns true if eden was emptied.
-  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
-                                         PSYoungGen* young_gen,
-                                         PSOldGen* old_gen);
-
-  // Reset time since last full gc
-  static void reset_millis_since_last_gc();
-
- public:
-  static void invoke(bool clear_all_softrefs);
-  static bool invoke_no_policy(bool clear_all_softrefs);
-
-  static void initialize();
-
-  // Public accessors
-  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
-  static CollectorCounters* counters()    { return _counters; }
-
-  // Time since last full gc (in milliseconds)
-  static jlong millis_since_last_gc();
-};
-
-#endif // SHARE_GC_PARALLEL_PSMARKSWEEP_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp	Fri Jan 17 12:20:00 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,395 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc/parallel/objectStartArray.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/parMarkBitMap.inline.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
-#include "gc/parallel/psParallelCompact.inline.hpp"
-#include "gc/serial/markSweep.inline.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/prefetch.inline.hpp"
-
-PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
-
-
-void PSMarkSweepDecorator::set_destination_decorator_tenured() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  _destination_decorator = heap->old_gen()->object_mark_sweep();
-}
-
-void PSMarkSweepDecorator::advance_destination_decorator() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
-  assert(_destination_decorator != NULL, "Sanity");
-
-  PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep();
-  PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep();
-  PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep();
-  PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep();
-
-  if (_destination_decorator == first) {
-    _destination_decorator = second;
-  } else if (_destination_decorator == second) {
-    _destination_decorator = third;
-  } else if (_destination_decorator == third) {
-    _destination_decorator = fourth;
-  } else {
-    fatal("PSMarkSweep attempting to advance past last compaction area");
-  }
-}
-
-PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() {
-  assert(_destination_decorator != NULL, "Sanity");
-
-  return _destination_decorator;
-}
-
-// FIXME: The object forwarding code below is duplicated between the
-// live-object and deadspace paths. Factor it out!
-//
-// This method "precompacts" the objects inside its space into dest, placing
-// forwarding pointers into their markWords for use by adjust_pointers. If
-// "dest" overflows, we finish by compacting into our own space.
-
-void PSMarkSweepDecorator::precompact() {
-  // Reset our own compact top.
-  set_compaction_top(space()->bottom());
-
-  /* We allow some amount of garbage towards the bottom of the space, so
-   * we don't start compacting before there is a significant gain to be made.
-   * Occasionally, we want to ensure a full compaction, which is determined
-   * by the MarkSweepAlwaysCompactCount parameter. This is a significant
-   * performance improvement!
-   */
-  bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
-
-  size_t allowed_deadspace = 0;
-  if (skip_dead) {
-    const size_t ratio = allowed_dead_ratio();
-    allowed_deadspace = space()->capacity_in_words() * ratio / 100;
-  }
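-  // E.g. (illustrative numbers): with allowed_dead_ratio() == 5 and a space
-  // of 1M words, up to 50K words of dead space may be kept in place as
-  // filler rather than compacted away.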
-
-  // Fetch the current destination decorator
-  PSMarkSweepDecorator* dest = destination_decorator();
-  ObjectStartArray* start_array = dest->start_array();
-
-  HeapWord* compact_top = dest->compaction_top();
-  HeapWord* compact_end = dest->space()->end();
-
-  HeapWord* q = space()->bottom();
-  HeapWord* t = space()->top();
-
-  HeapWord* end_of_live = q;    /* One byte beyond the last byte of the last
-                                   live object. */
-  HeapWord* first_dead = space()->end(); /* The first dead object. */
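-  // Scan invariant: [bottom, q) has been processed; end_of_live and
-  // first_dead are consumed by the later adjust and compact passes.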
-
-  const intx interval = PrefetchScanIntervalInBytes;
-
-  while (q < t) {
-    assert(oop(q)->mark_raw().is_marked() || oop(q)->mark_raw().is_unlocked() ||
-           oop(q)->mark_raw().has_bias_pattern(),
-           "these are the only valid states during a mark sweep");
-    if (oop(q)->is_gc_marked()) {
-      /* prefetch beyond q */
-      Prefetch::write(q, interval);
-      size_t size = oop(q)->size();
-
-      size_t compaction_max_size = pointer_delta(compact_end, compact_top);
-
-      // This should only happen when the live data being compacted
-      // overflows the old gen. Compaction then advances into the young gen
-      // spaces, whose start_array is NULL (they are not covered by one).
-      while (size > compaction_max_size) {
-        // First record the last compact_top
-        dest->set_compaction_top(compact_top);
-
-        // Advance to the next compaction decorator
-        advance_destination_decorator();
-        dest = destination_decorator();
-
-        // Update compaction info
-        start_array = dest->start_array();
-        compact_top = dest->compaction_top();
-        compact_end = dest->space()->end();
-        assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
-        assert(compact_end > compact_top, "Must always be space remaining");
-        compaction_max_size =
-          pointer_delta(compact_end, compact_top);
-      }
-
-      // store the forwarding pointer into the mark word
-      if (q != compact_top) {
-        oop(q)->forward_to(oop(compact_top));
-        assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
-      } else {
-        // if the object isn't moving we can just set the mark to the default
-        // mark and handle it specially later on.
-        oop(q)->init_mark_raw();
-        assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
-      }
-
-      // Update object start array
-      if (start_array) {
-        start_array->allocate_block(compact_top);
-      }
-
-      compact_top += size;
-      assert(compact_top <= dest->space()->end(),
-        "Exceeding space in destination");
-
-      q += size;
-      end_of_live = q;
-    } else {
-      /* run over all the contiguous dead objects */
-      HeapWord* end = q;
-      do {
-        /* prefetch beyond end */
-        Prefetch::write(end, interval);
-        end += oop(end)->size();
-      } while (end < t && (!oop(end)->is_gc_marked()));
-
-      /* see if we might want to pretend this object is alive so that
-       * we don't have to compact quite as often.
-       */
-      if (allowed_deadspace > 0 && q == compact_top) {
-        size_t sz = pointer_delta(end, q);
-        if (insert_deadspace(allowed_deadspace, q, sz)) {
-          size_t compaction_max_size = pointer_delta(compact_end, compact_top);
-
-          // This should only happen when the live data being compacted
-          // overflows the old gen. Compaction then advances into the young
-          // gen spaces, whose start_array is NULL (they are not covered by
-          // one).
-          while (sz > compaction_max_size) {
-            // First record the last compact_top
-            dest->set_compaction_top(compact_top);
-
-            // Advance to the next compaction decorator
-            advance_destination_decorator();
-            dest = destination_decorator();
-
-            // Update compaction info
-            start_array = dest->start_array();
-            compact_top = dest->compaction_top();
-            compact_end = dest->space()->end();
-            assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
-            assert(compact_end > compact_top, "Must always be space remaining");
-            compaction_max_size =
-              pointer_delta(compact_end, compact_top);
-          }
-
-          // store the forwarding pointer into the mark word
-          if (q != compact_top) {
-            oop(q)->forward_to(oop(compact_top));
-            assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
-          } else {
-            // if the object isn't moving we can just set the mark to the default
-            // mark and handle it specially later on.
-            oop(q)->init_mark_raw();
-            assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
-          }
-
-          // Update object start array
-          if (start_array) {
-            start_array->allocate_block(compact_top);
-          }
-
-          compact_top += sz;
-          assert(compact_top <= dest->space()->end(),
-            "Exceeding space in destination");
-
-          q = end;
-          end_of_live = end;
-          continue;
-        }
-      }
-
-      // q is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
-      (*(HeapWord**)q) = end;
-
-      /* see if this is the first dead region. */
-      if (q < first_dead) {
-        first_dead = q;
-      }
-
-      /* move on to the next object */
-      q = end;
-    }
-  }
-
-  assert(q == t, "just checking");
-  _end_of_live = end_of_live;
-  if (end_of_live < first_dead) {
-    first_dead = end_of_live;
-  }
-  _first_dead = first_dead;
-
-  // Update compaction top
-  dest->set_compaction_top(compact_top);
-}
-
-bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
-                                            HeapWord* q, size_t deadlength) {
-  if (allowed_deadspace_words >= deadlength) {
-    allowed_deadspace_words -= deadlength;
-    CollectedHeap::fill_with_object(q, deadlength);
-    oop(q)->set_mark_raw(oop(q)->mark_raw().set_marked());
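-    // Marking the filler lets the rest of mark-sweep treat this dead
-    // range as an ordinary live object.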
-    assert((int) deadlength == oop(q)->size(), "bad filler object size");
-    // Recall that we required "q == compaction_top".
-    return true;
-  } else {
-    allowed_deadspace_words = 0;
-    return false;
-  }
-}
-
-void