changeset 55809:fee8a1150263

Merge
author psadhukhan
date Thu, 25 Jul 2019 11:31:07 +0530
parents 8538b1f28a71 6073b2290c0a
children 3307a6ded22d
files src/hotspot/share/gc/z/zOopClosures.cpp src/hotspot/share/jfr/leakprofiler/emitEventOperation.cpp src/hotspot/share/jfr/leakprofiler/emitEventOperation.hpp src/java.base/share/classes/jdk/internal/access/JavaNetSocketAccess.java src/java.base/share/classes/jdk/internal/access/JavaNetURLClassLoaderAccess.java src/java.base/share/classes/jdk/internal/reflect/LangReflectAccess.java src/jdk.javadoc/share/legal/pako.md test/jdk/ProblemList.txt test/jdk/sun/misc/ClassLoaderUtil/test.jar test/jdk/sun/security/tools/keytool/DefaultSignatureAlgorithm.java test/jdk/sun/security/tools/keytool/pss/PSS.java test/jdk/sun/security/tools/keytool/pss/java.base/sun/security/rsa/RSAKeyPairGenerator.java
diffstat 916 files changed, 38361 insertions(+), 9377 deletions(-)
--- a/.hgtags	Tue Jul 23 22:21:16 2019 -0700
+++ b/.hgtags	Thu Jul 25 11:31:07 2019 +0530
@@ -569,3 +569,10 @@
 43627549a488b7d0b4df8fad436e36233df89877 jdk-14+2
 b7f68ddec66f996ae3aad03291d129ca9f02482d jdk-13+27
 e64383344f144217c36196c3c8a2df8f588a2af3 jdk-14+3
+1e95931e7d8fa7e3899340a9c7cb28dbea50c10c jdk-13+28
+19d0b382f0869f72d4381b54fa129f1c74b6e766 jdk-14+4
+3081f39a3d30d63b112098386ac2bb027c2b7223 jdk-13+29
+0f1e29c77e50c7da11d83df410026392c4d1a28c jdk-14+5
+2e63fb0a885fa908a97bbb0da8d7c3de11536aca jdk-13+30
+443f7359b34d60e7821216ffc60f88b6ffe0ccdd jdk-14+6
+28ab01c067551ef158abaef08e154e1051ca0893 jdk-14+7
--- a/doc/building.html	Tue Jul 23 22:21:16 2019 -0700
+++ b/doc/building.html	Thu Jul 25 11:31:07 2019 +0530
@@ -297,10 +297,7 @@
 </tr>
 </tbody>
 </table>
-<p>All compilers are expected to be able to compile to the C99 language standard,
-as some C99 features are used in the source code. Microsoft Visual Studio
-doesn't fully support C99 so in practice shared code is limited to using C99
-features that it does support.</p>
+<p>All compilers are expected to be able to compile to the C99 language standard, as some C99 features are used in the source code. Microsoft Visual Studio doesn't fully support C99 so in practice shared code is limited to using C99 features that it does support.</p>
 <h3 id="gcc">gcc</h3>
 <p>The minimum accepted version of gcc is 4.8. Older versions will generate a warning by <code>configure</code> and are unlikely to work.</p>
 <p>The JDK is currently known to be able to compile with at least version 7.4 of gcc.</p>
--- a/doc/testing.html	Tue Jul 23 22:21:16 2019 -0700
+++ b/doc/testing.html	Thu Jul 25 11:31:07 2019 +0530
@@ -154,6 +154,9 @@
 <p>Use additional problem lists file or files, in addition to the default ProblemList.txt located at the JTReg test roots.</p>
 <p>If multiple file names are specified, they should be separated by space (or, to help avoid quoting issues, the special value <code>%20</code>).</p>
 <p>The file names should be either absolute, or relative to the JTReg test root of the tests to be run.</p>
+<h4 id="run_problem_lists">RUN_PROBLEM_LISTS</h4>
+<p>Use the problem lists to select tests instead of excluding them.</p>
+<p>Set to <code>true</code> or <code>false</code>. If <code>true</code>, JTReg will use the <code>-match:</code> option; otherwise, <code>-exclude:</code> will be used. Default is <code>false</code>.</p>
 <h4 id="options">OPTIONS</h4>
 <p>Additional options to the JTReg test framework.</p>
 <p>Use <code>JTREG=&quot;OPTIONS=--help all&quot;</code> to see all available JTReg options.</p>
--- a/doc/testing.md	Tue Jul 23 22:21:16 2019 -0700
+++ b/doc/testing.md	Thu Jul 25 11:31:07 2019 +0530
@@ -306,6 +306,14 @@
 The file names should be either absolute, or relative to the JTReg test root of
 the tests to be run.
 
+#### RUN_PROBLEM_LISTS
+
+Use the problem lists to select tests instead of excluding them.
+
+Set to `true` or `false`.
+If `true`, JTReg will use the `-match:` option; otherwise, `-exclude:` will be used.
+Default is `false`.
+
 
 #### OPTIONS
 Additional options to the JTReg test framework.
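A usage sketch of the new keyword, assuming the standard `make test` entry point described in this document (the test selector is illustrative only):

    make test TEST="jdk_lang" JTREG="RUN_PROBLEM_LISTS=true"
    # jtreg then receives -match:<problem lists> instead of -exclude:<problem lists>,
    # i.e. only the problem-listed tests are selected for the run.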
--- a/make/Docs.gmk	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/Docs.gmk	Thu Jul 25 11:31:07 2019 +0530
@@ -553,7 +553,7 @@
   $(eval specs_bottom_rel_path := $(specs_bottom_rel_path)../) \
 )
 
-SPECS_TOP := $(if $(filter true, $(IS_DRAFT)), <div class="draft-header">$(DRAFT_TEXT)</div>)
+SPECS_TOP := $(if $(filter true, $(IS_DRAFT)), <header class="draft-header">$(DRAFT_TEXT)</header>)
 
 # For all html files in $module/share/specs directories, copy and add the
 # copyright footer.
@@ -595,7 +595,9 @@
             DEST := $(DOCS_OUTPUTDIR)/specs/, \
             CSS := $(GLOBAL_SPECS_DEFAULT_CSS_FILE), \
             OPTIONS := -V include-before='$(SPECS_TOP)' -V include-after='$(SPECS_BOTTOM_$($m_$f_NOF_SUBDIRS))', \
-            REPLACEMENTS := @@VERSION_STRING@@ => $(VERSION_STRING), \
+            REPLACEMENTS := \
+		@@VERSION_SPECIFICATION@@ => $(VERSION_SPECIFICATION) ; \
+		@@VERSION_STRING@@ => $(VERSION_STRING), \
             POST_PROCESS := $(TOOL_FIXUPPANDOC), \
         )) \
         $(eval JDK_SPECS_TARGETS += $($($m_$f_NAME))) \
@@ -625,8 +627,11 @@
             DEST := $(DOCS_OUTPUTDIR)/specs/man, \
             FILTER := $(PANDOC_HTML_MANPAGE_FILTER), \
             CSS := $(GLOBAL_SPECS_DEFAULT_CSS_FILE), \
-            REPLACEMENTS := @@VERSION_SHORT@@ => $(VERSION_SHORT), \
-            OPTIONS := -V include-before='$(SPECS_TOP)' -V include-after='$(SPECS_BOTTOM_1)', \
+            REPLACEMENTS := \
+		@@COPYRIGHT_YEAR@@ => $(COPYRIGHT_YEAR) ; \
+		@@VERSION_SHORT@@ => $(VERSION_SHORT) ; \
+		@@VERSION_SPECIFICATION@@ => $(VERSION_SPECIFICATION), \
+            OPTIONS := --toc -V include-before='$(SPECS_TOP)' -V include-after='$(SPECS_BOTTOM_1)', \
             POST_PROCESS := $(TOOL_FIXUPPANDOC), \
             EXTRA_DEPS := $(PANDOC_HTML_MANPAGE_FILTER) \
                 $(PANDOC_HTML_MANPAGE_FILTER_JAVASCRIPT), \
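The semicolon-separated REPLACEMENTS list expands @@...@@ placeholders in the markdown sources before pandoc renders them. Conceptually the pass behaves like the sketch below; sed is only an illustration (the build uses its own text-processing macro), and the values and file name are examples:

    sed -e 's/@@COPYRIGHT_YEAR@@/2019/g' \
        -e 's/@@VERSION_SHORT@@/14/g' \
        -e 's/@@VERSION_SPECIFICATION@@/14/g' \
        java.1    # placeholder name for one markdown man page source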
--- a/make/Images.gmk	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/Images.gmk	Thu Jul 25 11:31:07 2019 +0530
@@ -102,7 +102,7 @@
     WARN := Creating legacy jre image, \
     DEPS := $(JMODS) $(BASE_RELEASE_FILE) \
         $(call DependOnVariable, JDK_MODULES_LIST), \
-    OUTPUT_DIR := $(JDK_IMAGE_DIR), \
+    OUTPUT_DIR := $(JRE_IMAGE_DIR), \
     SUPPORT_DIR := $(SUPPORT_OUTPUTDIR)/images/jre, \
     PRE_COMMAND := $(RM) -r $(JRE_IMAGE_DIR), \
     COMMAND := $(JLINK_TOOL) --add-modules $(JRE_MODULES_LIST) \
@@ -138,7 +138,7 @@
           -Xmx128M -Xms128M $(LOG_INFO), \
   ))
 
-  JDK_TARGETS += $(gen_cds_archive_jdk)
+  JRE_TARGETS += $(gen_cds_archive_jre)
 endif
 
 ################################################################################
--- a/make/InitSupport.gmk	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/InitSupport.gmk	Thu Jul 25 11:31:07 2019 +0530
@@ -424,8 +424,8 @@
 	        $(if $(filter all, $(LOG_REPORT)), \
 	          $(GREP) -v -e "^Note: including file:" <  $(logfile) || true $(NEWLINE) \
 	        , \
-	          ($(GREP) -v -e "^Note: including file:" <  $(logfile) || true) | $(HEAD) -n 12 $(NEWLINE) \
-	          if test `$(WC) -l < $(logfile)` -gt 12; then \
+	          ($(GREP) -v -e "^Note: including file:" <  $(logfile) || true) | $(HEAD) -n 15 $(NEWLINE) \
+	          if test `$(WC) -l < $(logfile)` -gt 15; then \
 	            $(ECHO) "   ... (rest of output omitted)" ; \
 	          fi $(NEWLINE) \
 	        ) \
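Unescaped from make syntax into plain shell, the failure-log summary above now behaves roughly as follows (a sketch; logfile stands for the per-target log):

    ( grep -v -e "^Note: including file:" < "$logfile" || true ) | head -n 15
    if test "$(wc -l < "$logfile")" -gt 15; then
      echo "   ... (rest of output omitted)"
    fi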
--- a/make/RunTests.gmk	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/RunTests.gmk	Thu Jul 25 11:31:07 2019 +0530
@@ -278,7 +278,7 @@
 
 $(eval $(call ParseKeywordVariable, JTREG, \
     SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR TEST_MODE ASSERT VERBOSE RETAIN \
-        MAX_MEM, \
+        MAX_MEM RUN_PROBLEM_LISTS, \
     STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS KEYWORDS \
         EXTRA_PROBLEM_LISTS AOT_MODULES, \
 ))
@@ -828,6 +828,7 @@
   endif
   JTREG_VERBOSE ?= fail,error,summary
   JTREG_RETAIN ?= fail,error
+  JTREG_RUN_PROBLEM_LISTS ?= false
 
   ifneq ($$($1_JTREG_MAX_MEM), 0)
     $1_JTREG_BASIC_OPTIONS += -vmoption:-Xmx$$($1_JTREG_MAX_MEM)
@@ -865,13 +866,19 @@
     $1_JTREG_BASIC_OPTIONS += -nativepath:$$($1_JTREG_NATIVEPATH)
   endif
 
+  ifeq ($$(JTREG_RUN_PROBLEM_LISTS), true)
+    JTREG_PROBLEM_LIST_PREFIX := -match:
+  else
+    JTREG_PROBLEM_LIST_PREFIX := -exclude:
+  endif
+
   ifneq ($$($1_JTREG_PROBLEM_LIST), )
-    $1_JTREG_BASIC_OPTIONS += $$(addprefix -exclude:, $$($1_JTREG_PROBLEM_LIST))
+    $1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$($1_JTREG_PROBLEM_LIST))
   endif
 
   ifneq ($$(JTREG_EXTRA_PROBLEM_LISTS), )
     # Accept both absolute paths as well as relative to the current test root.
-    $1_JTREG_BASIC_OPTIONS += $$(addprefix -exclude:, $$(wildcard \
+    $1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \
         $$(JTREG_EXTRA_PROBLEM_LISTS) \
         $$(addprefix $$($1_TEST_ROOT)/, $$(JTREG_EXTRA_PROBLEM_LISTS)) \
     ))
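The net effect on the generated jtreg command line, sketched with one of the problem lists touched by this changeset:

    # RUN_PROBLEM_LISTS=false (default): problem-listed tests are excluded
    #   jtreg ... -exclude:$(TOPDIR)/test/jdk/ProblemList.txt ...
    # RUN_PROBLEM_LISTS=true: only problem-listed tests are matched
    #   jtreg ... -match:$(TOPDIR)/test/jdk/ProblemList.txt ...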
--- a/make/autoconf/flags-cflags.m4	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/autoconf/flags-cflags.m4	Thu Jul 25 11:31:07 2019 +0530
@@ -229,7 +229,7 @@
       ;;
 
     xlc)
-      DISABLE_WARNING_PREFIX="-qsuppress="
+      DISABLE_WARNING_PREFIX="-Wno-"
       CFLAGS_WARNINGS_ARE_ERRORS="-qhalt=w"
 
       # Possibly a better subset than "all" is "lan:trx:ret:zea:cmp:ret"
@@ -576,10 +576,10 @@
 
   elif test "x$TOOLCHAIN_TYPE" = xxlc; then
     # Suggested additions: -qsrcmsg to get improved error reporting
-    TOOLCHAIN_CFLAGS_JDK="-qchars=signed -qfullpath -qsaveopt -qstackprotect"  # add on both CFLAGS
-    TOOLCHAIN_CFLAGS_JVM="-qtune=balanced \
-        -qalias=noansi -qstrict -qtls=default -qlanglvl=c99vla \
-        -qlanglvl=noredefmac -qnortti -qnoeh -qignerrno -qstackprotect"
+    # set -qtbtable=full for a better traceback table/better stacks in hs_err when xlc16 is used
+    TOOLCHAIN_CFLAGS_JDK="-qtbtable=full -qchars=signed -qfullpath -qsaveopt -qstackprotect"  # add on both CFLAGS
+    TOOLCHAIN_CFLAGS_JVM="-qtbtable=full -qtune=balanced \
+        -qalias=noansi -qstrict -qtls=default -qnortti -qnoeh -qignerrno -qstackprotect"
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     TOOLCHAIN_CFLAGS_JVM="-nologo -MD -MP"
     TOOLCHAIN_CFLAGS_JDK="-nologo -MD -Zc:wchar_t-"
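For context, DISABLE_WARNING_PREFIX is prepended to warning names elsewhere in the build, so with the xlc toolchain a disabled warning now expands clang-style (the warning name is only an example):

    # $(DISABLE_WARNING_PREFIX)format   now expands to   -Wno-format
    # with the old prefix it would have expanded to      -qsuppress=format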
--- a/make/autoconf/flags.m4	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/autoconf/flags.m4	Thu Jul 25 11:31:07 2019 +0530
@@ -176,6 +176,10 @@
     AC_MSG_WARN([Ignoring LDFLAGS($LDFLAGS) found in environment. Use --with-extra-ldflags])
   fi
 
+  if test "x$ASFLAGS" != "x"; then
+    AC_MSG_WARN([Ignoring ASFLAGS($ASFLAGS) found in environment. Use --with-extra-asflags])
+  fi
+
   AC_ARG_WITH(extra-cflags, [AS_HELP_STRING([--with-extra-cflags],
       [extra flags to be used when compiling jdk c-files])])
 
@@ -185,9 +189,13 @@
   AC_ARG_WITH(extra-ldflags, [AS_HELP_STRING([--with-extra-ldflags],
       [extra flags to be used when linking jdk])])
 
+  AC_ARG_WITH(extra-asflags, [AS_HELP_STRING([--with-extra-asflags],
+      [extra flags to be passed to the assembler])])
+
   USER_CFLAGS="$with_extra_cflags"
   USER_CXXFLAGS="$with_extra_cxxflags"
   USER_LDFLAGS="$with_extra_ldflags"
+  USER_ASFLAGS="$with_extra_asflags"
 ])
 
 # Setup the sysroot flags and add them to global CFLAGS and LDFLAGS so
@@ -280,10 +288,12 @@
   EXTRA_CFLAGS="$MACHINE_FLAG $USER_CFLAGS"
   EXTRA_CXXFLAGS="$MACHINE_FLAG $USER_CXXFLAGS"
   EXTRA_LDFLAGS="$MACHINE_FLAG $USER_LDFLAGS"
+  EXTRA_ASFLAGS="$USER_ASFLAGS"
 
   AC_SUBST(EXTRA_CFLAGS)
   AC_SUBST(EXTRA_CXXFLAGS)
   AC_SUBST(EXTRA_LDFLAGS)
+  AC_SUBST(EXTRA_ASFLAGS)
 
   # For autoconf testing to work, the global flags must also be stored in the
   # "unnamed" CFLAGS etc.
--- a/make/autoconf/spec.gmk.in	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/autoconf/spec.gmk.in	Thu Jul 25 11:31:07 2019 +0530
@@ -489,6 +489,7 @@
 EXTRA_CFLAGS = @EXTRA_CFLAGS@
 EXTRA_CXXFLAGS = @EXTRA_CXXFLAGS@
 EXTRA_LDFLAGS = @EXTRA_LDFLAGS@
+EXTRA_ASFLAGS = @EXTRA_ASFLAGS@
 
 CXX:=@FIXPATH@ @CCACHE@ @ICECC@ @CXX@
 
--- a/make/data/docs-resources/resources/jdk-default.css	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/docs-resources/resources/jdk-default.css	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -85,6 +85,8 @@
     margin-top: -1em;
 }
 
+a { text-decoration: none }
+
 a:link {
   color: #4A6782;
 }
@@ -162,3 +164,22 @@
     font-style: italic;
     font-size: 80%;
 }
+
+nav#TOC {
+  border: 1px solid gray;
+  border-radius: 10px;
+  padding: 5px 10px;
+  margin-bottom: 15px;
+}
+
+nav#TOC ul ul {
+    font-size:smaller;
+}
+
+nav#TOC ul ul li {
+    display: inline-block
+}
+
+nav#TOC ul ul li::before {
+    content: " \2022  "
+}
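The new nav#TOC rules style the table of contents that pandoc emits now that --toc is passed for the man pages (see the Docs.gmk change above). A sketch of the kind of markup these selectors target, with invented anchor names:

    <nav id="TOC">
      <ul>
        <li><a href="#description">Description</a></li>
        <li><a href="#options">Options</a>
          <ul><!-- second level: rendered smaller, inline, with a bullet prefix -->
            <li><a href="#advanced-options">Advanced Options</a></li>
          </ul>
        </li>
      </ul>
    </nav>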
--- a/make/data/tzdata/VERSION	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/VERSION	Thu Jul 25 11:31:07 2019 +0530
@@ -21,4 +21,4 @@
 # or visit www.oracle.com if you need additional information or have any
 # questions.
 #
-tzdata2018g
+tzdata2019a
--- a/make/data/tzdata/africa	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/africa	Thu Jul 25 11:31:07 2019 +0530
@@ -387,6 +387,11 @@
 # See Africa/Lagos.
 
 # Eritrea
+# See Africa/Nairobi.
+
+# Eswatini (formerly Swaziland)
+# See Africa/Johannesburg.
+
 # Ethiopia
 # See Africa/Nairobi.
 #
@@ -870,8 +875,41 @@
 # From Mohamed Essedik Najd (2018-10-26):
 # Today, a Moroccan government council approved the perpetual addition
 # of 60 minutes to the regular Moroccan timezone.
-# From Brian Inglis (2018-10-26):
-# http://www.maroc.ma/fr/actualites/le-conseil-de-gouvernement-adopte-un-projet-de-decret-relatif-lheure-legale-stipulant-le
+# From Matt Johnson (2018-10-28):
+# http://www.sgg.gov.ma/Portals/1/BO/2018/BO_6720-bis_Ar.pdf
+#
+# From Maamar Abdelkader (2018-11-01):
+# We usually move clocks back the previous week end and come back to the +1
+# the week end after....  The government does not announce yet the decision
+# about this temporary change.  But it s 99% sure that it will be the case,
+# as in previous years.  An unofficial survey was done these days, showing
+# that 64% of asked peopke are ok for moving from +1 to +0 during Ramadan.
+# https://leconomiste.com/article/1035870-enquete-l-economiste-sunergia-64-des-marocains-plebiscitent-le-gmt-pendant-ramadan
+#
+# From Paul Eggert (2018-11-01):
+# For now, guess that Morocco will fall back at 03:00 the last Sunday
+# before Ramadan, and spring forward at 02:00 the first Sunday after
+# Ramadan, as this has been the practice since 2012.  To implement this,
+# transition dates for 2019 through 2037 were determined by running the
+# following program under GNU Emacs 26.1.
+# (let ((islamic-year 1440))
+#   (require 'cal-islam)
+#   (while (< islamic-year 1460)
+#     (let ((a (calendar-islamic-to-absolute (list 9 1 islamic-year)))
+#           (b (calendar-islamic-to-absolute (list 10 1 islamic-year)))
+#           (sunday 0))
+#       (while (/= sunday (mod (setq a (1- a)) 7)))
+#       (while (/= sunday (mod b 7))
+#         (setq b (1+ b)))
+#       (setq a (calendar-gregorian-from-absolute a))
+#       (setq b (calendar-gregorian-from-absolute b))
+#       (insert
+#        (format
+#         (concat "Rule\tMorocco\t%d\tonly\t-\t%s\t%2d\t 3:00\t-1:00\t-\n"
+#                 "Rule\tMorocco\t%d\tonly\t-\t%s\t%2d\t 2:00\t0\t-\n")
+#         (car (cdr (cdr a))) (calendar-month-name (car a) t) (car (cdr a))
+#         (car (cdr (cdr b))) (calendar-month-name (car b) t) (car (cdr b)))))
+#     (setq islamic-year (+ 1 islamic-year))))
 
 # RULE	NAME	FROM	TO	TYPE	IN	ON	AT	SAVE	LETTER/S
 Rule	Morocco	1939	only	-	Sep	12	 0:00	1:00	-
@@ -903,7 +941,7 @@
 Rule	Morocco	2012	only	-	Sep	30	 3:00	0	-
 Rule	Morocco	2013	only	-	Jul	 7	 3:00	0	-
 Rule	Morocco	2013	only	-	Aug	10	 2:00	1:00	-
-Rule	Morocco	2013	2018	-	Oct	lastSun	 3:00	0	-
+Rule	Morocco	2013	2017	-	Oct	lastSun	 3:00	0	-
 Rule	Morocco	2014	2018	-	Mar	lastSun	 2:00	1:00	-
 Rule	Morocco	2014	only	-	Jun	28	 3:00	0	-
 Rule	Morocco	2014	only	-	Aug	 2	 2:00	1:00	-
@@ -915,13 +953,53 @@
 Rule	Morocco	2017	only	-	Jul	 2	 2:00	1:00	-
 Rule	Morocco	2018	only	-	May	13	 3:00	0	-
 Rule	Morocco	2018	only	-	Jun	17	 2:00	1:00	-
+Rule	Morocco	2019	only	-	May	 5	 3:00	0	-
+Rule	Morocco	2019	only	-	Jun	 9	 2:00	1:00	-
+Rule	Morocco	2020	only	-	Apr	19	 3:00	0	-
+Rule	Morocco	2020	only	-	May	24	 2:00	1:00	-
+Rule	Morocco	2021	only	-	Apr	11	 3:00	0	-
+Rule	Morocco	2021	only	-	May	16	 2:00	1:00	-
+Rule	Morocco	2022	only	-	Mar	27	 3:00	0	-
+Rule	Morocco	2022	only	-	May	 8	 2:00	1:00	-
+Rule	Morocco	2023	only	-	Mar	19	 3:00	0	-
+Rule	Morocco	2023	only	-	Apr	23	 2:00	1:00	-
+Rule	Morocco	2024	only	-	Mar	10	 3:00	0	-
+Rule	Morocco	2024	only	-	Apr	14	 2:00	1:00	-
+Rule	Morocco	2025	only	-	Feb	23	 3:00	0	-
+Rule	Morocco	2025	only	-	Apr	 6	 2:00	1:00	-
+Rule	Morocco	2026	only	-	Feb	15	 3:00	0	-
+Rule	Morocco	2026	only	-	Mar	22	 2:00	1:00	-
+Rule	Morocco	2027	only	-	Feb	 7	 3:00	0	-
+Rule	Morocco	2027	only	-	Mar	14	 2:00	1:00	-
+Rule	Morocco	2028	only	-	Jan	23	 3:00	0	-
+Rule	Morocco	2028	only	-	Feb	27	 2:00	1:00	-
+Rule	Morocco	2029	only	-	Jan	14	 3:00	0	-
+Rule	Morocco	2029	only	-	Feb	18	 2:00	1:00	-
+Rule	Morocco	2029	only	-	Dec	30	 3:00	0	-
+Rule	Morocco	2030	only	-	Feb	10	 2:00	1:00	-
+Rule	Morocco	2030	only	-	Dec	22	 3:00	0	-
+Rule	Morocco	2031	only	-	Jan	26	 2:00	1:00	-
+Rule	Morocco	2031	only	-	Dec	14	 3:00	0	-
+Rule	Morocco	2032	only	-	Jan	18	 2:00	1:00	-
+Rule	Morocco	2032	only	-	Nov	28	 3:00	0	-
+Rule	Morocco	2033	only	-	Jan	 9	 2:00	1:00	-
+Rule	Morocco	2033	only	-	Nov	20	 3:00	0	-
+Rule	Morocco	2033	only	-	Dec	25	 2:00	1:00	-
+Rule	Morocco	2034	only	-	Nov	 5	 3:00	0	-
+Rule	Morocco	2034	only	-	Dec	17	 2:00	1:00	-
+Rule	Morocco	2035	only	-	Oct	28	 3:00	0	-
+Rule	Morocco	2035	only	-	Dec	 2	 2:00	1:00	-
+Rule	Morocco	2036	only	-	Oct	19	 3:00	0	-
+Rule	Morocco	2036	only	-	Nov	23	 2:00	1:00	-
+Rule	Morocco	2037	only	-	Oct	 4	 3:00	0	-
+Rule	Morocco	2037	only	-	Nov	15	 2:00	1:00	-
 
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
 Zone Africa/Casablanca	-0:30:20 -	LMT	1913 Oct 26
 			 0:00	Morocco	+00/+01	1984 Mar 16
 			 1:00	-	+01	1986
-			 0:00	Morocco	+00/+01	2018 Oct 27
-			 1:00	-	+01
+			 0:00	Morocco	+00/+01	2018 Oct 28  3:00
+			 0:00	Morocco	+00/+01
 
 # Western Sahara
 #
@@ -936,8 +1014,8 @@
 
 Zone Africa/El_Aaiun	-0:52:48 -	LMT	1934 Jan # El Aaiún
 			-1:00	-	-01	1976 Apr 14
-			 0:00	Morocco	+00/+01	2018 Oct 27
-			 1:00	-	+01
+			 0:00	Morocco	+00/+01	2018 Oct 28  3:00
+			 0:00	Morocco	+00/+01
 
 # Mozambique
 #
@@ -1094,10 +1172,20 @@
 # the switch is from 01:00 to 02:00 ... [Decree No. 25/2017]
 # http://www.mnec.gov.st/index.php/publicacoes/documentos/file/90-decreto-lei-n-25-2017
 
+# From Vadim Nasardinov (2018-12-29):
+# São Tomé and Príncipe is about to do the following on Jan 1, 2019:
+# https://www.stp-press.st/2018/12/05/governo-jesus-ja-decidiu-repor-hora-legal-sao-tomense/
+#
+# From Michael Deckers (2018-12-30):
+# https://www.legis-palop.org/download.jsp?idFile=102818
+# ... [The legal time of the country, which coincides with universal
+# coordinated time, will be restituted at 2 o'clock on day 1 of January, 2019.]
+
 Zone	Africa/Sao_Tome	 0:26:56 -	LMT	1884
 			-0:36:45 -	LMT	1912 Jan  1 00:00u # Lisbon MT
 			 0:00	-	GMT	2018 Jan  1 01:00
-			 1:00	-	WAT
+			 1:00	-	WAT	2019 Jan  1 02:00
+			 0:00	-	GMT
 
 # Senegal
 # See Africa/Abidjan.
@@ -1128,7 +1216,7 @@
 			1:30	-	SAST	1903 Mar
 			2:00	SA	SAST
 Link Africa/Johannesburg Africa/Maseru	   # Lesotho
-Link Africa/Johannesburg Africa/Mbabane    # Swaziland
+Link Africa/Johannesburg Africa/Mbabane    # Eswatini
 #
 # Marion and Prince Edward Is
 # scientific station since 1947
@@ -1170,9 +1258,6 @@
 			2:00	Sudan	CA%sT	2000 Jan 15 12:00
 			3:00	-	EAT
 
-# Swaziland
-# See Africa/Johannesburg.
-
 # Tanzania
 # See Africa/Nairobi.
 
--- a/make/data/tzdata/asia	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/asia	Thu Jul 25 11:31:07 2019 +0530
@@ -609,12 +609,82 @@
 # obtained from
 # http://www.hko.gov.hk/gts/time/Summertime.htm
 
-# From Arthur David Olson (2009-10-28):
+# From Phake Nick (2018-10-27):
+# According to Singaporean newspaper
+# http://eresources.nlb.gov.sg/newspapers/Digitised/Article/singfreepresswk19041102-1.2.37
+# the day that Hong Kong start using GMT+8 should be Oct 30, 1904.
+#
+# From Paul Eggert (2018-11-17):
+# Hong Kong had a time ball near the Marine Police Station, Tsim Sha Tsui.
+# "The ball was raised manually each day and dropped at exactly 1pm
+# (except on Sundays and Government holidays)."
+# Dyson AD. From Time Ball to Atomic Clock. Hong Kong Government. 1983.
+# <https://www.hko.gov.hk/publica/gen_pub/timeball_atomic_clock.pdf>
+# "From 1904 October 30 the time-ball at Hong Kong has been dropped by order
+# of the Governor of the Colony at 17h 0m 0s G.M.T., which is 23m 18s.14 in
+# advance of 1h 0m 0s of Hong Kong mean time."
+# Hollis HP. Universal Time, Longitudes, and Geodesy. Mon Not R Astron Soc.
+# 1905-02-10;65(4):405-6. https://doi.org/10.1093/mnras/65.4.382
+#
+# From Joseph Myers (2018-11-18):
+# An astronomer before 1925 referring to GMT would have been using the old
+# astronomical convention where the day started at noon, not midnight.
+#
+# From Steve Allen (2018-11-17):
+# Meteorological Observations made at the Hongkong Observatory in the year 1904
+# page 4 <https://books.google.com/books?id=kgw5AQAAMAAJ&pg=RA4-PA4>
+# ... the log of drop times in Table II shows that on Sunday 1904-10-30 the
+# ball was dropped.  So that looks like a special case drop for the sake
+# of broadcasting the new local time.
+#
+# From Phake Nick (2018-11-18):
+# According to The Hong Kong Weekly Press, 1904-10-29, p.324, the
+# governor of Hong Kong at the time stated that "We are further desired to
+# make it known that the change will be effected by firing the gun and by the
+# dropping of the Ball at 23min. 18sec. before one."
+# From Paul Eggert (2018-11-18):
+# See <https://mmis.hkpl.gov.hk> for this; unfortunately Flash is required.
+
+# From Phake Nick (2018-10-26):
+# I went to check microfilm records stored at Hong Kong Public Library....
+# on September 30 1941, according to Ta Kung Pao (Hong Kong edition), it was
+# stated that fallback would occur on the next day (the 1st)'s "03:00 am (Hong
+# Kong Time 04:00 am)" and the clock will fall back for a half hour. (03:00
+# probably refer to the time commonly used in mainland China at the time given
+# the paper's background) ... the sunrise/sunset time given by South China
+# Morning Post for October 1st was indeed moved by half an hour compares to
+# before.  After that, in December, the battle to capture Hong Kong started and
+# the library doesn't seems to have any record stored about press during that
+# period of time.  Some media resumed publication soon after that within the
+# same month, but there were not much information about time there.  Later they
+# started including a radio program guide when they restored radio service,
+# explicitly mentioning it use Tokyo standard time, and later added a note
+# saying it's half an hour ahead of the old Hong Kong standard time, and it
+# also seems to indicate that Hong Kong was not using GMT+8 when it was
+# captured by Japan.
+#
+# Image of related sections on newspaper:
+# * 1941-09-30, Ta Kung Pao (Hong Kong), "Winter Time start tomorrow".
+#   https://i.imgur.com/6waY51Z.jpg (Chinese)
+# * 1941-09-29, South China Morning Post, Information on sunrise/sunset
+#   time and other things for September 30 and October 1.
+#   https://i.imgur.com/kCiUR78.jpg
+# * 1942-02-05. The Hong Kong News, Radio Program Guide.
+#   https://i.imgur.com/eVvDMzS.jpg
+# * 1941-06-14. Hong Kong Daily Press, Daylight Saving from 3am Tomorrow.
+#   https://i.imgur.com/05KkvtC.png
+# * 1941-09-30, Hong Kong Daily Press, Winter Time Warning.
+#   https://i.imgur.com/dge4kFJ.png
+# Also, the Liberation day of Hong Kong after WWII which British rule
+# over the territory resumed was August 30, 1945, which I think should
+# be the termination date for the use of JST in the territory....
+
+# From Paul Eggert (2018-11-17):
 # Here are the dates given at
-# http://www.hko.gov.hk/gts/time/Summertime.htm
-# as of 2009-10-28:
+# https://www.hko.gov.hk/gts/time/Summertime.htm
+# as of 2014-06-19:
 # Year        Period
-# 1941        1 Apr to 30 Sep
+# 1941        15 Jun to 30 Sep
 # 1942        Whole year
 # 1943        Whole year
 # 1944        Whole year
@@ -625,7 +695,7 @@
 # 1949        3 Apr to 30 Oct
 # 1950        2 Apr to 29 Oct
 # 1951        1 Apr to 28 Oct
-# 1952        6 Apr to 25 Oct
+# 1952        6 Apr to 2 Nov
 # 1953        5 Apr to 1 Nov
 # 1954        21 Mar to 31 Oct
 # 1955        20 Mar to 6 Nov
@@ -654,25 +724,25 @@
 # 1978        Nil
 # 1979        13 May to 21 Oct
 # 1980 to Now Nil
-# The page does not give start or end times of day.
-# The page does not give a start date for 1942.
-# The page does not givw an end date for 1945.
-# The Japanese occupation of Hong Kong began on 1941-12-25.
-# The Japanese surrender of Hong Kong was signed 1945-09-15.
-# For lack of anything better, use start of those days as the transition times.
+# The page does not give times of day for transitions,
+# or dates for the 1942 and 1945 transitions.
+# The Japanese occupation of Hong Kong began 1941-12-25.
+# The Japanese surrender of Hong Kong was signed 1945-09-16; see:
+# Heaver S. The days after the Pacific war ended: unsettling times
+# in Hong Kong. Post Magazine. 2016-06-13.
+# https://www.scmp.com/magazines/post-magazine/article/1852990/days-after-pacific-war-ended-unsettling-times-hong-kong
+# For lack of anything better, use start of those days as the
+# transition times.
 
 # Rule	NAME	FROM	TO	TYPE	IN	ON	AT	SAVE	LETTER/S
-Rule	HK	1941	only	-	Apr	1	3:30	1:00	S
-Rule	HK	1941	only	-	Sep	30	3:30	0	-
 Rule	HK	1946	only	-	Apr	20	3:30	1:00	S
 Rule	HK	1946	only	-	Dec	1	3:30	0	-
 Rule	HK	1947	only	-	Apr	13	3:30	1:00	S
 Rule	HK	1947	only	-	Dec	30	3:30	0	-
 Rule	HK	1948	only	-	May	2	3:30	1:00	S
 Rule	HK	1948	1951	-	Oct	lastSun	3:30	0	-
-Rule	HK	1952	only	-	Oct	25	3:30	0	-
+Rule	HK	1952	1953	-	Nov	Sun>=1	3:30	0	-
 Rule	HK	1949	1953	-	Apr	Sun>=1	3:30	1:00	S
-Rule	HK	1953	only	-	Nov	1	3:30	0	-
 Rule	HK	1954	1964	-	Mar	Sun>=18	3:30	1:00	S
 Rule	HK	1954	only	-	Oct	31	3:30	0	-
 Rule	HK	1955	1964	-	Nov	Sun>=1	3:30	0	-
@@ -682,9 +752,11 @@
 Rule	HK	1979	only	-	May	Sun>=8	3:30	1:00	S
 Rule	HK	1979	only	-	Oct	Sun>=16	3:30	0	-
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
-Zone	Asia/Hong_Kong	7:36:42 -	LMT	1904 Oct 30
-			8:00	HK	HK%sT	1941 Dec 25
-			9:00	-	JST	1945 Sep 15
+Zone	Asia/Hong_Kong	7:36:42 -	LMT	1904 Oct 30  0:36:42
+			8:00	-	HKT	1941 Jun 15  3:30
+			8:00	1:00	HKST	1941 Oct  1  4:00
+			8:30	-	HKT	1941 Dec 25
+			9:00	-	JST	1945 Sep 16
 			8:00	HK	HK%sT
 
 ###############################################################################
@@ -1080,6 +1152,16 @@
 
 # India
 
+# British astronomer Henry Park Hollis disliked India Standard Time's offset:
+# "A new time system has been proposed for India, Further India, and Burmah.
+# The scheme suggested is that the times of the meridians 5½ and 6½ hours
+# east of Greenwich should be adopted in these territories.  No reason is
+# given why hourly meridians five hours and six hours east should not be
+# chosen; a plan which would bring the time of India into harmony with
+# that of almost the whole of the civilised world."
+# Hollis HP. Universal Time, Longitudes, and Geodesy. Mon Not R Astron Soc.
+# 1905-02-10;65(4):405-6. https://doi.org/10.1093/mnras/65.4.382
+
 # From Ian P. Beacock, in "A brief history of (modern) time", The Atlantic
 # https://www.theatlantic.com/technology/archive/2015/12/the-creation-of-modern-time/421419/
 # (2015-12-22):
@@ -1250,12 +1332,65 @@
 # leap year calculation involved.  There has never been any serious
 # plan to change that law....
 #
-# From Paul Eggert (2006-03-22):
+# From Paul Eggert (2018-11-30):
 # Go with Shanks & Pottenger before Sept. 1991, and with Pournader thereafter.
-# I used Ed Reingold's cal-persia in GNU Emacs 21.2 to check Persian dates,
-# stopping after 2037 when 32-bit time_t's overflow.
-# That cal-persia used Birashk's approximation, which disagrees with the solar
-# calendar predictions for the year 2025, so I corrected those dates by hand.
+# I used the following code in GNU Emacs 26.1 to generate the "Rule Iran"
+# lines from 2008 through 2087.  Emacs 26.1 uses Ed Reingold's
+# cal-persia implementation of Birashk's approximation, which in the
+# 2008-2087 range disagrees with the astronomical Persian calendar
+# for Persian years 1404 (Gregorian 2025) and 1437 (Gregorian 2058),
+# so the following code special-cases those years.  See Table 15.1, page 264, of:
+# Edward M. Reingold and Nachum Dershowitz, Calendrical Calculations:
+# The Ultimate Edition, Cambridge University Press (2018).
+# https://www.cambridge.org/fr/academic/subjects/computer-science/computing-general-interest/calendrical-calculations-ultimate-edition-4th-edition
+# Page 258, footnote 2, of this book says there is some dispute over what will
+# happen in 2091 (and some other years after that), so this code
+# stops in 2087, as 2088 and 2089 agree with the "max" rule below.
+# (cl-loop
+#  initially (require 'cal-persia)
+#  with first-persian-year = 1387
+#  with last-persian-year = 1466
+#  ;; Exceptional years in the above range,
+#  ;; from Reingold & Dershowitz Table 15.1, page 264:
+#  with exceptional-persian-years = '(1404 1437)
+#  with range-start = nil
+#  for persian-year from first-persian-year to last-persian-year
+#  do
+#  (let*
+#      ((exceptional-year-offset
+#        (if (member persian-year exceptional-persian-years) 1 0))
+#       (beg-dst-absolute
+#        (+ (calendar-persian-to-absolute (list 1 1 persian-year))
+#           exceptional-year-offset))
+#       (end-dst-absolute
+#        (+ (calendar-persian-to-absolute (list 6 30 persian-year))
+#           exceptional-year-offset))
+#       (next-year-beg-dst-absolute
+#        (+ (calendar-persian-to-absolute (list 1 1 (1+ persian-year)))
+#           (if (member (1+ persian-year) exceptional-persian-years) 1 0)))
+#       (beg-dst (calendar-gregorian-from-absolute beg-dst-absolute))
+#       (end-dst (calendar-gregorian-from-absolute end-dst-absolute))
+#       (next-year-beg-dst (calendar-gregorian-from-absolute
+#                           next-year-beg-dst-absolute))
+#       (year (calendar-extract-year beg-dst))
+#       (range-end (if range-start year "only")))
+#    (setq range-start (or range-start year))
+#    (when (or (/= (calendar-extract-day beg-dst)
+#                  (calendar-extract-day next-year-beg-dst))
+#              (= persian-year last-persian-year))
+#      (insert
+#       (format
+#        "Rule\tIran\t%d\t%s\t-\t%s\t%2d\t24:00\t1:00\t-\n"
+#        range-start range-end
+#        (calendar-month-name (calendar-extract-month beg-dst) t)
+#        (calendar-extract-day beg-dst)))
+#      (insert
+#       (format
+#        "Rule\tIran\t%d\t%s\t-\t%s\t%2d\t24:00\t0\t-\n"
+#        range-start range-end
+#        (calendar-month-name (calendar-extract-month end-dst) t)
+#        (calendar-extract-day end-dst)))
+#      (setq range-start nil))))
 #
 # From Oscar van Vlijmen (2005-03-30), writing about future
 # discrepancies between cal-persia and the Iranian calendar:
@@ -1290,61 +1425,113 @@
 # thirtieth day of Shahrivar.
 #
 # Rule	NAME	FROM	TO	TYPE	IN	ON	AT	SAVE	LETTER/S
-Rule	Iran	1978	1980	-	Mar	21	0:00	1:00	-
-Rule	Iran	1978	only	-	Oct	21	0:00	0	-
-Rule	Iran	1979	only	-	Sep	19	0:00	0	-
-Rule	Iran	1980	only	-	Sep	23	0:00	0	-
-Rule	Iran	1991	only	-	May	 3	0:00	1:00	-
-Rule	Iran	1992	1995	-	Mar	22	0:00	1:00	-
-Rule	Iran	1991	1995	-	Sep	22	0:00	0	-
-Rule	Iran	1996	only	-	Mar	21	0:00	1:00	-
-Rule	Iran	1996	only	-	Sep	21	0:00	0	-
-Rule	Iran	1997	1999	-	Mar	22	0:00	1:00	-
-Rule	Iran	1997	1999	-	Sep	22	0:00	0	-
-Rule	Iran	2000	only	-	Mar	21	0:00	1:00	-
-Rule	Iran	2000	only	-	Sep	21	0:00	0	-
-Rule	Iran	2001	2003	-	Mar	22	0:00	1:00	-
-Rule	Iran	2001	2003	-	Sep	22	0:00	0	-
-Rule	Iran	2004	only	-	Mar	21	0:00	1:00	-
-Rule	Iran	2004	only	-	Sep	21	0:00	0	-
-Rule	Iran	2005	only	-	Mar	22	0:00	1:00	-
-Rule	Iran	2005	only	-	Sep	22	0:00	0	-
-Rule	Iran	2008	only	-	Mar	21	0:00	1:00	-
-Rule	Iran	2008	only	-	Sep	21	0:00	0	-
-Rule	Iran	2009	2011	-	Mar	22	0:00	1:00	-
-Rule	Iran	2009	2011	-	Sep	22	0:00	0	-
-Rule	Iran	2012	only	-	Mar	21	0:00	1:00	-
-Rule	Iran	2012	only	-	Sep	21	0:00	0	-
-Rule	Iran	2013	2015	-	Mar	22	0:00	1:00	-
-Rule	Iran	2013	2015	-	Sep	22	0:00	0	-
-Rule	Iran	2016	only	-	Mar	21	0:00	1:00	-
-Rule	Iran	2016	only	-	Sep	21	0:00	0	-
-Rule	Iran	2017	2019	-	Mar	22	0:00	1:00	-
-Rule	Iran	2017	2019	-	Sep	22	0:00	0	-
-Rule	Iran	2020	only	-	Mar	21	0:00	1:00	-
-Rule	Iran	2020	only	-	Sep	21	0:00	0	-
-Rule	Iran	2021	2023	-	Mar	22	0:00	1:00	-
-Rule	Iran	2021	2023	-	Sep	22	0:00	0	-
-Rule	Iran	2024	only	-	Mar	21	0:00	1:00	-
-Rule	Iran	2024	only	-	Sep	21	0:00	0	-
-Rule	Iran	2025	2027	-	Mar	22	0:00	1:00	-
-Rule	Iran	2025	2027	-	Sep	22	0:00	0	-
-Rule	Iran	2028	2029	-	Mar	21	0:00	1:00	-
-Rule	Iran	2028	2029	-	Sep	21	0:00	0	-
-Rule	Iran	2030	2031	-	Mar	22	0:00	1:00	-
-Rule	Iran	2030	2031	-	Sep	22	0:00	0	-
-Rule	Iran	2032	2033	-	Mar	21	0:00	1:00	-
-Rule	Iran	2032	2033	-	Sep	21	0:00	0	-
-Rule	Iran	2034	2035	-	Mar	22	0:00	1:00	-
-Rule	Iran	2034	2035	-	Sep	22	0:00	0	-
+Rule	Iran	1978	1980	-	Mar	20	24:00	1:00	-
+Rule	Iran	1978	only	-	Oct	20	24:00	0	-
+Rule	Iran	1979	only	-	Sep	18	24:00	0	-
+Rule	Iran	1980	only	-	Sep	22	24:00	0	-
+Rule	Iran	1991	only	-	May	 2	24:00	1:00	-
+Rule	Iran	1992	1995	-	Mar	21	24:00	1:00	-
+Rule	Iran	1991	1995	-	Sep	21	24:00	0	-
+Rule	Iran	1996	only	-	Mar	20	24:00	1:00	-
+Rule	Iran	1996	only	-	Sep	20	24:00	0	-
+Rule	Iran	1997	1999	-	Mar	21	24:00	1:00	-
+Rule	Iran	1997	1999	-	Sep	21	24:00	0	-
+Rule	Iran	2000	only	-	Mar	20	24:00	1:00	-
+Rule	Iran	2000	only	-	Sep	20	24:00	0	-
+Rule	Iran	2001	2003	-	Mar	21	24:00	1:00	-
+Rule	Iran	2001	2003	-	Sep	21	24:00	0	-
+Rule	Iran	2004	only	-	Mar	20	24:00	1:00	-
+Rule	Iran	2004	only	-	Sep	20	24:00	0	-
+Rule	Iran	2005	only	-	Mar	21	24:00	1:00	-
+Rule	Iran	2005	only	-	Sep	21	24:00	0	-
+Rule	Iran	2008	only	-	Mar	20	24:00	1:00	-
+Rule	Iran	2008	only	-	Sep	20	24:00	0	-
+Rule	Iran	2009	2011	-	Mar	21	24:00	1:00	-
+Rule	Iran	2009	2011	-	Sep	21	24:00	0	-
+Rule	Iran	2012	only	-	Mar	20	24:00	1:00	-
+Rule	Iran	2012	only	-	Sep	20	24:00	0	-
+Rule	Iran	2013	2015	-	Mar	21	24:00	1:00	-
+Rule	Iran	2013	2015	-	Sep	21	24:00	0	-
+Rule	Iran	2016	only	-	Mar	20	24:00	1:00	-
+Rule	Iran	2016	only	-	Sep	20	24:00	0	-
+Rule	Iran	2017	2019	-	Mar	21	24:00	1:00	-
+Rule	Iran	2017	2019	-	Sep	21	24:00	0	-
+Rule	Iran	2020	only	-	Mar	20	24:00	1:00	-
+Rule	Iran	2020	only	-	Sep	20	24:00	0	-
+Rule	Iran	2021	2023	-	Mar	21	24:00	1:00	-
+Rule	Iran	2021	2023	-	Sep	21	24:00	0	-
+Rule	Iran	2024	only	-	Mar	20	24:00	1:00	-
+Rule	Iran	2024	only	-	Sep	20	24:00	0	-
+Rule	Iran	2025	2027	-	Mar	21	24:00	1:00	-
+Rule	Iran	2025	2027	-	Sep	21	24:00	0	-
+Rule	Iran	2028	2029	-	Mar	20	24:00	1:00	-
+Rule	Iran	2028	2029	-	Sep	20	24:00	0	-
+Rule	Iran	2030	2031	-	Mar	21	24:00	1:00	-
+Rule	Iran	2030	2031	-	Sep	21	24:00	0	-
+Rule	Iran	2032	2033	-	Mar	20	24:00	1:00	-
+Rule	Iran	2032	2033	-	Sep	20	24:00	0	-
+Rule	Iran	2034	2035	-	Mar	21	24:00	1:00	-
+Rule	Iran	2034	2035	-	Sep	21	24:00	0	-
+Rule	Iran	2036	2037	-	Mar	20	24:00	1:00	-
+Rule	Iran	2036	2037	-	Sep	20	24:00	0	-
+Rule	Iran	2038	2039	-	Mar	21	24:00	1:00	-
+Rule	Iran	2038	2039	-	Sep	21	24:00	0	-
+Rule	Iran	2040	2041	-	Mar	20	24:00	1:00	-
+Rule	Iran	2040	2041	-	Sep	20	24:00	0	-
+Rule	Iran	2042	2043	-	Mar	21	24:00	1:00	-
+Rule	Iran	2042	2043	-	Sep	21	24:00	0	-
+Rule	Iran	2044	2045	-	Mar	20	24:00	1:00	-
+Rule	Iran	2044	2045	-	Sep	20	24:00	0	-
+Rule	Iran	2046	2047	-	Mar	21	24:00	1:00	-
+Rule	Iran	2046	2047	-	Sep	21	24:00	0	-
+Rule	Iran	2048	2049	-	Mar	20	24:00	1:00	-
+Rule	Iran	2048	2049	-	Sep	20	24:00	0	-
+Rule	Iran	2050	2051	-	Mar	21	24:00	1:00	-
+Rule	Iran	2050	2051	-	Sep	21	24:00	0	-
+Rule	Iran	2052	2053	-	Mar	20	24:00	1:00	-
+Rule	Iran	2052	2053	-	Sep	20	24:00	0	-
+Rule	Iran	2054	2055	-	Mar	21	24:00	1:00	-
+Rule	Iran	2054	2055	-	Sep	21	24:00	0	-
+Rule	Iran	2056	2057	-	Mar	20	24:00	1:00	-
+Rule	Iran	2056	2057	-	Sep	20	24:00	0	-
+Rule	Iran	2058	2059	-	Mar	21	24:00	1:00	-
+Rule	Iran	2058	2059	-	Sep	21	24:00	0	-
+Rule	Iran	2060	2062	-	Mar	20	24:00	1:00	-
+Rule	Iran	2060	2062	-	Sep	20	24:00	0	-
+Rule	Iran	2063	only	-	Mar	21	24:00	1:00	-
+Rule	Iran	2063	only	-	Sep	21	24:00	0	-
+Rule	Iran	2064	2066	-	Mar	20	24:00	1:00	-
+Rule	Iran	2064	2066	-	Sep	20	24:00	0	-
+Rule	Iran	2067	only	-	Mar	21	24:00	1:00	-
+Rule	Iran	2067	only	-	Sep	21	24:00	0	-
+Rule	Iran	2068	2070	-	Mar	20	24:00	1:00	-
+Rule	Iran	2068	2070	-	Sep	20	24:00	0	-
+Rule	Iran	2071	only	-	Mar	21	24:00	1:00	-
+Rule	Iran	2071	only	-	Sep	21	24:00	0	-
+Rule	Iran	2072	2074	-	Mar	20	24:00	1:00	-
+Rule	Iran	2072	2074	-	Sep	20	24:00	0	-
+Rule	Iran	2075	only	-	Mar	21	24:00	1:00	-
+Rule	Iran	2075	only	-	Sep	21	24:00	0	-
+Rule	Iran	2076	2078	-	Mar	20	24:00	1:00	-
+Rule	Iran	2076	2078	-	Sep	20	24:00	0	-
+Rule	Iran	2079	only	-	Mar	21	24:00	1:00	-
+Rule	Iran	2079	only	-	Sep	21	24:00	0	-
+Rule	Iran	2080	2082	-	Mar	20	24:00	1:00	-
+Rule	Iran	2080	2082	-	Sep	20	24:00	0	-
+Rule	Iran	2083	only	-	Mar	21	24:00	1:00	-
+Rule	Iran	2083	only	-	Sep	21	24:00	0	-
+Rule	Iran	2084	2086	-	Mar	20	24:00	1:00	-
+Rule	Iran	2084	2086	-	Sep	20	24:00	0	-
+Rule	Iran	2087	only	-	Mar	21	24:00	1:00	-
+Rule	Iran	2087	only	-	Sep	21	24:00	0	-
 #
-# The following rules are approximations starting in the year 2038.
-# These are the best post-2037 approximations available, given the
-# restrictions of a single rule using a Gregorian-based data format.
+# The following rules are approximations starting in the year 2088.
+# These are the best post-2088 approximations available, given the
+# restrictions of a single rule using ordinary Gregorian dates.
 # At some point this table will need to be extended, though quite
 # possibly Iran will change the rules first.
-Rule	Iran	2036	max	-	Mar	21	0:00	1:00	-
-Rule	Iran	2036	max	-	Sep	21	0:00	0	-
+Rule	Iran	2088	max	-	Mar	20	24:00	1:00	-
+Rule	Iran	2088	max	-	Sep	20	24:00	0	-
 
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
 Zone	Asia/Tehran	3:25:44	-	LMT	1916
@@ -1456,6 +1643,24 @@
 Rule	Zion	1974	only	-	Oct	13	0:00	0	S
 Rule	Zion	1975	only	-	Apr	20	0:00	1:00	D
 Rule	Zion	1975	only	-	Aug	31	0:00	0	S
+
+# From Alois Treindl (2019-03-06):
+# http://www.moin.gov.il/Documents/שעון קיץ/clock-50-years-7-2014.pdf
+# From Isaac Starkman (2019-03-06):
+# Summer time was in that period in 1980 and 1984, see
+# https://www.ynet.co.il/articles/0,7340,L-3951073,00.html
+# You can of course read it in translation.
+# I checked the local newspapers for that years.
+# It started on midnight and end at 01.00 am.
+# From Paul Eggert (2019-03-06):
+# Also see this thread about the moin.gov.il URL:
+# https://mm.icann.org/pipermail/tz/2018-November/027194.html
+Rule	Zion	1980	only	-	Aug	 2	0:00	1:00	D
+Rule	Zion	1980	only	-	Sep	13	1:00	0	S
+Rule	Zion	1984	only	-	May	 5	0:00	1:00	D
+Rule	Zion	1984	only	-	Aug	25	1:00	0	S
+
+# From Shanks & Pottenger:
 Rule	Zion	1985	only	-	Apr	14	0:00	1:00	D
 Rule	Zion	1985	only	-	Sep	15	0:00	0	S
 Rule	Zion	1986	only	-	May	18	0:00	1:00	D
@@ -1714,7 +1919,9 @@
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
 Zone	Asia/Tokyo	9:18:59	-	LMT	1887 Dec 31 15:00u
 			9:00	Japan	J%sT
-# Since 1938, all Japanese possessions have been like Asia/Tokyo.
+# Since 1938, all Japanese possessions have been like Asia/Tokyo,
+# except that Truk (Chuuk), Ponape (Pohnpei), and Jaluit (Kosrae) did not
+# switch from +10 to +09 until 1941-04-01; see the 'australasia' file.
 
 # Jordan
 #
@@ -2004,8 +2211,10 @@
 # and in Byalokoz) lists Ural river (plus 10 versts on its left bank) in
 # the third time belt (before 1930 this means +03).
 
-# From Paul Eggert (2016-12-06):
-# The tables below reflect Golosunov's remarks, with exceptions as noted.
+# From Alexander Konzurovski (2018-12-20):
+# Qyzyolrda Region (Asia/Qyzylorda) is changing its time zone from
+# UTC+6 to UTC+5 effective December 21st, 2018. The legal document is
+# located here: http://adilet.zan.kz/rus/docs/P1800000817 (russian language).
 
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
 #
@@ -2019,8 +2228,6 @@
 			6:00 RussiaAsia	+06/+07	2004 Oct 31  2:00s
 			6:00	-	+06
 # Qyzylorda (aka Kyzylorda, Kizilorda, Kzyl-Orda, etc.) (KZ-KZY)
-# This currently includes Qostanay (aka Kostanay, Kustanay) (KZ-KUS);
-# see comments below.
 Zone	Asia/Qyzylorda	4:21:52 -	LMT	1924 May  2
 			4:00	-	+04	1930 Jun 21
 			5:00	-	+05	1981 Apr  1
@@ -2031,21 +2238,22 @@
 			5:00 RussiaAsia	+05/+06	1992 Jan 19  2:00s
 			6:00 RussiaAsia	+06/+07	1992 Mar 29  2:00s
 			5:00 RussiaAsia	+05/+06	2004 Oct 31  2:00s
+			6:00	-	+06	2018 Dec 21  0:00
+			5:00	-	+05
+#
+# Qostanay (aka Kostanay, Kustanay) (KZ-KUS)
+# The 1991/2 rules are unclear partly because of the 1997 Turgai
+# reorganization.
+Zone	Asia/Qostanay	4:14:28 -	LMT	1924 May  2
+			4:00	-	+04	1930 Jun 21
+			5:00	-	+05	1981 Apr  1
+			5:00	1:00	+06	1981 Oct  1
+			6:00	-	+06	1982 Apr  1
+			5:00 RussiaAsia	+05/+06	1991 Mar 31  2:00s
+			4:00 RussiaAsia	+04/+05	1992 Jan 19  2:00s
+			5:00 RussiaAsia	+05/+06	2004 Oct 31  2:00s
 			6:00	-	+06
-# The following zone is like Asia/Qyzylorda except for being one
-# hour earlier from 1991-09-29 to 1992-03-29.  The 1991/2 rules for
-# Qostanay are unclear partly because of the 1997 Turgai
-# reorganization, so this zone is commented out for now.
-#Zone	Asia/Qostanay	4:14:20 -	LMT	1924 May  2
-#			4:00	-	+04	1930 Jun 21
-#			5:00	-	+05	1981 Apr  1
-#			5:00	1:00	+06	1981 Oct  1
-#			6:00	-	+06	1982 Apr  1
-#			5:00 RussiaAsia	+05/+06	1991 Mar 31  2:00s
-#			4:00 RussiaAsia	+04/+05	1992 Jan 19  2:00s
-#			5:00 RussiaAsia	+05/+06	2004 Oct 31  2:00s
-#			6:00	-	+06
-#
+
 # Aqtöbe (aka Aktobe, formerly Aktyubinsk) (KZ-AKT)
 Zone	Asia/Aqtobe	3:48:40	-	LMT	1924 May  2
 			4:00	-	+04	1930 Jun 21
@@ -2139,21 +2347,43 @@
 # started at June 1 in that year.  For another example, the article in
 # 1988 said that DST started at 2:00 AM in that year.
 
+# From Phake Nick (2018-10-27):
+# 1. According to official announcement from Korean government, the DST end
+# date in South Korea should be
+# 1955-09-08 without specifying time
+# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027977557
+# 1956-09-29 without specifying time
+# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027978341
+# 1957-09-21 24 o'clock
+# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027979690#3
+# 1958-09-20 24 o'clock
+# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027981189
+# 1959-09-19 24 o'clock
+# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0027982974#2
+# 1960-09-17 24 o'clock
+# http://theme.archives.go.kr/next/common/viewEbook.do?singleData=N&archiveEventId=0028044104
+# ...
+# 2.... https://namu.wiki/w/대한민국%20표준시 ... [says]
+# when Korea was using GMT+8:30 as standard time, the international
+# aviation/marine/meteorological industry in the country refused to
+# follow and continued to use GMT+9:00 for interoperability.
+
+
 # Rule	NAME	FROM	TO	TYPE	IN	ON	AT	SAVE	LETTER/S
-Rule	ROK	1948	only	-	Jun	 1	0:00	1:00	D
-Rule	ROK	1948	only	-	Sep	13	0:00	0	S
-Rule	ROK	1949	only	-	Apr	 3	0:00	1:00	D
-Rule	ROK	1949	1951	-	Sep	Sun>=8	0:00	0	S
-Rule	ROK	1950	only	-	Apr	 1	0:00	1:00	D
-Rule	ROK	1951	only	-	May	 6	0:00	1:00	D
-Rule	ROK	1955	only	-	May	 5	0:00	1:00	D
-Rule	ROK	1955	only	-	Sep	 9	0:00	0	S
-Rule	ROK	1956	only	-	May	20	0:00	1:00	D
-Rule	ROK	1956	only	-	Sep	30	0:00	0	S
-Rule	ROK	1957	1960	-	May	Sun>=1	0:00	1:00	D
-Rule	ROK	1957	1960	-	Sep	Sun>=18	0:00	0	S
-Rule	ROK	1987	1988	-	May	Sun>=8	2:00	1:00	D
-Rule	ROK	1987	1988	-	Oct	Sun>=8	3:00	0	S
+Rule	ROK	1948	only	-	Jun	 1	 0:00	1:00	D
+Rule	ROK	1948	only	-	Sep	12	24:00	0	S
+Rule	ROK	1949	only	-	Apr	 3	 0:00	1:00	D
+Rule	ROK	1949	1951	-	Sep	Sat>=7	24:00	0	S
+Rule	ROK	1950	only	-	Apr	 1	 0:00	1:00	D
+Rule	ROK	1951	only	-	May	 6	 0:00	1:00	D
+Rule	ROK	1955	only	-	May	 5	 0:00	1:00	D
+Rule	ROK	1955	only	-	Sep	 8	24:00	0	S
+Rule	ROK	1956	only	-	May	20	 0:00	1:00	D
+Rule	ROK	1956	only	-	Sep	29	24:00	0	S
+Rule	ROK	1957	1960	-	May	Sun>=1	 0:00	1:00	D
+Rule	ROK	1957	1960	-	Sep	Sat>=17	24:00	0	S
+Rule	ROK	1987	1988	-	May	Sun>=8	 2:00	1:00	D
+Rule	ROK	1987	1988	-	Oct	Sun>=8	 3:00	0	S
 
 # From Paul Eggert (2016-08-23):
 # The Korean Wikipedia entry gives the following sources for UT offsets:
@@ -2882,9 +3112,15 @@
 # the official website, though the decree did not specify the exact
 # time of the time shift.
 # http://www.palestinecabinet.gov.ps/Website/AR/NDecrees/ViewFile.ashx?ID=e7a42ab7-ee23-435a-b9c8-a4f7e81f3817
+
+# From Even Scharning (2019-03-23):
+# DST in Palestine will start on 30 March this year, not 23 March as the time
+# zone database predicted.
+# https://ramallah.news/post/123610
 #
-# From Paul Eggert (2018-03-16):
-# For 2016 on, predict spring transitions on March's fourth Saturday at 01:00.
+# From Tim Parenti (2019-03-23):
+# Combining this with the rules observed since 2016, adjust our spring
+# transition guess to Mar Sat>=24.
 
 # Rule	NAME	FROM	TO	TYPE	IN	ON	AT	SAVE	LETTER/S
 Rule EgyptAsia	1957	only	-	May	10	0:00	1:00	S
@@ -2915,7 +3151,7 @@
 Rule Palestine	2013	only	-	Sep	Fri>=21	0:00	0	-
 Rule Palestine	2014	2015	-	Oct	Fri>=21	0:00	0	-
 Rule Palestine	2015	only	-	Mar	lastFri	24:00	1:00	S
-Rule Palestine	2016	max	-	Mar	Sat>=22	1:00	1:00	S
+Rule Palestine	2016	max	-	Mar	Sat>=24	1:00	1:00	S
 Rule Palestine	2016	max	-	Oct	lastSat	1:00	0	-
 
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
@@ -2943,6 +3179,11 @@
 # no information
 
 # Philippines
+
+# From Paul Eggert (2018-11-18):
+# The Spanish initially used American (west-of-Greenwich) time.
+# It is unknown what time Manila kept when the British occupied it from
+# 1762-10-06 through 1764-04; for now assume it kept American time.
 # On 1844-08-16, Narciso Clavería, governor-general of the
 # Philippines, issued a proclamation announcing that 1844-12-30 was to
 # be immediately followed by 1845-01-01; see R.H. van Gent's
@@ -3028,8 +3269,8 @@
 # going to run on Higgins Time.' And so, until last year, it did."  See:
 # Antar E. Dinner at When? Saudi Aramco World, 1969 March/April. 2-3.
 # http://archive.aramcoworld.com/issue/196902/dinner.at.when.htm
-# newspapers.com says a similar story about Higgins was published in the Port
-# Angeles (WA) Evening News, 1965-03-10, page 5, but I lack access to the text.
+# Also see: Antar EN. Arabian flying is confusing.
+# Port Angeles (WA) Evening News. 1965-03-10. page 3.
 #
 # The TZ database cannot represent quasi-solar time; airline time is the best
 # we can do.  The 1946 foreign air news digest of the U.S. Civil Aeronautics
@@ -3402,5 +3643,17 @@
 			8:00	-	+08	1975 Jun 13
 			7:00	-	+07
 
+# From Paul Eggert (2019-02-19):
+#
+# The Ho Chi Minh entry suffices for most purposes as it agrees with all of
+# Vietnam since 1975-06-13.  Presumably clocks often changed in south Vietnam
+# in the early 1970s as locations changed hands during the war; however the
+# details are unknown and would likely be too voluminous for this database.
+#
+# For timestamps in north Vietnam back to 1970 (the tzdb cutoff),
+# use Asia/Bangkok; see the VN entries in the file zone1970.tab.
+# For timestamps before 1970, see Asia/Hanoi in the file 'backzone'.
+
+
 # Yemen
 # See Asia/Riyadh.
--- a/make/data/tzdata/australasia	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/australasia	Thu Jul 25 11:31:07 2019 +0530
@@ -425,10 +425,44 @@
 # it is uninhabited.
 
 # Guam
+
+# Rule	NAME	FROM	TO	TYPE	IN	ON	AT	SAVE	LETTER/S
+# http://guamlegislature.com/Public_Laws_5th/PL05-025.pdf
+# http://documents.guam.gov/wp-content/uploads/E.O.-59-7-Guam-Daylight-Savings-Time-May-6-1959.pdf
+Rule	Guam	1959	only	-	Jun	27	2:00	1:00	D
+# http://documents.guam.gov/wp-content/uploads/E.O.-61-5-Revocation-of-Daylight-Saving-Time-and-Restoratio.pdf
+Rule	Guam	1961	only	-	Jan	29	2:00	0	S
+# http://documents.guam.gov/wp-content/uploads/E.O.-67-13-Guam-Daylight-Savings-Time.pdf
+Rule	Guam	1967	only	-	Sep	 1	2:00	1:00	D
+# http://documents.guam.gov/wp-content/uploads/E.O.-69-2-Repeal-of-Guam-Daylight-Saving-Time.pdf
+Rule	Guam	1969	only	-	Jan	26	0:01	0	S
+# http://documents.guam.gov/wp-content/uploads/E.O.-69-10-Guam-Daylight-Saving-Time.pdf
+Rule	Guam	1969	only	-	Jun	22	2:00	1:00	D
+Rule	Guam	1969	only	-	Aug	31	2:00	0	S
+# http://documents.guam.gov/wp-content/uploads/E.O.-70-10-Guam-Daylight-Saving-Time.pdf
+# http://documents.guam.gov/wp-content/uploads/E.O.-70-30-End-of-Guam-Daylight-Saving-Time.pdf
+# http://documents.guam.gov/wp-content/uploads/E.O.-71-5-Guam-Daylight-Savings-Time.pdf
+Rule	Guam	1970	1971	-	Apr	lastSun	2:00	1:00	D
+Rule	Guam	1970	1971	-	Sep	Sun>=1	2:00	0	S
+# http://documents.guam.gov/wp-content/uploads/E.O.-73-28.-Guam-Day-light-Saving-Time.pdf
+Rule	Guam	1973	only	-	Dec	16	2:00	1:00	D
+# http://documents.guam.gov/wp-content/uploads/E.O.-74-7-Guam-Daylight-Savings-Time-Rescinded.pdf
+Rule	Guam	1974	only	-	Feb	24	2:00	0	S
+# http://documents.guam.gov/wp-content/uploads/E.O.-76-13-Daylight-Savings-Time.pdf
+Rule	Guam	1976	only	-	May	26	2:00	1:00	D
+# http://documents.guam.gov/wp-content/uploads/E.O.-76-25-Revocation-of-E.O.-76-13.pdf
+Rule	Guam	1976	only	-	Aug	22	2:01	0	S
+# http://documents.guam.gov/wp-content/uploads/E.O.-77-4-Daylight-Savings-Time.pdf
+Rule	Guam	1977	only	-	Apr	24	2:00	1:00	D
+# http://documents.guam.gov/wp-content/uploads/E.O.-77-18-Guam-Standard-Time.pdf
+Rule	Guam	1977	only	-	Aug	28	2:00	0	S
+
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
 Zone	Pacific/Guam	-14:21:00 -	LMT	1844 Dec 31
 			 9:39:00 -	LMT	1901        # Agana
-			10:00	-	GST	2000 Dec 23 # Guam
+			10:00	-	GST	1941 Dec 10 # Guam
+			 9:00	-	+09	1944 Jul 31
+			10:00	Guam	G%sT	2000 Dec 23
 			10:00	-	ChST	# Chamorro Standard Time
 Link Pacific/Guam Pacific/Saipan # N Mariana Is
 
@@ -450,31 +484,56 @@
 
 # Marshall Is
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
-Zone Pacific/Majuro	11:24:48 -	LMT	1901
-			11:00	-	+11	1969 Oct
-			12:00	-	+12
-Zone Pacific/Kwajalein	11:09:20 -	LMT	1901
-			11:00	-	+11	1969 Oct
-			-12:00	-	-12	1993 Aug 20
-			12:00	-	+12
+Zone Pacific/Majuro	 11:24:48 -	LMT	1901
+			 11:00	-	+11	1914 Oct
+			  9:00	-	+09	1919 Feb  1
+			 11:00	-	+11	1937
+			 10:00	-	+10	1941 Apr  1
+			  9:00	-	+09	1944 Jan 30
+			 11:00	-	+11	1969 Oct
+			 12:00	-	+12
+Zone Pacific/Kwajalein	 11:09:20 -	LMT	1901
+			 11:00	-	+11	1937
+			 10:00	-	+10	1941 Apr  1
+			  9:00	-	+09	1944 Feb  6
+			 11:00	-	+11	1969 Oct
+			-12:00	-	-12	1993 Aug 20 24:00
+			 12:00	-	+12
 
 # Micronesia
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
-Zone Pacific/Chuuk	10:07:08 -	LMT	1901
-			10:00	-	+10
-Zone Pacific/Pohnpei	10:32:52 -	LMT	1901 # Kolonia
-			11:00	-	+11
-Zone Pacific/Kosrae	10:51:56 -	LMT	1901
-			11:00	-	+11	1969 Oct
-			12:00	-	+12	1999
-			11:00	-	+11
+Zone Pacific/Chuuk	-13:52:52 -	LMT	1844 Dec 31
+			 10:07:08 -	LMT	1901
+			 10:00	-	+10	1914 Oct
+			  9:00	-	+09	1919 Feb  1
+			 10:00	-	+10	1941 Apr  1
+			  9:00	-	+09	1945 Aug
+			 10:00	-	+10
+Zone Pacific/Pohnpei	-13:27:08 -	LMT	1844 Dec 31	# Kolonia
+			 10:32:52 -	LMT	1901
+			 11:00	-	+11	1914 Oct
+			  9:00	-	+09	1919 Feb  1
+			 11:00	-	+11	1937
+			 10:00	-	+10	1941 Apr  1
+			  9:00	-	+09	1945 Aug
+			 11:00	-	+11
+Zone Pacific/Kosrae	-13:08:04 -	LMT	1844 Dec 31
+			 10:51:56 -	LMT	1901
+			 11:00	-	+11	1914 Oct
+			  9:00	-	+09	1919 Feb  1
+			 11:00	-	+11	1937
+			 10:00	-	+10	1941 Apr  1
+			  9:00	-	+09	1945 Aug
+			 11:00	-	+11	1969 Oct
+			 12:00	-	+12	1999
+			 11:00	-	+11
 
 # Nauru
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
 Zone	Pacific/Nauru	11:07:40 -	LMT	1921 Jan 15 # Uaobe
-			11:30	-	+1130	1942 Mar 15
-			9:00	-	+09	1944 Aug 15
-			11:30	-	+1130	1979 May
+			11:30	-	+1130	1942 Aug 29
+			 9:00	-	+09	1945 Sep  8
+			11:30	-	+1130	1979 Feb 10  2:00
 			12:00	-	+12
 
 # New Caledonia
@@ -575,8 +634,9 @@
 
 # Palau (Belau)
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
-Zone Pacific/Palau	8:57:56 -	LMT	1901 # Koror
-			9:00	-	+09
+Zone Pacific/Palau	-15:02:04 -	LMT	1844 Dec 31	# Koror
+			  8:57:56 -	LMT	1901
+			  9:00	-	+09
 
 # Papua New Guinea
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
@@ -838,7 +898,7 @@
 # tz@iana.org for general use in the future).  For more, please see
 # the file CONTRIBUTING in the tz distribution.
 
-# From Paul Eggert (2017-02-10):
+# From Paul Eggert (2018-11-18):
 #
 # Unless otherwise specified, the source for data through 1990 is:
 # Thomas G. Shanks and Rique Pottenger, The International Atlas (6th edition),
@@ -863,6 +923,7 @@
 # A reliable and entertaining source about time zones is
 # Derek Howse, Greenwich time and longitude, Philip Wilson Publishers (1997).
 #
+# I invented the abbreviation marked "*".
 # The following abbreviations are from other sources.
 # Corrections are welcome!
 #		std	dst
@@ -870,7 +931,7 @@
 #	  8:00	AWST	AWDT	Western Australia
 #	  9:30	ACST	ACDT	Central Australia
 #	 10:00	AEST	AEDT	Eastern Australia
-#	 10:00	GST		Guam through 2000
+#	 10:00	GST	GDT*	Guam through 2000
 #	 10:00	ChST		Chamorro
 #	 11:30	NZMT	NZST	New Zealand through 1945
 #	 12:00	NZST	NZDT	New Zealand 1946-present
@@ -1569,28 +1630,70 @@
 
 # Kwajalein
 
-# In comp.risks 14.87 (26 August 1993), Peter Neumann writes:
-# I wonder what happened in Kwajalein, where there was NO Friday,
-# 1993-08-20.  Thursday night at midnight Kwajalein switched sides with
-# respect to the International Date Line, to rejoin its fellow islands,
-# going from 11:59 p.m. Thursday to 12:00 m. Saturday in a blink.
+# From an AP article (1993-08-22):
+# "The nearly 3,000 Americans living on this remote Pacific atoll have a good
+# excuse for not remembering Saturday night: there wasn't one.  Residents were
+# going to bed Friday night and waking up Sunday morning because at midnight
+# -- 8 A.M. Eastern daylight time on Saturday -- Kwajalein was jumping from
+# one side of the international date line to the other."
+# "In Marshall Islands, Friday is followed by Sunday", NY Times. 1993-08-22.
+# https://www.nytimes.com/1993/08/22/world/in-marshall-islands-friday-is-followed-by-sunday.html
+
+# From Phake Nick (2018-10-27):
+# <https://wiki.suikawiki.org/n/南洋群島の標準時> ... pointed out that
+# currently tzdata say Pacific/Kwajalein switched from GMT+11 to GMT-12 in
+# 1969 October without explanation, however an 1993 article from NYT say it
+# synchorized its day with US mainland about 40 years ago and thus the switch
+# should occur at around 1950s instead.
+#
+# From Paul Eggert (2018-11-18):
+# The NYT (actually, AP) article is vague and possibly wrong about this.
+# The article says the earlier switch was "40 years ago when the United States
+# Army established a missile test range here".  However, the Kwajalein Test
+# Center was established on 1960-10-01 and was run by the US Navy.  It was
+# transferred to the US Army on 1964-07-01.  See "Seize the High Ground"
+# <https://history.army.mil/html/books/070/70-88-1/cmhPub_70-88-1.pdf>.
+# Given that Shanks was right on the money about the 1993 change, I'm inclined
+# to take Shanks's word for the 1969 change unless we find better evidence.
 
 
 # N Mariana Is, Guam
 
+# From Phake Nick (2018-10-27):
+# Guam Island was briefly annexed by Japan during ... year 1941-1944 ...
+# however there are no detailed information about what time it use during that
+# period.  It would probably be reasonable to assume Guam use GMT+9 during
+# that period of time like the surrounding area.
+
+# From Paul Eggert (2018-11-18):
 # Howse writes (p 153) "The Spaniards, on the other hand, reached the
 # Philippines and the Ladrones from America," and implies that the Ladrones
 # (now called the Marianas) kept American date for quite some time.
 # For now, we assume the Ladrones switched at the same time as the Philippines;
 # see Asia/Manila.
-
+#
+# Use 1941-12-10 and 1944-07-31 for Guam WWII transitions, as the rough start
+# and end of Japanese control of Agana.  We don't know whether the Northern
+# Marianas followed Guam's DST rules from 1959 through 1977; for now, assume
+# they did as that avoids the need for a separate zone due to our 1970 cutoff.
+#
 # US Public Law 106-564 (2000-12-23) made UT +10 the official standard time,
 # under the name "Chamorro Standard Time".  There is no official abbreviation,
 # but Congressman Robert A. Underwood, author of the bill that became law,
 # wrote in a press release (2000-12-27) that he will seek the use of "ChST".
 
+# See also the commentary for Micronesia.
 
-# Micronesia
+
+# Marshall Is
+# See the commentary for Micronesia.
+
+
+# Micronesia (and nearby)
+
+# From Paul Eggert (2018-11-18):
+# Like the Ladrones (see Guam commentary), assume the Spanish East Indies
+# kept American time until the Philippines switched at the end of 1844.
 
 # Alan Eugene Davis writes (1996-03-16),
 # "I am certain, having lived there for the past decade, that 'Truk'
@@ -1606,6 +1709,95 @@
 # that Truk and Yap are UT +10, and Ponape and Kosrae are +11.
 # We don't know when Kosrae switched from +12; assume January 1 for now.
 
+# From Phake Nick (2018-10-27):
+#
+# From a Japanese wiki site https://wiki.suikawiki.org/n/南洋群島の標準時
+# ...
+# For "Southern Islands" (modern region of Mariana + Palau + Federation of
+# Micronesia + Marshall Islands):
+#
+# A 1906 Japanese magazine showed the Caroline Islands and Mariana Islands,
+# which were occupied by Germany at the time, as GMT+10, together with the
+# likes of German New Guinea.  However, there is a marking saying it had not
+# been implemented (yet).  No further information after that was found.
+#
+# Japan invaded those islands in 1914, and records show that they were
+# instructed to use JST at the time.
+#
+# A January 1915 telecommunication record on the Jaluit Atoll shows they used
+# the meridian of 170E as standard time (GMT+11:20), which is close to the
+# longitude of the atoll.
+# A February 1915 record says the 170E standard time was to be used until
+# noon on February 9, and that after February 9 noon they were to use JST.
+# However, these are the times used within the Japanese military at the time
+# and probably do not reflect the time kept by local residents (that is,
+# if they kept their own time back then).
+#
+# In January 1919 the occupying force issued a command that split the area
+# into three different timezones along the meridians of 135E, 150E and 165E
+# (JST+0, +1, +2), and the command was to become effective from February 1
+# of the same year.  Although the command still applied only to the
+# occupying force itself, later publications described the time as the
+# standard time for the occupied area, and thus it can probably be seen as
+# such.
+#  * Area using the meridian of 135E: Palau and Yap civil administration
+#    area (Southern Islands Western Standard Time)
+#  * Area using the meridian of 150E: Truk (Chuuk) and Saipan civil
+#    administration area (Southern Islands Central Standard Time)
+#  * Area using the meridian of 165E: Ponape (Pohnpei) and Jaluit civil
+#    administration area (Southern Islands Eastern Standard Time).
+#  * In the next few years the Japanese occupation of those islands was
+#    formalized via a League of Nations mandate (the South Pacific Mandate)
+#    and a formal governance structure was established; these districts
+#    [became subprefectures] and the timezone classification was inherited
+#    as the standard time of the area.
+#  * Saipan subprefecture included the Mariana Islands (excluding Guam,
+#    which was occupied by America at the time); Palau and Yap
+#    subprefectures ruled the Western Caroline Islands with 137E longitude
+#    as the border; Truk and Ponape subprefectures ruled the Eastern
+#    Caroline Islands with 154E as the border; Ponape subprefecture also
+#    ruled the part of the Marshall Islands west of 164E starting from
+#    (1918?); and Jaluit subprefecture ruled the rest of the Marshall
+#    Islands.
+#
+# Then, in 1937, an announcement was made to consolidate the time in the
+# area into two timezones:
+#  * Area using the meridian of 135E: area administered by the Palau, Yap
+#    and Saipan subprefectures (Southern Islands Western Standard Time)
+#  * Area using the meridian of 150E: area administered by the Truk (Chuuk),
+#    Ponape (Pohnpei) and Jaluit subprefectures (Southern Islands Eastern
+#    Standard Time)
+#
+# Another announcement, issued in 1941, said that on April 1 of that year
+# the standard time of the Southern Islands would be changed to use the
+# meridian of 135E (GMT+9), thus abolishing timezone differences within the
+# area.
+#
+# Then the Pacific theater of WWII started and Japan slowly lost control of
+# the islands.  The webpage I linked above contains no information about
+# this period....
+#
+# After the end of WWII, in February 1946, a document written by (former?)
+# Japanese military personnel describes a three-hour difference between
+# Caroline Islands/Wake Island time and Chungking time, which would mean
+# the time being used there was GMT+10.
+#
+# After that, the area became the Trust Territory of the Pacific Islands
+# under American administration from 1947.  The site lists some
+# American/international books/maps/publications about the time used in
+# those areas during this period, but they do not seem to be reliable, so
+# it would be best if someone knew where more reliable information could
+# be found.
+#
+#
+# From Paul Eggert (2018-11-18):
+#
+# For the above, use vague dates like "1914" and "1945" for transitions that
+# plausibly exist but for which the details are not known.  The information
+# for Wake is too sketchy to act on.
+#
+# The 1906 GMT+10 info about German-controlled islands may never have been
+# put into effect, so omit it from the data for now.
+#
+# The Jaluit info governs Kwajalein.
+
 
 # Midway
 
@@ -1623,6 +1815,29 @@
 # started DST on June 3.  Possibly DST was observed other years
 # in Midway, but we have no record of it.
 
+# Nauru
+
+# From Phake Nick (2018-10-31):
+# Currently, the tz database says Nauru used LMT until 1921, and then
+# switched to GMT+11:30 for the next two decades.
+# However, a number of timezone maps published in America and Japan back
+# then showed its timezone as GMT+11, per https://wiki.suikawiki.org/n/ナウルの標準時
+# It would also be nice if the 1921 transition date could be sourced.
+# ...
+# The "Nauru Standard Time Act 1978 Time Change"
+# http://ronlaw.gov.nr/nauru_lpms/files/gazettes/4b23a17d2030150404db7a5fa5872f52.pdf#page=3
+# based on "Nauru Standard Time Act 1978 Time Change"
+# http://www.paclii.org/nr/legis/num_act/nsta1978207/ defined that "Nauru
+# Alternative Time" (GMT+12) should be in effect from 1979 Feb.
+#
+# From Paul Eggert (2018-11-19):
+# The 1921-01-15 introduction of standard time is in Shanks; it is also in
+# "Standard Time Throughout the World", US National Bureau of Standards (1935),
+# page 3, which does not give the UT offset.  In response to a comment by
+# Phake Nick I set the Nauru time of occupation by Japan to
+# 1942-08-29/1945-09-08 by using dates from:
+# https://en.wikipedia.org/wiki/Japanese_occupation_of_Nauru
+
 # Norfolk
 
 # From Alexander Krivenyshev (2015-09-23):
@@ -1638,6 +1853,9 @@
 # other than in 1974/5.  See:
 # https://www.timeanddate.com/time/australia/norfolk-island.html
 
+# Palau
+# See commentary for Micronesia.
+
 # Pitcairn
 
 # From Rives McDow (1999-11-08):
@@ -1802,6 +2020,9 @@
 # From Paul Eggert (2003-03-23):
 # We have no other report of DST in Wake Island, so omit this info for now.
 
+# See also the commentary for Micronesia.
+
+
 ###############################################################################
 
 # The International Date Line
--- a/make/data/tzdata/backward	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/backward	Thu Jul 25 11:31:07 2019 +0530
@@ -100,6 +100,7 @@
 Link	America/Havana		Cuba
 Link	Africa/Cairo		Egypt
 Link	Europe/Dublin		Eire
+Link	Etc/UTC			Etc/UCT
 Link	Europe/London		Europe/Belfast
 Link	Europe/Chisinau		Europe/Tiraspol
 Link	Europe/London		GB
@@ -134,7 +135,7 @@
 Link	Asia/Seoul		ROK
 Link	Asia/Singapore		Singapore
 Link	Europe/Istanbul		Turkey
-Link	Etc/UCT			UCT
+Link	Etc/UTC			UCT
 Link	America/Anchorage	US/Alaska
 Link	America/Adak		US/Aleutian
 Link	America/Phoenix		US/Arizona
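
The net effect of the Link changes above is that the legacy UCT aliases now
resolve to the rules of Etc/UTC.  A quick java.time check (assuming a runtime
whose tzdata already contains this update):

    import java.time.ZoneId;

    public class UctAlias {
        public static void main(String[] args) {
            // Both legacy IDs keep their names but share Etc/UTC's fixed rules.
            System.out.println(ZoneId.of("UCT").getRules()
                    .equals(ZoneId.of("Etc/UTC").getRules()));     // expected: true
            System.out.println(ZoneId.of("Etc/UCT").getRules()
                    .equals(ZoneId.of("Etc/UTC").getRules()));     // expected: true
        }
    }
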
--- a/make/data/tzdata/etcetera	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/etcetera	Thu Jul 25 11:31:07 2019 +0530
@@ -42,7 +42,6 @@
 
 Zone	Etc/GMT		0	-	GMT
 Zone	Etc/UTC		0	-	UTC
-Zone	Etc/UCT		0	-	UCT
 
 # The following link uses older naming conventions,
 # but it belongs here, not in the file 'backward',
--- a/make/data/tzdata/europe	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/europe	Thu Jul 25 11:31:07 2019 +0530
@@ -1878,7 +1878,7 @@
 			1:00	Belgium	CE%sT	1977
 			1:00	EU	CE%sT
 
-# Macedonia
+# North Macedonia
 # See Europe/Belgrade.
 
 # Malta
@@ -3382,7 +3382,7 @@
 Link Europe/Belgrade Europe/Ljubljana	# Slovenia
 Link Europe/Belgrade Europe/Podgorica	# Montenegro
 Link Europe/Belgrade Europe/Sarajevo	# Bosnia and Herzegovina
-Link Europe/Belgrade Europe/Skopje	# Macedonia
+Link Europe/Belgrade Europe/Skopje	# North Macedonia
 Link Europe/Belgrade Europe/Zagreb	# Croatia
 
 # Slovakia
--- a/make/data/tzdata/iso3166.tab	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/iso3166.tab	Thu Jul 25 11:31:07 2019 +0530
@@ -32,8 +32,8 @@
 # All text uses UTF-8 encoding.  The columns of the table are as follows:
 #
 # 1.  ISO 3166-1 alpha-2 country code, current as of
-#     ISO 3166-1 N905 (2016-11-15).  See: Updates on ISO 3166-1
-#     http://isotc.iso.org/livelink/livelink/Open/16944257
+#     ISO 3166-1 N976 (2018-11-06).  See: Updates on ISO 3166-1
+#     https://isotc.iso.org/livelink/livelink/Open/16944257
 # 2.  The usual English name for the coded region,
 #     chosen so that alphabetic sorting of subsets produces helpful lists.
 #     This is not the same as the English name in the ISO 3166 tables.
@@ -189,7 +189,7 @@
 MF	St Martin (French)
 MG	Madagascar
 MH	Marshall Islands
-MK	Macedonia
+MK	North Macedonia
 ML	Mali
 MM	Myanmar (Burma)
 MN	Mongolia
@@ -258,7 +258,7 @@
 SV	El Salvador
 SX	St Maarten (Dutch)
 SY	Syria
-SZ	Swaziland
+SZ	Eswatini (Swaziland)
 TC	Turks & Caicos Is
 TD	Chad
 TF	French Southern & Antarctic Lands
--- a/make/data/tzdata/leapseconds	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/leapseconds	Thu Jul 25 11:31:07 2019 +0530
@@ -42,9 +42,12 @@
 # See: Levine J. Coordinated Universal Time and the leap second.
 # URSI Radio Sci Bull. 2016;89(4):30-6. doi:10.23919/URSIRSB.2016.7909995
 # <https://ieeexplore.ieee.org/document/7909995>.
+
 # There were no leap seconds before 1972, because the official mechanism
 # accounting for the discrepancy between atomic time and the earth's rotation
-# did not exist.
+# did not exist.  The first ("1 Jan 1972") data line in leap-seconds.list
+# does not denote a leap second; it denotes the start of the current definition
+# of UTC.
 
 # The correction (+ or -) is made at the given time, so lines
 # will typically look like:
@@ -83,7 +86,7 @@
 
 # POSIX timestamps for the data in this file:
 #updated 1467936000
-#expires 1561680000
+#expires 1577491200
 
-#	Updated through IERS Bulletin C56
-#	File expires on:  28 June 2019
+#	Updated through IERS Bulletin C57
+#	File expires on:  28 December 2019
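
To make the note about the first data line concrete: 1972-01-01 fixes TAI-UTC
at 10 s (the start of modern UTC), and each later line inserts one second.  A
small hand-rolled sketch of that arithmetic (the table is illustrative and
truncated, not the authoritative list):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class TaiUtc {
        public static void main(String[] args) {
            Map<String, Integer> taiMinusUtc = new LinkedHashMap<>();
            taiMinusUtc.put("1972-01-01", 10);  // start of UTC, not a leap second
            taiMinusUtc.put("1972-07-01", 11);  // first inserted leap second
            taiMinusUtc.put("1973-01-01", 12);
            taiMinusUtc.put("2017-01-01", 37);  // most recent entry as of Bulletin C57
            taiMinusUtc.forEach((date, secs) ->
                    System.out.println(date + "  TAI-UTC = " + secs + " s"));
        }
    }
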
--- a/make/data/tzdata/northamerica	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/northamerica	Thu Jul 25 11:31:07 2019 +0530
@@ -622,6 +622,26 @@
 # between AKST and AKDT from now on....
 # https://www.krbd.org/2015/10/30/annette-island-times-they-are-a-changing/
 
+# From Ryan Stanley (2018-11-06):
+# The Metlakatla community in Alaska has decided not to change its
+# clock back an hour starting on November 4th, 2018 (day before yesterday).
+# They will be gmtoff=-28800 year-round.
+# https://www.facebook.com/141055983004923/photos/pb.141055983004923.-2207520000.1541465673./569081370202380/
+
+# From Paul Eggert (2018-12-16):
+# In a 2018-12-11 special election, Metlakatla voted to go back to
+# Alaska time (including daylight saving time) starting next year.
+# https://www.krbd.org/2018/12/12/metlakatla-to-follow-alaska-standard-time-allow-liquor-sales/
+#
+# From Ryan Stanley (2019-01-11):
+# The community will be changing back on the 20th of this month...
+# From Tim Parenti (2019-01-11):
+# Per an announcement on the Metlakatla community's official Facebook page, the
+# "fall back" will be on Sunday 2019-01-20 at 02:00:
+# https://www.facebook.com/141055983004923/photos/607150969728753/
+# So they won't be waiting for Alaska to join them on 2019-03-10, but will
+# rather change their clocks twice in seven weeks.
+
 # Zone	NAME		GMTOFF	RULES	FORMAT	[UNTIL]
 Zone America/Juneau	 15:02:19 -	LMT	1867 Oct 19 15:33:32
 			 -8:57:41 -	LMT	1900 Aug 20 12:00
@@ -648,6 +668,8 @@
 			 -8:00	-	PST	1969
 			 -8:00	US	P%sT	1983 Oct 30  2:00
 			 -8:00	-	PST	2015 Nov  1  2:00
+			 -9:00	US	AK%sT	2018 Nov  4  2:00
+			 -8:00	-	PST	2019 Jan 20  2:00
 			 -9:00	US	AK%sT
 Zone America/Yakutat	 14:41:05 -	LMT	1867 Oct 19 15:12:18
 			 -9:18:55 -	LMT	1900 Aug 20 12:00
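
The zone modified above is presumably America/Metlakatla, given the 2018-2019
commentary earlier in this file.  A java.time sketch to inspect the flips
(again assuming the bundled tzdata contains this update):

    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.zone.ZoneOffsetTransition;

    public class MetlakatlaFlips {
        public static void main(String[] args) {
            // The 2018-11-04 change keeps the clock at -08:00 (AKDT -> PST), so it
            // may not appear as an offset transition; the 2019-01-20 fall back to
            // AKST (-09:00) does.
            ZoneId z = ZoneId.of("America/Metlakatla");
            for (ZoneOffsetTransition t : z.getRules().getTransitions()) {
                Instant i = t.getInstant();
                if (i.isAfter(Instant.parse("2018-01-01T00:00:00Z"))
                        && i.isBefore(Instant.parse("2019-06-01T00:00:00Z"))) {
                    System.out.println(t);
                }
            }
        }
    }
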
@@ -808,6 +830,22 @@
 # For a map of Indiana's time zone regions, see:
 # https://en.wikipedia.org/wiki/Time_in_Indiana
 #
+# From Paul Eggert (2018-11-30):
+# A brief but entertaining history of time in Indiana describes a 1949 debate
+# in the Indiana House where city legislators (who favored "fast time")
+# tussled with farm legislators (who didn't) over a bill to outlaw DST:
+#  "Lacking enough votes, the city faction tries to filibuster until time runs
+#   out on the session at midnight, but rural champion Rep. Herbert Copeland,
+#   R-Madison, leans over the gallery railing and forces the official clock
+#   back to 9 p.m., breaking it in the process.  The clock sticks on 9 as the
+#   debate rages on into the night.  The filibuster finally dies out and the
+#   bill passes, while outside the chamber, clocks read 3:30 a.m.  In the end,
+#   it doesn't matter which side won.  The law has no enforcement powers and
+#   is simply ignored by fast-time communities."
+# How Indiana went from 'God's time' to split zones and daylight-saving.
+# Indianapolis Star. 2018-11-27 14:58 -05.
+# https://www.indystar.com/story/news/politics/2018/11/27/indianapolis-indiana-time-zone-history-central-eastern-daylight-savings-time/2126300002/
+#
 # From Paul Eggert (2007-08-17):
 # Since 1970, most of Indiana has been like America/Indiana/Indianapolis,
 # with the following exceptions:
--- a/make/data/tzdata/zone.tab	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/data/tzdata/zone.tab	Thu Jul 25 11:31:07 2019 +0530
@@ -262,6 +262,7 @@
 KY	+1918-08123	America/Cayman
 KZ	+4315+07657	Asia/Almaty	Kazakhstan (most areas)
 KZ	+4448+06528	Asia/Qyzylorda	Qyzylorda/Kyzylorda/Kzyl-Orda
+KZ	+5312+06337	Asia/Qostanay	Qostanay/Kostanay/Kustanay
 KZ	+5017+05710	Asia/Aqtobe	Aqtobe/Aktobe
 KZ	+4431+05016	Asia/Aqtau	Mangghystau/Mankistau
 KZ	+4707+05156	Asia/Atyrau	Atyrau/Atirau/Gur'yev
@@ -355,9 +356,9 @@
 RU	+5443+02030	Europe/Kaliningrad	MSK-01 - Kaliningrad
 RU	+554521+0373704	Europe/Moscow	MSK+00 - Moscow area
 RU	+4457+03406	Europe/Simferopol	MSK+00 - Crimea
-RU	+4844+04425	Europe/Volgograd	MSK+00 - Volgograd
 RU	+5836+04939	Europe/Kirov	MSK+00 - Kirov
 RU	+4621+04803	Europe/Astrakhan	MSK+01 - Astrakhan
+RU	+4844+04425	Europe/Volgograd	MSK+01 - Volgograd
 RU	+5134+04602	Europe/Saratov	MSK+01 - Saratov
 RU	+5420+04824	Europe/Ulyanovsk	MSK+01 - Ulyanovsk
 RU	+5312+05009	Europe/Samara	MSK+01 - Samara, Udmurtia
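
A quick java.time sanity check for these zone.tab updates (hedged: both lines
only behave as noted once the matching rule data is compiled in):

    import java.time.Instant;
    import java.time.ZoneId;

    public class ZoneTabCheck {
        public static void main(String[] args) {
            // New Kazakh zone split out of Asia/Qyzylorda.
            System.out.println(ZoneId.getAvailableZoneIds().contains("Asia/Qostanay"));
            // Volgograd moved from MSK+00 to MSK+01, i.e. UTC+04, in late 2018.
            System.out.println(ZoneId.of("Europe/Volgograd").getRules()
                    .getOffset(Instant.parse("2019-06-01T00:00:00Z")));  // expected +04:00
        }
    }
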
--- a/make/hotspot/lib/CompileJvm.gmk	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/hotspot/lib/CompileJvm.gmk	Thu Jul 25 11:31:07 2019 +0530
@@ -47,6 +47,8 @@
     $(EXTRA_LDFLAGS) \
     #
 
+JVM_ASFLAGS += $(EXTRA_ASFLAGS)
+
 JVM_LIBS += \
     $(JVM_LIBS_FEATURES) \
     #
@@ -95,8 +97,7 @@
     unknownpragma doubunder w_enumnotused w_toomanyenumnotused \
     wvarhidenmem wunreachable wnoretvalue notemsource
 
-DISABLED_WARNINGS_xlc := 1540-0216 1540-0198 1540-1090 1540-1639 1540-1088 \
-    1500-010
+DISABLED_WARNINGS_xlc := tautological-compare shift-negative-value
 
 DISABLED_WARNINGS_microsoft :=
 
@@ -260,6 +261,15 @@
 #
 # Search the output for the operator(s) of interest, to see where they are
 # referenced.
+#
+# When a reference to the global 'operator delete' is reported, it might be
+# due to a "deleting destructor".  In this case, objdump may show the
+# reference to be associated with but not actually in a destructor.  A
+# deleting destructor is automatically generated for a class whose destructor
+# is virtual.  A deleting destructor requires an accessible 'operator delete'
+# for the associated class.  If the class doesn't provide a more specific
+# declaration (either directly or by inheriting from a class that provides
+# one) then the global definition will be used, triggering this check.
 
 ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
   ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang solstudio), )
--- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java	Thu Jul 25 11:31:07 2019 +0530
@@ -108,7 +108,7 @@
     private static final ResourceBundle.Control defCon =
         ResourceBundle.Control.getControl(ResourceBundle.Control.FORMAT_DEFAULT);
 
-    private static final String[] AVAILABLE_TZIDS = TimeZone.getAvailableIDs();
+    private static Set<String> AVAILABLE_TZIDS;
     private static String zoneNameTempFile;
     private static String tzDataDir;
     private static final Map<String, String> canonicalTZMap = new HashMap<>();
@@ -730,7 +730,7 @@
             });
         }
 
-        Arrays.stream(AVAILABLE_TZIDS).forEach(tzid -> {
+        getAvailableZoneIds().stream().forEach(tzid -> {
             // If the tzid is deprecated, get the data for the replacement id
             String tzKey = Optional.ofNullable((String)handlerSupplMeta.get(tzid))
                                    .orElse(tzid);
@@ -1074,8 +1074,20 @@
             StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
     }
 
+    // This method assumes handlerMetaZones is already initialized
+    private static Set<String> getAvailableZoneIds() {
+        assert handlerMetaZones != null;
+        if (AVAILABLE_TZIDS == null) {
+            AVAILABLE_TZIDS = new HashSet<>(ZoneId.getAvailableZoneIds());
+            AVAILABLE_TZIDS.addAll(handlerMetaZones.keySet());
+            AVAILABLE_TZIDS.remove(MetaZonesParseHandler.NO_METAZONE_KEY);
+        }
+
+        return AVAILABLE_TZIDS;
+    }
+
     private static Stream<String> zidMapEntry() {
-        return ZoneId.getAvailableZoneIds().stream()
+        return getAvailableZoneIds().stream()
                 .map(id -> {
                     String canonId = canonicalTZMap.getOrDefault(id, id);
                     String meta = handlerMetaZones.get(canonId);
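
The new getAvailableZoneIds() above is a lazily built union of the JDK's zone
IDs and the CLDR metazone keys, minus the "no metazone" marker.  The same
pattern in isolation, with placeholder keys standing in for handlerMetaZones:

    import java.time.ZoneId;
    import java.util.HashSet;
    import java.util.Set;

    public class LazyZoneIdUnion {
        private static Set<String> available;              // built on first use
        private static final Set<String> METAZONE_KEYS =   // stand-in for handlerMetaZones.keySet()
                Set.of("America_Pacific", "Europe_Central");

        static Set<String> getAvailableZoneIds() {
            if (available == null) {
                available = new HashSet<>(ZoneId.getAvailableZoneIds());
                available.addAll(METAZONE_KEYS);
            }
            return available;
        }

        public static void main(String[] args) {
            System.out.println(getAvailableZoneIds().size());
        }
    }
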
--- a/make/jdk/src/classes/build/tools/fixuppandoc/Main.java	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/jdk/src/classes/build/tools/fixuppandoc/Main.java	Thu Jul 25 11:31:07 2019 +0530
@@ -81,6 +81,10 @@
  *
  * Update the content string, to indicate it has been processed by this program.
  *
+ * <h2>{@code <nav id="TOC">}</h2>
+ *
+ * Set attribute {@code title="Table Of Contents"}
+ *
  */
 public class Main {
     /**
@@ -273,6 +277,11 @@
                     }
                     // <main> is not permitted within these elements
                     allowMain = false;
+                    if (name.equals("nav") && Objects.equals(attrs.get("id"), "TOC")) {
+                        out.write(buffer.toString()
+                                .replaceAll(">$", " title=\"Table Of Contents\">"));
+                        buffer.setLength(0);
+                    }
                     break;
 
                 case "body":
--- a/make/jdk/src/classes/build/tools/jdwpgen/AbstractCommandNode.java	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/jdk/src/classes/build/tools/jdwpgen/AbstractCommandNode.java	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,8 +30,8 @@
 class AbstractCommandNode extends AbstractNamedNode {
 
     void document(PrintWriter writer) {
-        writer.println("<h5 id=\"" + context.whereC + "\">" + name +
-                       " Command (" + nameNode.value() + ")</h5>");
+        writer.println("<h3 id=\"" + context.whereC + "\">" + name +
+                       " Command (" + nameNode.value() + ")</h3>");
         writer.println(comment());
         writer.println("<dl>");
         for (Node node : components) {
--- a/make/jdk/src/classes/build/tools/jdwpgen/AbstractNamedNode.java	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/jdk/src/classes/build/tools/jdwpgen/AbstractNamedNode.java	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,8 +62,8 @@
     }
 
     void document(PrintWriter writer) {
-        writer.println("<h4 id=\"" + name + "\">" + name +
-                       " Command Set</h4>");
+        writer.println("<h2 id=\"" + name + "\">" + name +
+                       " Command Set</h2>");
         for (Node node : components) {
             node.document(writer);
         }
--- a/make/jdk/src/classes/build/tools/jdwpgen/CommandSetNode.java	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/jdk/src/classes/build/tools/jdwpgen/CommandSetNode.java	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,9 +38,9 @@
     }
 
     void document(PrintWriter writer) {
-        writer.println("<h4 id=\"" + context.whereC + "\">" + name +
+        writer.println("<h2 id=\"" + context.whereC + "\">" + name +
                        " Command Set (" +
-                       nameNode.value() + ")</h4>");
+                       nameNode.value() + ")</h2>");
         writer.println(comment());
         for (Node node : components) {
             node.document(writer);
--- a/make/jdk/src/classes/build/tools/jdwpgen/ConstantSetNode.java	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/jdk/src/classes/build/tools/jdwpgen/ConstantSetNode.java	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,8 +54,8 @@
     }
 
     void document(PrintWriter writer) {
-        writer.println("<h4 id=\"" + context.whereC + "\">" + name +
-                       " Constants</h4>");
+        writer.println("<h2 id=\"" + context.whereC + "\">" + name +
+                       " Constants</h2>");
         writer.println(comment());
         writer.println("<table><tr>");
         writer.println("<th style=\"width: 20%\"><th style=\"width: 5%\"><th style=\"width:  65%\">");
--- a/make/jdk/src/classes/build/tools/jdwpgen/RootNode.java	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/jdk/src/classes/build/tools/jdwpgen/RootNode.java	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,11 +52,16 @@
         writer.println("</style>");
         writer.println("</head>");
         writer.println("<body>");
-        writer.println("<ul role=\"navigation\">");
+        writer.println("<div class=\"centered\" role=\"banner\">");
+        writer.println("<h1 id=\"Protocol_Details\">Java Debug Wire Protocol Details</h1>");
+        writer.println("</div>");
+        writer.println("<nav>");
+        writer.println("<ul>");
         for (Node node : components) {
             node.documentIndex(writer);
         }
         writer.println("</ul>");
+        writer.println("</nav>");
         writer.println("<div role=\"main\">");
         for (Node node : components) {
             node.document(writer);
--- a/make/launcher/LauncherCommon.gmk	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/launcher/LauncherCommon.gmk	Thu Jul 25 11:31:07 2019 +0530
@@ -235,7 +235,10 @@
           FORMAT := man, \
           FILTER := $(PANDOC_TROFF_MANPAGE_FILTER), \
           POST_PROCESS := $(MAN_POST_PROCESS), \
-          REPLACEMENTS := @@VERSION_SHORT@@ => $(VERSION_SHORT), \
+          REPLACEMENTS := \
+		@@COPYRIGHT_YEAR@@ => $(COPYRIGHT_YEAR) ; \
+		@@VERSION_SHORT@@ => $(VERSION_SHORT) ; \
+		@@VERSION_SPECIFICATION@@ => $(VERSION_SPECIFICATION), \
           EXTRA_DEPS := $(PANDOC_TROFF_MANPAGE_FILTER) \
               $(PANDOC_TROFF_MANPAGE_FILTER_JAVASCRIPT), \
       ))
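
The REPLACEMENTS list above is a straightforward token-for-value substitution
over the man-page sources.  A rough Java equivalent of what the build step
does to each page, with hypothetical values standing in for the real
version-numbers configuration:

    import java.util.Map;

    public class ManPageReplacements {
        public static void main(String[] args) {
            String page = "java @@VERSION_SPECIFICATION@@ (build @@VERSION_SHORT@@), "
                    + "(c) @@COPYRIGHT_YEAR@@";
            Map<String, String> repl = Map.of(
                    "@@COPYRIGHT_YEAR@@", "2019",          // hypothetical values
                    "@@VERSION_SHORT@@", "14",
                    "@@VERSION_SPECIFICATION@@", "14");
            for (Map.Entry<String, String> e : repl.entrySet()) {
                page = page.replace(e.getKey(), e.getValue());
            }
            System.out.println(page);
        }
    }
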
--- a/make/lib/Awt2dLibraries.gmk	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/lib/Awt2dLibraries.gmk	Thu Jul 25 11:31:07 2019 +0530
@@ -471,7 +471,6 @@
           $(LIBAWT_HEADLESS_CFLAGS), \
       EXTRA_HEADER_DIRS := $(LIBAWT_HEADLESS_EXTRA_HEADER_DIRS), \
       DISABLED_WARNINGS_gcc := unused-function, \
-      DISABLED_WARNINGS_xlc := 1506-356, \
       DISABLED_WARNINGS_solstudio := E_EMPTY_TRANSLATION_UNIT, \
       LDFLAGS := $(LDFLAGS_JDKLIB) \
           $(call SET_SHARED_LIBRARY_ORIGIN), \
@@ -481,10 +480,6 @@
       LIBS_solaris := $(LIBM) $(LIBDL) $(LIBCXX), \
   ))
 
-  # AIX warning explanation:
-  # 1506-356 : (W) Compilation unit is empty.
-  #            This happens during the headless build
-
   $(BUILD_LIBAWT_HEADLESS): $(BUILD_LIBAWT)
 
   TARGETS += $(BUILD_LIBAWT_HEADLESS)
--- a/make/test/JtregNativeHotspot.gmk	Tue Jul 23 22:21:16 2019 -0700
+++ b/make/test/JtregNativeHotspot.gmk	Thu Jul 25 11:31:07 2019 +0530
@@ -862,12 +862,13 @@
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libredefineClasses := -lpthread
     BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeinvoke := -ljvm -lpthread
     BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exestack-gap := -ljvm -lpthread
+    BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exestack-tls := -ljvm
     BUILD_TEST_exeinvoke_exeinvoke.c_OPTIMIZATION := NONE
     BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeFPRegs := -ldl
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAsyncGetCallTraceTest := -ldl
 else
   BUILD_HOTSPOT_JTREG_EXCLUDE += libtest-rw.c libtest-rwx.c libTestJNI.c \
-      exeinvoke.c exestack-gap.c libAsyncGetCallTraceTest.cpp
+      exeinvoke.c exestack-gap.c exestack-tls.c libAsyncGetCallTraceTest.cpp
 endif
 
 BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exesigtest := -ljvm
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -99,7 +99,7 @@
   __ xchg(access.resolved_addr(), value_opr, result, tmp);
 
   if (access.is_oop()) {
-    result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true);
+    result = load_reference_barrier(access.gen(), result);
     if (ShenandoahSATBBarrier) {
       pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
                   result /* pre_val */);
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -24,7 +24,7 @@
 #include "precompiled.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
 #include "gc/shenandoah/shenandoahForwarding.hpp"
-#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 #include "gc/shenandoah/shenandoahHeuristics.hpp"
 #include "gc/shenandoah/shenandoahRuntime.hpp"
@@ -47,7 +47,7 @@
                                                        Register src, Register dst, Register count, RegSet saved_regs) {
   if (is_oop) {
     bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
-    if (ShenandoahSATBBarrier && !dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
+    if (ShenandoahSATBBarrier && !dest_uninitialized) {
 
       Label done;
 
@@ -282,6 +282,40 @@
   __ leave();
 }
 
+void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Register tmp) {
+  if (!ShenandoahLoadRefBarrier) {
+    return;
+  }
+
+  assert(dst != rscratch2, "need rscratch2");
+
+  Label is_null;
+  Label done;
+
+  __ cbz(dst, is_null);
+
+  __ enter();
+
+  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  __ ldrb(rscratch2, gc_state);
+
+  // Check for heap in evacuation phase
+  __ tbz(rscratch2, ShenandoahHeap::EVACUATION_BITPOS, done);
+
+  __ mov(rscratch2, dst);
+  __ push_call_clobbered_registers();
+  __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native));
+  __ mov(r0, rscratch2);
+  __ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
+  __ mov(rscratch2, r0);
+  __ pop_call_clobbered_registers();
+  __ mov(dst, rscratch2);
+
+  __ bind(done);
+  __ leave();
+  __ bind(is_null);
+}
+
 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
   if (ShenandoahStoreValEnqueueBarrier) {
     // Save possibly live regs.
@@ -309,15 +343,25 @@
 void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Register dst, Address src, Register tmp1, Register tmp_thread) {
   bool on_oop = type == T_OBJECT || type == T_ARRAY;
+  bool not_in_heap = (decorators & IN_NATIVE) != 0;
   bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
   bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
   bool on_reference = on_weak || on_phantom;
+  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
 
   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
   if (on_oop) {
-    load_reference_barrier(masm, dst, tmp1);
-
-    if (ShenandoahKeepAliveBarrier && on_reference) {
+     if (not_in_heap) {
+       if (ShenandoahHeap::heap()->is_traversal_mode()) {
+         load_reference_barrier(masm, dst, tmp1);
+         keep_alive = true;
+       } else {
+         load_reference_barrier_native(masm, dst, tmp1);
+       }
+     } else {
+       load_reference_barrier(masm, dst, tmp1);
+     }
+    if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
       __ enter();
       satb_write_barrier_pre(masm /* masm */,
                              noreg /* obj */,
@@ -469,9 +513,7 @@
     __ mov(res, obj);
   }
   // Check for null.
-  if (stub->needs_null_check()) {
-    __ cbz(res, done);
-  }
+  __ cbz(res, done);
 
   load_reference_barrier_not_null(ce->masm(), res, rscratch1);
 
@@ -572,7 +614,7 @@
 
   __ push_call_clobbered_registers();
 
-  __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT));
+  __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
   __ blrt(lr, 1, 0, MacroAssembler::ret_type_integral);
   __ mov(rscratch1, r0);
   __ pop_call_clobbered_registers();
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
@@ -58,6 +58,7 @@
   void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp = noreg);
   void load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp);
   void load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp);
+  void load_reference_barrier_native(MacroAssembler* masm, Register dst, Register tmp);
 
   address generate_shenandoah_lrb(StubCodeGenerator* cgen);
 
--- a/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/z_aarch64.ad	Thu Jul 25 11:31:07 2019 +0530
@@ -61,7 +61,7 @@
 //
 // Execute ZGC load barrier (strong) slow path
 //
-instruct loadBarrierSlowReg(iRegP dst, memory mem, rFlagsReg cr,
+instruct loadBarrierSlowReg(iRegP dst, memory src, rFlagsReg cr,
     vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
     vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
     vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
@@ -69,20 +69,22 @@
     vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
     vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
     vRegD_V30 v30, vRegD_V31 v31) %{
-  match(Set dst (LoadBarrierSlowReg mem));
+  match(Set dst (LoadBarrierSlowReg src dst));
   predicate(!n->as_LoadBarrierSlowReg()->is_weak());
 
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
      KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
      KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
      KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
      KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
      KILL v29, KILL v30, KILL v31);
 
-  format %{"LoadBarrierSlowReg $dst, $mem" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
+
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
-                            $mem$$index, $mem$$scale, $mem$$disp, false);
+    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
+                            $src$$index, $src$$scale, $src$$disp, false);
   %}
   ins_pipe(pipe_slow);
 %}
@@ -90,7 +92,7 @@
 //
 // Execute ZGC load barrier (weak) slow path
 //
-instruct loadBarrierWeakSlowReg(iRegP dst, memory mem, rFlagsReg cr,
+instruct loadBarrierWeakSlowReg(iRegP dst, memory src, rFlagsReg cr,
     vRegD_V0 v0, vRegD_V1 v1, vRegD_V2 v2, vRegD_V3 v3, vRegD_V4 v4,
     vRegD_V5 v5, vRegD_V6 v6, vRegD_V7 v7, vRegD_V8 v8, vRegD_V9 v9,
     vRegD_V10 v10, vRegD_V11 v11, vRegD_V12 v12, vRegD_V13 v13, vRegD_V14 v14,
@@ -98,20 +100,22 @@
     vRegD_V20 v20, vRegD_V21 v21, vRegD_V22 v22, vRegD_V23 v23, vRegD_V24 v24,
     vRegD_V25 v25, vRegD_V26 v26, vRegD_V27 v27, vRegD_V28 v28, vRegD_V29 v29,
     vRegD_V30 v30, vRegD_V31 v31) %{
-  match(Set dst (LoadBarrierSlowReg mem));
+  match(Set dst (LoadBarrierSlowReg src dst));
   predicate(n->as_LoadBarrierSlowReg()->is_weak());
 
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
      KILL v0, KILL v1, KILL v2, KILL v3, KILL v4, KILL v5, KILL v6, KILL v7,
      KILL v8, KILL v9, KILL v10, KILL v11, KILL v12, KILL v13, KILL v14,
      KILL v15, KILL v16, KILL v17, KILL v18, KILL v19, KILL v20, KILL v21,
      KILL v22, KILL v23, KILL v24, KILL v25, KILL v26, KILL v27, KILL v28,
      KILL v29, KILL v30, KILL v31);
 
-  format %{"LoadBarrierWeakSlowReg $dst, $mem" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
+
   ins_encode %{
-    z_load_barrier_slow_reg(_masm, $dst$$Register, $mem$$base$$Register,
-                            $mem$$index, $mem$$scale, $mem$$disp, true);
+    z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$base$$Register,
+                            $src$$index, $src$$scale, $src$$disp, true);
   %}
   ins_pipe(pipe_slow);
 %}
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -800,6 +800,7 @@
 #endif
 
   // Class initialization barrier for static methods
+  address c2i_no_clinit_check_entry = NULL;
   if (VM_Version::supports_fast_class_init_checks()) {
     Label L_skip_barrier;
 
@@ -812,13 +813,15 @@
     __ load_method_holder(rscratch2, rmethod);
     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
+
     __ bind(L_skip_barrier);
+    c2i_no_clinit_check_entry = __ pc();
   }
 
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
-  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1277,6 +1277,7 @@
   c2i_entry = __ pc();
 
   // Class initialization barrier for static methods
+  address c2i_no_clinit_check_entry = NULL;
   if (VM_Version::supports_fast_class_init_checks()) {
     Label L_skip_barrier;
 
@@ -1295,11 +1296,12 @@
     __ bctr();
 
     __ bind(L_skip_barrier);
+    c2i_no_clinit_check_entry = __ pc();
   }
 
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
 
-  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 }
 
 #ifdef COMPILER2
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -2713,6 +2713,7 @@
   address c2i_entry = __ pc();
 
   // Class initialization barrier for static methods
+  address c2i_no_clinit_check_entry = NULL;
   if (VM_Version::supports_fast_class_init_checks()) {
     Label L_skip_barrier;
 
@@ -2729,11 +2730,12 @@
     __ z_br(klass);
 
     __ bind(L_skip_barrier);
+    c2i_no_clinit_check_entry = __ pc();
   }
 
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
-  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 }
 
 // This function returns the adjust size (in number of words) to a c2i adapter
--- a/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -110,7 +110,7 @@
   __ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);
 
   if (access.is_oop()) {
-    result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true);
+    result = load_reference_barrier(access.gen(), result);
     if (ShenandoahSATBBarrier) {
       pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr,
                   result /* pre_val */);
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -24,7 +24,7 @@
 #include "precompiled.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
 #include "gc/shenandoah/shenandoahForwarding.hpp"
-#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 #include "gc/shenandoah/shenandoahHeuristics.hpp"
 #include "gc/shenandoah/shenandoahRuntime.hpp"
@@ -69,7 +69,7 @@
     }
 #endif
 
-    if (ShenandoahSATBBarrier && !dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) {
+    if (ShenandoahSATBBarrier && !dest_uninitialized) {
       Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
       assert_different_registers(dst, count, thread); // we don't care about src here?
 #ifndef _LP64
@@ -401,6 +401,86 @@
 #endif
 }
 
+void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst) {
+  if (!ShenandoahLoadRefBarrier) {
+    return;
+  }
+
+  Label done;
+  Label not_null;
+  Label slow_path;
+
+  // null check
+  __ testptr(dst, dst);
+  __ jcc(Assembler::notZero, not_null);
+  __ jmp(done);
+  __ bind(not_null);
+
+
+#ifdef _LP64
+  Register thread = r15_thread;
+#else
+  Register thread = rcx;
+  if (thread == dst) {
+    thread = rbx;
+  }
+  __ push(thread);
+  __ get_thread(thread);
+#endif
+  assert_different_registers(dst, thread);
+
+  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
+  __ testb(gc_state, ShenandoahHeap::EVACUATION);
+#ifndef _LP64
+  __ pop(thread);
+#endif
+  __ jccb(Assembler::notZero, slow_path);
+  __ jmp(done);
+  __ bind(slow_path);
+
+  if (dst != rax) {
+    __ xchgptr(dst, rax); // Move obj into rax and save rax into obj.
+  }
+  __ push(rcx);
+  __ push(rdx);
+  __ push(rdi);
+  __ push(rsi);
+#ifdef _LP64
+  __ push(r8);
+  __ push(r9);
+  __ push(r10);
+  __ push(r11);
+  __ push(r12);
+  __ push(r13);
+  __ push(r14);
+  __ push(r15);
+#endif
+
+  __ movptr(rdi, rax);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native), rdi);
+
+#ifdef _LP64
+  __ pop(r15);
+  __ pop(r14);
+  __ pop(r13);
+  __ pop(r12);
+  __ pop(r11);
+  __ pop(r10);
+  __ pop(r9);
+  __ pop(r8);
+#endif
+  __ pop(rsi);
+  __ pop(rdi);
+  __ pop(rdx);
+  __ pop(rcx);
+
+  if (dst != rax) {
+    __ xchgptr(rax, dst); // Swap back obj with rax.
+  }
+
+  __ bind(done);
+}
+
 void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
   if (ShenandoahStoreValEnqueueBarrier) {
     storeval_barrier_impl(masm, dst, tmp);
@@ -457,12 +537,24 @@
   bool on_oop = type == T_OBJECT || type == T_ARRAY;
   bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
   bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
+  bool not_in_heap = (decorators & IN_NATIVE) != 0;
   bool on_reference = on_weak || on_phantom;
-   BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
+  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
+
+  BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
   if (on_oop) {
-    load_reference_barrier(masm, dst);
+    if (not_in_heap) {
+      if (ShenandoahHeap::heap()->is_traversal_mode()) {
+        load_reference_barrier(masm, dst);
+        keep_alive = true;
+      } else {
+        load_reference_barrier_native(masm, dst);
+      }
+    } else {
+      load_reference_barrier(masm, dst);
+    }
 
-    if (ShenandoahKeepAliveBarrier && on_reference) {
+    if (ShenandoahKeepAliveBarrier && on_reference && keep_alive) {
       const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
       assert_different_registers(dst, tmp1, tmp_thread);
       NOT_LP64(__ get_thread(thread));
@@ -788,10 +880,8 @@
   }
 
   // Check for null.
-  if (stub->needs_null_check()) {
-    __ testptr(res, res);
-    __ jcc(Assembler::zero, done);
-  }
+  __ testptr(res, res);
+  __ jcc(Assembler::zero, done);
 
   load_reference_barrier_not_null(ce->masm(), res);
 
@@ -943,7 +1033,7 @@
 
   save_vector_registers(cgen->assembler());
   __ movptr(rdi, rax);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), rdi);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), rdi);
   restore_vector_registers(cgen->assembler());
 
 #ifdef _LP64
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
@@ -78,6 +78,7 @@
 #endif
 
   void load_reference_barrier(MacroAssembler* masm, Register dst);
+  void load_reference_barrier_native(MacroAssembler* masm, Register dst);
 
   void cmpxchg_oop(MacroAssembler* masm,
                    Register res, Address addr, Register oldval, Register newval,
--- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad	Thu Jul 25 11:31:07 2019 +0530
@@ -45,32 +45,31 @@
 
 // For XMM and YMM enabled processors
 instruct zLoadBarrierSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
-                                      rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                      rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                       rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                       rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                       rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+  match(Set dst (LoadBarrierSlowReg src dst));
+  predicate(UseAVX <= 2 && !n->as_LoadBarrierSlowReg()->is_weak());
 
-  match(Set dst (LoadBarrierSlowReg src));
-  predicate((UseAVX <= 2) && !n->as_LoadBarrierSlowReg()->is_weak());
-
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
          KILL x4, KILL x5, KILL x6, KILL x7,
          KILL x8, KILL x9, KILL x10, KILL x11,
          KILL x12, KILL x13, KILL x14, KILL x15);
 
-  format %{ "zLoadBarrierSlowRegXmmAndYmm $dst, $src" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
 
   ins_encode %{
     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
   %}
-
   ins_pipe(pipe_slow);
 %}
 
 // For ZMM enabled processors
 instruct zLoadBarrierSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
-                                rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                 rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                 rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                 rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
@@ -79,10 +78,10 @@
                                 rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                 rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
-  match(Set dst (LoadBarrierSlowReg src));
-  predicate((UseAVX == 3) && !n->as_LoadBarrierSlowReg()->is_weak());
+  match(Set dst (LoadBarrierSlowReg src dst));
+  predicate(UseAVX == 3 && !n->as_LoadBarrierSlowReg()->is_weak());
 
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
          KILL x4, KILL x5, KILL x6, KILL x7,
          KILL x8, KILL x9, KILL x10, KILL x11,
@@ -92,43 +91,42 @@
          KILL x24, KILL x25, KILL x26, KILL x27,
          KILL x28, KILL x29, KILL x30, KILL x31);
 
-  format %{ "zLoadBarrierSlowRegZmm $dst, $src" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
 
   ins_encode %{
     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, false /* weak */);
   %}
-
   ins_pipe(pipe_slow);
 %}
 
 // For XMM and YMM enabled processors
 instruct zLoadBarrierWeakSlowRegXmmAndYmm(rRegP dst, memory src, rFlagsReg cr,
-                                          rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                          rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                           rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                           rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                           rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15) %{
+  match(Set dst (LoadBarrierSlowReg src dst));
+  predicate(UseAVX <= 2 && n->as_LoadBarrierSlowReg()->is_weak());
 
-  match(Set dst (LoadBarrierSlowReg src));
-  predicate((UseAVX <= 2) && n->as_LoadBarrierSlowReg()->is_weak());
-
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
          KILL x4, KILL x5, KILL x6, KILL x7,
          KILL x8, KILL x9, KILL x10, KILL x11,
          KILL x12, KILL x13, KILL x14, KILL x15);
 
-  format %{ "zLoadBarrierWeakSlowRegXmmAndYmm $dst, $src" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
 
   ins_encode %{
     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
   %}
-
   ins_pipe(pipe_slow);
 %}
 
 // For ZMM enabled processors
 instruct zLoadBarrierWeakSlowRegZmm(rRegP dst, memory src, rFlagsReg cr,
-                                    rxmm0 x0, rxmm1 x1, rxmm2 x2,rxmm3 x3,
+                                    rxmm0 x0, rxmm1 x1, rxmm2 x2, rxmm3 x3,
                                     rxmm4 x4, rxmm5 x5, rxmm6 x6, rxmm7 x7,
                                     rxmm8 x8, rxmm9 x9, rxmm10 x10, rxmm11 x11,
                                     rxmm12 x12, rxmm13 x13, rxmm14 x14, rxmm15 x15,
@@ -137,10 +135,10 @@
                                     rxmm24 x24, rxmm25 x25, rxmm26 x26, rxmm27 x27,
                                     rxmm28 x28, rxmm29 x29, rxmm30 x30, rxmm31 x31) %{
 
-  match(Set dst (LoadBarrierSlowReg src));
-  predicate((UseAVX == 3) && n->as_LoadBarrierSlowReg()->is_weak());
+  match(Set dst (LoadBarrierSlowReg src dst));
+  predicate(UseAVX == 3 && n->as_LoadBarrierSlowReg()->is_weak());
 
-  effect(DEF dst, KILL cr,
+  effect(KILL cr,
          KILL x0, KILL x1, KILL x2, KILL x3,
          KILL x4, KILL x5, KILL x6, KILL x7,
          KILL x8, KILL x9, KILL x10, KILL x11,
@@ -150,12 +148,12 @@
          KILL x24, KILL x25, KILL x26, KILL x27,
          KILL x28, KILL x29, KILL x30, KILL x31);
 
-  format %{ "zLoadBarrierWeakSlowRegZmm $dst, $src" %}
+  format %{ "lea $dst, $src\n\t"
+            "call #ZLoadBarrierSlowPath" %}
 
   ins_encode %{
     z_load_barrier_slow_reg(_masm, $dst$$Register, $src$$Address, true /* weak */);
   %}
-
   ins_pipe(pipe_slow);
 %}
 
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -971,10 +971,8 @@
 
   address c2i_entry = __ pc();
 
-  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
-  bs->c2i_entry_barrier(masm);
-
   // Class initialization barrier for static methods
+  address c2i_no_clinit_check_entry = NULL;
   if (VM_Version::supports_fast_class_init_checks()) {
     Label L_skip_barrier;
     Register method = rbx;
@@ -993,12 +991,16 @@
     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
 
     __ bind(L_skip_barrier);
+    c2i_no_clinit_check_entry = __ pc();
   }
 
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->c2i_entry_barrier(masm);
+
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 
   __ flush();
-  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
 }
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
--- a/src/hotspot/cpu/x86/x86_64.ad	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/cpu/x86/x86_64.ad	Thu Jul 25 11:31:07 2019 +0530
@@ -5543,7 +5543,7 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct maxF_reduction_reg(regF dst, regF a, regF b, regF xmmt, rRegI tmp, rFlagsReg cr) %{
+instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{
   predicate(UseAVX > 0 && n->is_reduction());
   match(Set dst (MaxF a b));
   effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@@ -5579,7 +5579,7 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct maxD_reduction_reg(regD dst, regD a, regD b, regD xmmt, rRegL tmp, rFlagsReg cr) %{
+instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{
   predicate(UseAVX > 0 && n->is_reduction());
   match(Set dst (MaxD a b));
   effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@@ -5615,7 +5615,7 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct minF_reduction_reg(regF dst, regF a, regF b, regF xmmt, rRegI tmp, rFlagsReg cr) %{
+instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{
   predicate(UseAVX > 0 && n->is_reduction());
   match(Set dst (MinF a b));
   effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@@ -5651,7 +5651,7 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct minD_reduction_reg(regD dst, regD a, regD b, regD xmmt, rRegL tmp, rFlagsReg cr) %{
+instruct minD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{
   predicate(UseAVX > 0 && n->is_reduction());
   match(Set dst (MinD a b));
   effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
--- a/src/hotspot/os/aix/attachListener_aix.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/aix/attachListener_aix.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -71,17 +71,7 @@
   // the file descriptor for the listening socket
   static int _listener;
 
-  static void set_path(char* path) {
-    if (path == NULL) {
-      _has_path = false;
-    } else {
-      strncpy(_path, path, UNIX_PATH_MAX);
-      _path[UNIX_PATH_MAX-1] = '\0';
-      _has_path = true;
-    }
-  }
-
-  static void set_listener(int s)               { _listener = s; }
+  static bool _atexit_registered;
 
   // reads a request from the given connected socket
   static AixAttachOperation* read_request(int s);
@@ -94,6 +84,19 @@
     ATTACH_ERROR_BADVERSION     = 101           // error codes
   };
 
+  static void set_path(char* path) {
+    if (path == NULL) {
+      _path[0] = '\0';
+      _has_path = false;
+    } else {
+      strncpy(_path, path, UNIX_PATH_MAX);
+      _path[UNIX_PATH_MAX-1] = '\0';
+      _has_path = true;
+    }
+  }
+
+  static void set_listener(int s)               { _listener = s; }
+
   // initialize the listener, returns 0 if okay
   static int init();
 
@@ -130,6 +133,7 @@
 char AixAttachListener::_path[UNIX_PATH_MAX];
 bool AixAttachListener::_has_path;
 int AixAttachListener::_listener = -1;
+bool AixAttachListener::_atexit_registered = false;
 // Shutdown marker to prevent accept blocking during clean-up
 bool AixAttachListener::_shutdown = false;
 
@@ -177,17 +181,15 @@
 //    should be sufficient for cleanup.
 extern "C" {
   static void listener_cleanup() {
-    static int cleanup_done;
-    if (!cleanup_done) {
-      cleanup_done = 1;
-      AixAttachListener::set_shutdown(true);
-      int s = AixAttachListener::listener();
-      if (s != -1) {
-        ::shutdown(s, 2);
-      }
-      if (AixAttachListener::has_path()) {
-        ::unlink(AixAttachListener::path());
-      }
+    AixAttachListener::set_shutdown(true);
+    int s = AixAttachListener::listener();
+    if (s != -1) {
+      AixAttachListener::set_listener(-1);
+      ::shutdown(s, 2);
+    }
+    if (AixAttachListener::has_path()) {
+      ::unlink(AixAttachListener::path());
+      AixAttachListener::set_path(NULL);
     }
   }
 }
@@ -200,7 +202,10 @@
   int listener;                      // listener socket (file descriptor)
 
   // register function to cleanup
-  ::atexit(listener_cleanup);
+  if (!_atexit_registered) {
+    _atexit_registered = true;
+    ::atexit(listener_cleanup);
+  }
 
   int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
                    os::get_temp_directory(), os::current_process_id());
@@ -515,6 +520,26 @@
   return ret_code;
 }
 
+bool AttachListener::check_socket_file() {
+  int ret;
+  struct stat64 st;
+  ret = stat64(AixAttachListener::path(), &st);
+  if (ret == -1) { // need to restart attach listener.
+    log_debug(attach)("Socket file %s does not exist - Restart Attach Listener",
+                      AixAttachListener::path());
+
+    listener_cleanup();
+
+    // wait to terminate current attach listener instance...
+    while (AttachListener::transit_state(AL_INITIALIZING,
+                                         AL_NOT_INITIALIZED) != AL_NOT_INITIALIZED) {
+      os::naked_yield();
+    }
+    return is_init_trigger();
+  }
+  return false;
+}
+
 // Attach Listener is started lazily except in the case when
 // +ReduceSignalUsage is used
 bool AttachListener::init_at_startup() {
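
The reworked listener_cleanup() above drops the local cleanup_done flag and instead makes the cleanup idempotent: the listener descriptor is reset to -1 before the socket is shut down, and the socket path is cleared after it is unlinked, so a second invocation (for example check_socket_file() running the cleanup and the atexit hook running it again at VM exit) finds nothing left to release. A minimal sketch of that capture-and-reset pattern, using invented names (Listener, _fd, _path) rather than the HotSpot classes and ignoring the locking the real code relies on:

    #include <sys/socket.h>   // shutdown, SHUT_RDWR
    #include <unistd.h>       // close, unlink
    #include <cstdio>

    // Hypothetical stand-in for the per-OS attach listener state.
    struct Listener {
      static int  _fd;          // -1 means "no listener socket"
      static char _path[108];   // empty string means "no socket file"

      // Idempotent cleanup: capture the resource, mark it released, then release it.
      static void cleanup() {
        int fd = _fd;
        if (fd != -1) {
          _fd = -1;                      // a later call sees "nothing to close"
          ::shutdown(fd, SHUT_RDWR);
          ::close(fd);
        }
        if (_path[0] != '\0') {
          ::unlink(_path);
          _path[0] = '\0';               // a later call sees "nothing to unlink"
        }
      }
    };

    int  Listener::_fd = -1;
    char Listener::_path[108] = "";

    int main() {
      Listener::cleanup();               // safe even if nothing was ever created
      Listener::cleanup();               // and safe to run again
      printf("cleanup ran twice without side effects\n");
      return 0;
    }

The BSD, Linux and Solaris variants further down apply the same shape to their own cleanup functions.
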
--- a/src/hotspot/os/aix/loadlib_aix.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/aix/loadlib_aix.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -207,7 +207,7 @@
     }
   }
 
-  trcVerbose("loadquery buffer size is %llu.", buflen);
+  trcVerbose("loadquery buffer size is " SIZE_FORMAT ".", buflen);
 
   // Iterate over the loadquery result. For details see sys/ldr.h on AIX.
   ldi = (struct ld_info*) buffer;
@@ -264,7 +264,7 @@
       e->info.is_in_vm = true;
     }
 
-    trcVerbose("entry: %p %llu, %p %llu, %s %s %s, %d",
+    trcVerbose("entry: %p " SIZE_FORMAT ", %p " SIZE_FORMAT ", %s %s %s, %d",
       e->info.text, e->info.text_len,
       e->info.data, e->info.data_len,
       e->info.path, e->info.shortname,
--- a/src/hotspot/os/aix/os_aix.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/aix/os_aix.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -487,8 +487,7 @@
       if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
         const int en = errno;
         ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
-        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
-          errno);
+        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%d", errno);
       } else {
         // Attach and double check pagesize.
         void* p = ::shmat(shmid, NULL, 0);
@@ -496,7 +495,7 @@
         guarantee0(p != (void*) -1); // Should always work.
         const size_t real_pagesize = os::Aix::query_pagesize(p);
         if (real_pagesize != pagesize) {
-          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
+          trcVerbose("real page size (" SIZE_FORMAT_HEX ") differs.", real_pagesize);
         } else {
           can_use = true;
         }
@@ -1888,12 +1887,12 @@
     if (!contains_range(p, s)) {
       trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
               "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
-              p, p + s, addr, addr + size);
+              p2i(p), p2i(p + s), p2i(addr), p2i(addr + size));
       guarantee0(false);
     }
     if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
       trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
-              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
+              " aligned to pagesize (%lu)", p2i(p), p2i(p + s), (unsigned long) pagesize);
       guarantee0(false);
     }
   }
@@ -1964,7 +1963,7 @@
 
   trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
     PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
-    bytes, requested_addr, alignment_hint);
+    bytes, p2i(requested_addr), alignment_hint);
 
   // Either give me wish address or wish alignment but not both.
   assert0(!(requested_addr != NULL && alignment_hint != 0));
@@ -1973,7 +1972,7 @@
   // BRK because that may cause malloc OOM.
   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
-      "Will attach anywhere.", requested_addr);
+      "Will attach anywhere.", p2i(requested_addr));
     // Act like the OS refused to attach there.
     requested_addr = NULL;
   }
@@ -2025,7 +2024,7 @@
 
   // Handle shmat error. If we failed to attach, just return.
   if (addr == (char*)-1) {
-    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
+    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", p2i(requested_addr), errno_shmat);
     return NULL;
   }
 
@@ -2033,15 +2032,15 @@
   // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
   const size_t real_pagesize = os::Aix::query_pagesize(addr);
   if (real_pagesize != shmbuf.shm_pagesize) {
-    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
+    trcVerbose("pagesize is, surprisingly, " SIZE_FORMAT, real_pagesize);
   }
 
   if (addr) {
     trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
-      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
+      p2i(addr), p2i(addr + size - 1), size, size/real_pagesize, describe_pagesize(real_pagesize));
   } else {
     if (requested_addr != NULL) {
-      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
+      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, p2i(requested_addr));
     } else {
       trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
     }
@@ -2057,7 +2056,7 @@
 static bool release_shmated_memory(char* addr, size_t size) {
 
   trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
-    addr, addr + size - 1);
+    p2i(addr), p2i(addr + size - 1));
 
   bool rc = false;
 
@@ -2073,12 +2072,12 @@
 
 static bool uncommit_shmated_memory(char* addr, size_t size) {
   trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
-    addr, addr + size - 1);
+    p2i(addr), p2i(addr + size - 1));
 
   const bool rc = my_disclaim64(addr, size);
 
   if (!rc) {
-    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
+    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", p2i(addr), size);
     return false;
   }
   return true;
@@ -2095,11 +2094,11 @@
 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
   trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
     "alignment_hint " UINTX_FORMAT "...",
-    bytes, requested_addr, alignment_hint);
+    bytes, p2i(requested_addr), alignment_hint);
 
   // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
   if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
-    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
+    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", p2i(requested_addr));
     return NULL;
   }
 
@@ -2107,7 +2106,7 @@
   // BRK because that may cause malloc OOM.
   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
     trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
-      "Will attach anywhere.", requested_addr);
+      "Will attach anywhere.", p2i(requested_addr));
     // Act like the OS refused to attach there.
     requested_addr = NULL;
   }
@@ -2154,7 +2153,7 @@
       PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
 
   if (addr == MAP_FAILED) {
-    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
+    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", p2i(requested_addr), size, errno);
     return NULL;
   }
 
@@ -2173,10 +2172,10 @@
 
   if (addr) {
     trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
-      addr, addr + bytes, bytes);
+      p2i(addr), p2i(addr + bytes), bytes);
   } else {
     if (requested_addr != NULL) {
-      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
+      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, p2i(requested_addr));
     } else {
       trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
     }
@@ -2196,7 +2195,7 @@
   assert0(is_aligned_to(size, os::vm_page_size()));
 
   trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
-    addr, addr + size - 1);
+    p2i(addr), p2i(addr + size - 1));
   bool rc = false;
 
   if (::munmap(addr, size) != 0) {
@@ -2216,7 +2215,7 @@
   assert0(is_aligned_to(size, os::vm_page_size()));
 
   trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
-    addr, addr + size - 1);
+    p2i(addr), p2i(addr + size - 1));
   bool rc = false;
 
   // Uncommit mmap memory with msync MS_INVALIDATE.
@@ -2247,7 +2246,7 @@
 static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                     int err) {
   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
-          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
+          ", %d) failed; error='%s' (errno=%d)", p2i(addr), size, exec,
           os::errno_name(err), err);
 }
 #endif
@@ -2275,7 +2274,7 @@
   guarantee0(vmi);
   vmi->assert_is_valid_subrange(addr, size);
 
-  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
+  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", p2i(addr), p2i(addr + size - 1));
 
   if (UseExplicitCommit) {
     // AIX commits memory on touch. So, touch all pages to be committed.
@@ -4075,7 +4074,7 @@
     assert(minor > 0, "invalid OS release");
     _os_version = (major << 24) | (minor << 16);
     char ver_str[20] = {0};
-    char *name_str = "unknown OS";
+    const char* name_str = "unknown OS";
     if (strcmp(uts.sysname, "OS400") == 0) {
       // We run on AS/400 PASE. We do not support versions older than V5R4M0.
       _on_pase = 1;
@@ -4086,19 +4085,19 @@
       name_str = "OS/400 (pase)";
       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
     } else if (strcmp(uts.sysname, "AIX") == 0) {
-      // We run on AIX. We do not support versions older than AIX 5.3.
+      // We run on AIX. We do not support versions older than AIX 7.1.
       _on_pase = 0;
       // Determine detailed AIX version: Version, Release, Modification, Fix Level.
       odmWrapper::determine_os_kernel_version(&_os_version);
-      if (os_version_short() < 0x0503) {
-        trcVerbose("AIX release older than AIX 5.3 not supported.");
+      if (os_version_short() < 0x0701) {
+        trcVerbose("AIX releases older than AIX 7.1 are not supported.");
         assert(false, "AIX release too old.");
       }
       name_str = "AIX";
       jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
                    major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
     } else {
-      assert(false, name_str);
+      assert(false, "%s", name_str);
     }
     trcVerbose("We run on %s %s", name_str, ver_str);
   }
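
The version gate above compares os_version_short() against 0x0701, i.e. AIX 7.1: _os_version packs the release as 0xMMmmRRFF (major, minor, release, fix level, with the lower two bytes filled in later by the ODM query), and the short form is assumed here to keep only the top two bytes. A tiny worked example of that packing:

    #include <cstdio>

    int main() {
      // AIX 7.1: major = 7, minor = 1; release and fix level are filled in later.
      unsigned major = 7, minor = 1;
      unsigned os_version    = (major << 24) | (minor << 16);   // 0x07010000
      unsigned short_version = os_version >> 16;                // 0x0701
      printf("packed=0x%08x short=0x%04x minimum=0x0701 accepted=%d\n",
             os_version, short_version, short_version >= 0x0701);
      return 0;
    }
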
--- a/src/hotspot/os/aix/perfMemory_aix.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/aix/perfMemory_aix.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1111,7 +1111,7 @@
 
   if ((statbuf.st_size == 0) ||
      ((size_t)statbuf.st_size % os::vm_page_size() != 0)) {
-    THROW_MSG_0(vmSymbols::java_lang_Exception(),
+    THROW_MSG_0(vmSymbols::java_io_IOException(),
                 "Invalid PerfMemory size");
   }
 
--- a/src/hotspot/os/bsd/attachListener_bsd.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/bsd/attachListener_bsd.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,17 +68,7 @@
   // the file descriptor for the listening socket
   static int _listener;
 
-  static void set_path(char* path) {
-    if (path == NULL) {
-      _has_path = false;
-    } else {
-      strncpy(_path, path, UNIX_PATH_MAX);
-      _path[UNIX_PATH_MAX-1] = '\0';
-      _has_path = true;
-    }
-  }
-
-  static void set_listener(int s)               { _listener = s; }
+  static bool _atexit_registered;
 
   // reads a request from the given connected socket
   static BsdAttachOperation* read_request(int s);
@@ -91,6 +81,19 @@
     ATTACH_ERROR_BADVERSION     = 101           // error codes
   };
 
+  static void set_path(char* path) {
+    if (path == NULL) {
+      _path[0] = '\0';
+      _has_path = false;
+    } else {
+      strncpy(_path, path, UNIX_PATH_MAX);
+      _path[UNIX_PATH_MAX-1] = '\0';
+      _has_path = true;
+    }
+  }
+
+  static void set_listener(int s)               { _listener = s; }
+
   // initialize the listener, returns 0 if okay
   static int init();
 
@@ -124,6 +127,7 @@
 char BsdAttachListener::_path[UNIX_PATH_MAX];
 bool BsdAttachListener::_has_path;
 int BsdAttachListener::_listener = -1;
+bool BsdAttachListener::_atexit_registered = false;
 
 // Supporting class to help split a buffer into individual components
 class ArgumentIterator : public StackObj {
@@ -158,16 +162,15 @@
 // bound too.
 extern "C" {
   static void listener_cleanup() {
-    static int cleanup_done;
-    if (!cleanup_done) {
-      cleanup_done = 1;
-      int s = BsdAttachListener::listener();
-      if (s != -1) {
-        ::close(s);
-      }
-      if (BsdAttachListener::has_path()) {
-        ::unlink(BsdAttachListener::path());
-      }
+    int s = BsdAttachListener::listener();
+    if (s != -1) {
+      BsdAttachListener::set_listener(-1);
+      ::shutdown(s, SHUT_RDWR);
+      ::close(s);
+    }
+    if (BsdAttachListener::has_path()) {
+      ::unlink(BsdAttachListener::path());
+      BsdAttachListener::set_path(NULL);
     }
   }
 }
@@ -180,7 +183,10 @@
   int listener;                      // listener socket (file descriptor)
 
   // register function to cleanup
-  ::atexit(listener_cleanup);
+  if (!_atexit_registered) {
+    _atexit_registered = true;
+    ::atexit(listener_cleanup);
+  }
 
   int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
                    os::get_temp_directory(), os::current_process_id());
@@ -485,6 +491,28 @@
   return ret_code;
 }
 
+bool AttachListener::check_socket_file() {
+  int ret;
+  struct stat st;
+  ret = stat(BsdAttachListener::path(), &st);
+  if (ret == -1) { // need to restart attach listener.
+    log_debug(attach)("Socket file %s does not exist - Restart Attach Listener",
+                      BsdAttachListener::path());
+
+    listener_cleanup();
+
+    // wait to terminate current attach listener instance...
+
+    while (AttachListener::transit_state(AL_INITIALIZING,
+
+                                         AL_NOT_INITIALIZED) != AL_NOT_INITIALIZED) {
+      os::naked_yield();
+    }
+    return is_init_trigger();
+  }
+  return false;
+}
+
 // Attach Listener is started lazily except in the case when
 // +ReduceSignalUsage is used
 bool AttachListener::init_at_startup() {
--- a/src/hotspot/os/bsd/perfMemory_bsd.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/bsd/perfMemory_bsd.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1028,7 +1028,7 @@
 
   if ((statbuf.st_size == 0) ||
      ((size_t)statbuf.st_size % os::vm_page_size() != 0)) {
-    THROW_MSG_0(vmSymbols::java_lang_Exception(),
+    THROW_MSG_0(vmSymbols::java_io_IOException(),
                 "Invalid PerfMemory size");
   }
 
--- a/src/hotspot/os/linux/attachListener_linux.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/linux/attachListener_linux.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,17 +69,7 @@
   // the file descriptor for the listening socket
   static int _listener;
 
-  static void set_path(char* path) {
-    if (path == NULL) {
-      _has_path = false;
-    } else {
-      strncpy(_path, path, UNIX_PATH_MAX);
-      _path[UNIX_PATH_MAX-1] = '\0';
-      _has_path = true;
-    }
-  }
-
-  static void set_listener(int s)               { _listener = s; }
+  static bool _atexit_registered;
 
   // reads a request from the given connected socket
   static LinuxAttachOperation* read_request(int s);
@@ -92,6 +82,19 @@
     ATTACH_ERROR_BADVERSION     = 101           // error codes
   };
 
+  static void set_path(char* path) {
+    if (path == NULL) {
+      _path[0] = '\0';
+      _has_path = false;
+    } else {
+      strncpy(_path, path, UNIX_PATH_MAX);
+      _path[UNIX_PATH_MAX-1] = '\0';
+      _has_path = true;
+    }
+  }
+
+  static void set_listener(int s)               { _listener = s; }
+
   // initialize the listener, returns 0 if okay
   static int init();
 
@@ -125,6 +128,7 @@
 char LinuxAttachListener::_path[UNIX_PATH_MAX];
 bool LinuxAttachListener::_has_path;
 int LinuxAttachListener::_listener = -1;
+bool LinuxAttachListener::_atexit_registered = false;
 
 // Supporting class to help split a buffer into individual components
 class ArgumentIterator : public StackObj {
@@ -159,16 +163,15 @@
 // bound too.
 extern "C" {
   static void listener_cleanup() {
-    static int cleanup_done;
-    if (!cleanup_done) {
-      cleanup_done = 1;
-      int s = LinuxAttachListener::listener();
-      if (s != -1) {
-        ::close(s);
-      }
-      if (LinuxAttachListener::has_path()) {
-        ::unlink(LinuxAttachListener::path());
-      }
+    int s = LinuxAttachListener::listener();
+    if (s != -1) {
+      LinuxAttachListener::set_listener(-1);
+      ::shutdown(s, SHUT_RDWR);
+      ::close(s);
+    }
+    if (LinuxAttachListener::has_path()) {
+      ::unlink(LinuxAttachListener::path());
+      LinuxAttachListener::set_path(NULL);
     }
   }
 }
@@ -181,7 +184,10 @@
   int listener;                      // listener socket (file descriptor)
 
   // register function to cleanup
-  ::atexit(listener_cleanup);
+  if (!_atexit_registered) {
+    _atexit_registered = true;
+    ::atexit(listener_cleanup);
+  }
 
   int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
                    os::get_temp_directory(), os::current_process_id());
@@ -485,6 +491,26 @@
   return ret_code;
 }
 
+bool AttachListener::check_socket_file() {
+  int ret;
+  struct stat64 st;
+  ret = stat64(LinuxAttachListener::path(), &st);
+  if (ret == -1) { // need to restart attach listener.
+    log_debug(attach)("Socket file %s does not exist - Restart Attach Listener",
+                      LinuxAttachListener::path());
+
+    listener_cleanup();
+
+    // wait to terminate current attach listener instance...
+    while (AttachListener::transit_state(AL_INITIALIZING,
+                                         AL_NOT_INITIALIZED) != AL_NOT_INITIALIZED) {
+      os::naked_yield();
+    }
+    return is_init_trigger();
+  }
+  return false;
+}
+
 // Attach Listener is started lazily except in the case when
 // +ReduceSignalUsage is used
 bool AttachListener::init_at_startup() {
--- a/src/hotspot/os/linux/globals_linux.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/linux/globals_linux.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -64,9 +64,13 @@
                                                                         \
   product(bool, PreferContainerQuotaForCPUCount, true,                  \
           "Calculate the container CPU availability based on the value" \
-          " of quotas (if set), when true. Otherwise, use the CPU"    \
+          " of quotas (if set), when true. Otherwise, use the CPU"      \
           " shares value, provided it is less than quota.")             \
                                                                         \
+  product(bool, AdjustStackSizeForTLS, false,                           \
+          "Increase the thread stack size to include space for glibc "  \
+          "static thread-local storage (TLS) if true")                  \
+                                                                        \
   diagnostic(bool, DumpPrivateMappingsInCore, true,                     \
           "If true, sets bit 2 of /proc/PID/coredump_filter, thus "     \
           "resulting in file-backed private mappings of the process to "\
--- a/src/hotspot/os/linux/os_linux.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/linux/os_linux.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -84,6 +84,7 @@
 # include <sys/select.h>
 # include <pthread.h>
 # include <signal.h>
+# include <endian.h>
 # include <errno.h>
 # include <dlfcn.h>
 # include <stdio.h>
@@ -800,6 +801,73 @@
   return 0;
 }
 
+// On Linux, glibc places static TLS blocks (for __thread variables) on
+// the thread stack. This decreases the stack size actually available
+// to threads.
+//
+// For large static TLS sizes, this may cause threads to malfunction due
+// to insufficient stack space. This is a well-known issue in glibc:
+// http://sourceware.org/bugzilla/show_bug.cgi?id=11787.
+//
+// As a workaround, we call a private but assumed-stable glibc function,
+// __pthread_get_minstack() to obtain the minstack size and derive the
+// static TLS size from it. We then increase the user requested stack
+// size by this TLS size.
+//
+// Due to compatibility concerns, this size adjustment is opt-in and
+// controlled via AdjustStackSizeForTLS.
+typedef size_t (*GetMinStack)(const pthread_attr_t *attr);
+
+GetMinStack _get_minstack_func = NULL;
+
+static void get_minstack_init() {
+  _get_minstack_func =
+        (GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack");
+  log_info(os, thread)("Lookup of __pthread_get_minstack %s",
+                       _get_minstack_func == NULL ? "failed" : "succeeded");
+}
+
+// Returns the size of the static TLS area glibc puts on thread stacks.
+// The value is cached on first use, which occurs when the first thread
+// is created during VM initialization.
+static size_t get_static_tls_area_size(const pthread_attr_t *attr) {
+  size_t tls_size = 0;
+  if (_get_minstack_func != NULL) {
+    // Obtain the pthread minstack size by calling __pthread_get_minstack.
+    size_t minstack_size = _get_minstack_func(attr);
+
+    // Remove non-TLS area size included in minstack size returned
+    // by __pthread_get_minstack() to get the static TLS size.
+    // In glibc before 2.27, minstack size includes guard_size.
+    // In glibc 2.27 and later, guard_size is automatically added
+    // to the stack size by pthread_create and is no longer included
+    // in minstack size. In both cases, the guard_size is taken into
+    // account, so there is no need to adjust the result for that.
+    //
+    // Although __pthread_get_minstack() is a private glibc function,
+    // it is expected to have a stable behavior across future glibc
+    // versions while glibc still allocates the static TLS blocks off
+    // the stack. Following is glibc 2.28 __pthread_get_minstack():
+    //
+    // size_t
+    // __pthread_get_minstack (const pthread_attr_t *attr)
+    // {
+    //   return GLRO(dl_pagesize) + __static_tls_size + PTHREAD_STACK_MIN;
+    // }
+    //
+    //
+    // The following 'minstack_size > os::vm_page_size() + PTHREAD_STACK_MIN'
+    // if-check is done as a precaution.
+    if (minstack_size > (size_t)os::vm_page_size() + PTHREAD_STACK_MIN) {
+      tls_size = minstack_size - os::vm_page_size() - PTHREAD_STACK_MIN;
+    }
+  }
+
+  log_info(os, thread)("Stack size adjustment for TLS is " SIZE_FORMAT,
+                       tls_size);
+  return tls_size;
+}
+
 bool os::create_thread(Thread* thread, ThreadType thr_type,
                        size_t req_stack_size) {
   assert(thread->osthread() == NULL, "caller responsible");
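
The block above resolves glibc's private __pthread_get_minstack() and subtracts the page size and PTHREAD_STACK_MIN from its result to recover the static TLS size; the adjustment is only applied when the new -XX:+AdjustStackSizeForTLS flag (globals_linux.hpp above) is set. The same lookup can be exercised outside the VM. A self-contained sketch, assuming a glibc system and a build line along the lines of g++ tls_size.cpp -ldl -pthread; the helper name estimate_static_tls_size is invented for the example:

    #include <dlfcn.h>      // dlsym, RTLD_DEFAULT
    #include <pthread.h>
    #include <limits.h>     // PTHREAD_STACK_MIN
    #include <unistd.h>     // sysconf
    #include <cstdio>

    typedef size_t (*GetMinStack)(const pthread_attr_t*);

    // Estimate the static TLS area glibc reserves on each thread stack.
    // Returns 0 if the private symbol is unavailable or the value looks implausible.
    static size_t estimate_static_tls_size() {
      GetMinStack fn = (GetMinStack)dlsym(RTLD_DEFAULT, "__pthread_get_minstack");
      if (fn == NULL) {
        return 0;
      }
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      size_t minstack = fn(&attr);
      pthread_attr_destroy(&attr);

      size_t page = (size_t)sysconf(_SC_PAGESIZE);
      // glibc 2.28: minstack = pagesize + __static_tls_size + PTHREAD_STACK_MIN,
      // so everything beyond page + PTHREAD_STACK_MIN is attributed to static TLS.
      if (minstack > page + PTHREAD_STACK_MIN) {
        return minstack - page - PTHREAD_STACK_MIN;
      }
      return 0;
    }

    int main() {
      printf("estimated static TLS size: %zu bytes\n", estimate_static_tls_size());
      return 0;
    }
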
@@ -825,7 +893,7 @@
 
   // Calculate stack size if it's not specified by caller.
   size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
-  // In the Linux NPTL pthread implementation the guard size mechanism
+  // In glibc versions prior to 2.7 the guard size mechanism
   // is not implemented properly. The posix standard requires adding
   // the size of the guard pages to the stack size, instead Linux
   // takes the space out of 'stacksize'. Thus we adapt the requested
@@ -833,17 +901,27 @@
   // behaviour. However, be careful not to end up with a size
   // of zero due to overflow. Don't add the guard page in that case.
   size_t guard_size = os::Linux::default_guard_size(thr_type);
-  if (stack_size <= SIZE_MAX - guard_size) {
-    stack_size += guard_size;
+  // Configure glibc guard page. Must happen before calling
+  // get_static_tls_area_size(), which uses the guard_size.
+  pthread_attr_setguardsize(&attr, guard_size);
+
+  size_t stack_adjust_size = 0;
+  if (AdjustStackSizeForTLS) {
+    // Adjust the stack_size for on-stack TLS - see get_static_tls_area_size().
+    stack_adjust_size += get_static_tls_area_size(&attr);
+  } else {
+    stack_adjust_size += guard_size;
+  }
+
+  stack_adjust_size = align_up(stack_adjust_size, os::vm_page_size());
+  if (stack_size <= SIZE_MAX - stack_adjust_size) {
+    stack_size += stack_adjust_size;
   }
   assert(is_aligned(stack_size, os::vm_page_size()), "stack_size not aligned");
 
   int status = pthread_attr_setstacksize(&attr, stack_size);
   assert_status(status == 0, status, "pthread_attr_setstacksize");
 
-  // Configure glibc guard page.
-  pthread_attr_setguardsize(&attr, os::Linux::default_guard_size(thr_type));
-
   ThreadState state;
 
   {
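
Once the TLS size (or, with the flag off, the guard size) is known, the hunk above rounds it up to a whole page and adds it to the requested stack size, taking care not to overflow SIZE_MAX. A short sketch of that arithmetic with hypothetical numbers (4 KiB pages, a 16 KiB guard, roughly 137 KiB of static TLS, a 512 KiB stack request):

    #include <cstddef>
    #include <cstdint>   // SIZE_MAX
    #include <cstdio>

    // Round 'value' up to the next multiple of 'alignment' (a power of two).
    static size_t align_up(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t page       = 4096;          // hypothetical page size
      const size_t guard_size = 16 * 1024;     // hypothetical glibc guard size
      const size_t tls_size   = 140000;        // hypothetical static TLS size
      const size_t requested  = 512 * 1024;    // caller-requested stack size

      for (int adjust_for_tls = 0; adjust_for_tls <= 1; adjust_for_tls++) {
        size_t adjust = adjust_for_tls ? tls_size : guard_size;
        adjust = align_up(adjust, page);
        size_t stack_size = requested;
        if (stack_size <= SIZE_MAX - adjust) {  // overflow guard, as in the patch
          stack_size += adjust;
        }
        printf("AdjustStackSizeForTLS=%d: %zu -> %zu bytes\n",
               adjust_for_tls, requested, stack_size);
      }
      return 0;
    }
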
@@ -1747,11 +1825,26 @@
     return NULL;
   }
 
+  if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) {
+    // handle invalid/out of range endianness values
+    if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) {
+      return NULL;
+    }
+
+#if defined(VM_LITTLE_ENDIAN)
+    // VM is LE, shared object BE
+    elf_head.e_machine = be16toh(elf_head.e_machine);
+#else
+    // VM is BE, shared object LE
+    elf_head.e_machine = le16toh(elf_head.e_machine);
+#endif
+  }
+
   typedef struct {
     Elf32_Half    code;         // Actual value as defined in elf.h
     Elf32_Half    compat_class; // Compatibility of archs at VM's sense
     unsigned char elf_class;    // 32 or 64 bit
-    unsigned char endianess;    // MSB or LSB
+    unsigned char endianness;   // MSB or LSB
     char*         name;         // String representation
   } arch_t;
 
@@ -1778,8 +1871,9 @@
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
     {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"},
 #endif
-    {EM_ARM,         EM_ARM,     ELFCLASS32,   ELFDATA2LSB, (char*)"ARM"},
-    {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
+    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
+    // we only support 64 bit z architecture
+    {EM_S390,        EM_S390,    ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"},
     {EM_ALPHA,       EM_ALPHA,   ELFCLASS64, ELFDATA2LSB, (char*)"Alpha"},
     {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
     {EM_MIPS,        EM_MIPS,    ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
@@ -1825,7 +1919,7 @@
         AARCH64, ALPHA, ARM, AMD64, IA32, IA64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, S390, SH, __sparc
 #endif
 
-  // Identify compatability class for VM's architecture and library's architecture
+  // Identify compatibility class for VM's architecture and library's architecture
   // Obtain string descriptions for architectures
 
   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
@@ -1849,29 +1943,35 @@
     return NULL;
   }
 
-  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
-    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
-    return NULL;
-  }
-
-#ifndef S390
-  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
-    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
-    return NULL;
-  }
-#endif // !S390
-
   if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
-    if (lib_arch.name!=NULL) {
+    if (lib_arch.name != NULL) {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
+                 " (Possible cause: can't load %s .so on a %s platform)",
                  lib_arch.name, arch_array[running_arch_index].name);
     } else {
       ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-                 lib_arch.code,
-                 arch_array[running_arch_index].name);
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)",
+                 lib_arch.code, arch_array[running_arch_index].name);
     }
+    return NULL;
+  }
+
+  if (lib_arch.endianness != arch_array[running_arch_index].endianness) {
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: endianness mismatch)");
+    return NULL;
+  }
+
+  // ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit
+  if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) {
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)");
+    return NULL;
+  }
+
+  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+               " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)",
+               (int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32);
+    return NULL;
   }
 
   return NULL;
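
The os_linux.cpp hunks above make the dlopen failure diagnostics cope with foreign shared objects: e_ident[EI_DATA] is range-checked, e_machine is byte-swapped with be16toh()/le16toh() when the object's byte order differs from the VM's, and the ELF class is validated before the 32/64-bit word-width message is produced. A standalone sketch of the header normalization; it reads the file directly instead of going through dlopen, and only looks at the Elf32_Ehdr prefix, which is sufficient because e_ident and e_machine sit at the same offsets in 32-bit and 64-bit headers:

    #include <elf.h>
    #include <endian.h>   // be16toh, le16toh, __BYTE_ORDER (glibc)
    #include <cstdio>

    // Return e_machine converted to host byte order, or -1 if EI_DATA is invalid.
    static int normalized_e_machine(const Elf32_Ehdr& hdr) {
      unsigned char data = hdr.e_ident[EI_DATA];
      if (data != ELFDATA2LSB && data != ELFDATA2MSB) {
        return -1;                       // invalid / out of range endianness value
      }
      Elf32_Half machine = hdr.e_machine;
    #if __BYTE_ORDER == __LITTLE_ENDIAN
      if (data == ELFDATA2MSB) {         // host is LE, object is BE
        machine = be16toh(machine);
      }
    #else
      if (data == ELFDATA2LSB) {         // host is BE, object is LE
        machine = le16toh(machine);
      }
    #endif
      return machine;
    }

    int main(int argc, char** argv) {
      if (argc < 2) { fprintf(stderr, "usage: %s <elf-file>\n", argv[0]); return 1; }
      FILE* f = fopen(argv[1], "rb");
      if (f == NULL) { perror("fopen"); return 1; }
      Elf32_Ehdr hdr;
      size_t n = fread(&hdr, sizeof(hdr), 1, f);
      fclose(f);
      if (n != 1) { fprintf(stderr, "short read\n"); return 1; }
      printf("e_machine (host byte order): %d\n", normalized_e_machine(hdr));
      return 0;
    }
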
@@ -5122,6 +5222,10 @@
     jdk_misc_signal_init();
   }
 
+  if (AdjustStackSizeForTLS) {
+    get_minstack_init();
+  }
+
   // Check and sets minimum stack sizes against command line options
   if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
     return JNI_ERR;
--- a/src/hotspot/os/linux/perfMemory_linux.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/linux/perfMemory_linux.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1107,7 +1107,7 @@
 
   if ((statbuf.st_size == 0) ||
      ((size_t)statbuf.st_size % os::vm_page_size() != 0)) {
-    THROW_MSG_0(vmSymbols::java_lang_Exception(),
+    THROW_MSG_0(vmSymbols::java_io_IOException(),
                 "Invalid PerfMemory size");
   }
 
--- a/src/hotspot/os/posix/vmError_posix.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/posix/vmError_posix.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -132,8 +132,9 @@
   // Needed because asserts may happen in error handling too.
 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
   if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
-    handle_assert_poison_fault(ucVoid, info->si_addr);
-    return;
+    if (handle_assert_poison_fault(ucVoid, info->si_addr)) {
+      return;
+    }
   }
 #endif // CAN_SHOW_REGISTERS_ON_ASSERT
 
--- a/src/hotspot/os/solaris/attachListener_solaris.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/solaris/attachListener_solaris.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,17 +75,7 @@
   // door descriptor returned by door_create
   static int _door_descriptor;
 
-  static void set_door_path(char* path) {
-    if (path == NULL) {
-      _has_door_path = false;
-    } else {
-      strncpy(_door_path, path, PATH_MAX);
-      _door_path[PATH_MAX] = '\0';      // ensure it's nul terminated
-      _has_door_path = true;
-    }
-  }
-
-  static void set_door_descriptor(int dd)               { _door_descriptor = dd; }
+  static bool _atexit_registered;
 
   // mutex to protect operation list
   static mutex_t _mutex;
@@ -121,6 +111,19 @@
     ATTACH_ERROR_DENIED         = 104
   };
 
+  static void set_door_path(char* path) {
+    if (path == NULL) {
+      _door_path[0] = '\0';
+      _has_door_path = false;
+    } else {
+      strncpy(_door_path, path, PATH_MAX);
+      _door_path[PATH_MAX] = '\0';      // ensure it's nul terminated
+      _has_door_path = true;
+    }
+  }
+
+  static void set_door_descriptor(int dd)               { _door_descriptor = dd; }
+
   // initialize the listener
   static int init();
 
@@ -169,6 +172,7 @@
 char SolarisAttachListener::_door_path[PATH_MAX+1];
 volatile bool SolarisAttachListener::_has_door_path;
 int SolarisAttachListener::_door_descriptor = -1;
+bool SolarisAttachListener::_atexit_registered = false;
 mutex_t SolarisAttachListener::_mutex;
 sema_t SolarisAttachListener::_wakeup;
 SolarisAttachOperation* SolarisAttachListener::_head = NULL;
@@ -364,18 +368,16 @@
 // atexit hook to detach the door and remove the file
 extern "C" {
   static void listener_cleanup() {
-    static int cleanup_done;
-    if (!cleanup_done) {
-      cleanup_done = 1;
-      int dd = SolarisAttachListener::door_descriptor();
-      if (dd >= 0) {
-        ::close(dd);
-      }
-      if (SolarisAttachListener::has_door_path()) {
-        char* path = SolarisAttachListener::door_path();
-        ::fdetach(path);
-        ::unlink(path);
-      }
+    int dd = SolarisAttachListener::door_descriptor();
+    if (dd >= 0) {
+      SolarisAttachListener::set_door_descriptor(-1);
+      ::close(dd);
+    }
+    if (SolarisAttachListener::has_door_path()) {
+      char* path = SolarisAttachListener::door_path();
+      ::fdetach(path);
+      ::unlink(path);
+      SolarisAttachListener::set_door_path(NULL);
     }
   }
 }
@@ -387,7 +389,10 @@
   int fd, res;
 
   // register exit function
-  ::atexit(listener_cleanup);
+  if (!_atexit_registered) {
+    _atexit_registered = true;
+    ::atexit(listener_cleanup);
+  }
 
   // create the door descriptor
   int dd = ::door_create(enqueue_proc, NULL, 0);
@@ -643,6 +648,26 @@
   }
 }
 
+bool AttachListener::check_socket_file() {
+  int ret;
+  struct stat64 st;
+  ret = stat64(SolarisAttachListener::door_path(), &st);
+  if (ret == -1) { // need to restart attach listener.
+    log_debug(attach)("Door file %s does not exist - Restart Attach Listener",
+                      SolarisAttachListener::door_path());
+
+    listener_cleanup();
+
+    // wait to terminate current attach listener instance...
+    while (AttachListener::transit_state(AL_INITIALIZING,
+                                         AL_NOT_INITIALIZED) != AL_NOT_INITIALIZED) {
+      os::naked_yield();
+    }
+    return is_init_trigger();
+  }
+  return false;
+}
+
 // If the file .attach_pid<pid> exists in the working directory
 // or /tmp then this is the trigger to start the attach mechanism
 bool AttachListener::is_init_trigger() {
--- a/src/hotspot/os/solaris/os_solaris.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1520,6 +1520,13 @@
   }
 }
 
+static void change_endianness(Elf32_Half& val) {
+  unsigned char *ptr = (unsigned char *)&val;
+  unsigned char swp = ptr[0];
+  ptr[0] = ptr[1];
+  ptr[1] = swp;
+}
+
 // Loads .dll/.so and
 // in case of error it checks if .dll/.so was built for the
 // same architecture as Hotspot is running on
@@ -1570,6 +1577,14 @@
     return NULL;
   }
 
+  if (elf_head.e_ident[EI_DATA] != LITTLE_ENDIAN_ONLY(ELFDATA2LSB) BIG_ENDIAN_ONLY(ELFDATA2MSB)) {
+    // handle invalid/out of range endianness values
+    if (elf_head.e_ident[EI_DATA] == 0 || elf_head.e_ident[EI_DATA] > 2) {
+      return NULL;
+    }
+    change_endianness(elf_head.e_machine);
+  }
+
   typedef struct {
     Elf32_Half    code;         // Actual value as defined in elf.h
     Elf32_Half    compat_class; // Compatibility of archs at VM's sense
@@ -1588,7 +1603,10 @@
     {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
-    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
+    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM"},
+    // we only support 64 bit z architecture
+    {EM_S390,        EM_S390,    ELFCLASS64, ELFDATA2MSB, (char*)"IBM System/390"},
+    {EM_AARCH64,     EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"}
   };
 
 #if  (defined IA32)
@@ -1612,7 +1630,7 @@
        IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
 #endif
 
-  // Identify compatability class for VM's architecture and library's architecture
+  // Identify compatibility class for VM's architecture and library's architecture
   // Obtain string descriptions for architectures
 
   arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
@@ -1636,29 +1654,37 @@
     return NULL;
   }
 
+  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
+    if (lib_arch.name != NULL) {
+      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+                 " (Possible cause: can't load %s .so on a %s platform)",
+                 lib_arch.name, arch_array[running_arch_index].name);
+    } else {
+      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s platform)",
+                 lib_arch.code, arch_array[running_arch_index].name);
+    }
+    return NULL;
+  }
+
   if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
     ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
     return NULL;
   }
 
+  // ELF file class/capacity : 0 - invalid, 1 - 32bit, 2 - 64bit
+  if (lib_arch.elf_class > 2 || lib_arch.elf_class < 1) {
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1, " (Possible cause: invalid ELF file class)");
+    return NULL;
+  }
+
   if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
-    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
+    ::snprintf(diag_msg_buf, diag_msg_max_length-1,
+               " (Possible cause: architecture word width mismatch, can't load %d-bit .so on a %d-bit platform)",
+               (int) lib_arch.elf_class * 32, arch_array[running_arch_index].elf_class * 32);
     return NULL;
   }
 
-  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
-    if (lib_arch.name!=NULL) {
-      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-                 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
-                 lib_arch.name, arch_array[running_arch_index].name);
-    } else {
-      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
-                 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
-                 lib_arch.code,
-                 arch_array[running_arch_index].name);
-    }
-  }
-
   return NULL;
 }
 
--- a/src/hotspot/os/solaris/perfMemory_solaris.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/solaris/perfMemory_solaris.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1055,7 +1055,7 @@
 
   if ((statbuf.st_size == 0) ||
      ((size_t)statbuf.st_size % os::vm_page_size() != 0)) {
-    THROW_MSG_0(vmSymbols::java_lang_Exception(),
+    THROW_MSG_0(vmSymbols::java_io_IOException(),
                 "Invalid PerfMemory size");
   }
 
--- a/src/hotspot/os/windows/attachListener_windows.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/windows/attachListener_windows.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -272,23 +272,13 @@
 
 // open the pipe to the client
 HANDLE Win32AttachOperation::open_pipe() {
-  HANDLE hPipe;
-
-  hPipe = ::CreateFile( pipe(),  // pipe name
+  HANDLE hPipe = ::CreateFile( pipe(),  // pipe name
                         GENERIC_WRITE,   // write only
                         0,              // no sharing
                         NULL,           // default security attributes
                         OPEN_EXISTING,  // opens existing pipe
                         0,              // default attributes
                         NULL);          // no template file
-
-  if (hPipe != INVALID_HANDLE_VALUE) {
-    // shouldn't happen as there is a pipe created per operation
-    if (::GetLastError() == ERROR_PIPE_BUSY) {
-      ::CloseHandle(hPipe);
-      return INVALID_HANDLE_VALUE;
-    }
-  }
   return hPipe;
 }
 
@@ -307,8 +297,7 @@
     }
     buf += nwrote;
     len -= nwrote;
-  }
-  while (len > 0);
+  } while (len > 0);
   return TRUE;
 }
 
@@ -326,6 +315,7 @@
   // java_suspend_self() via check_and_wait_while_suspended()
 
   HANDLE hPipe = open_pipe();
+  int lastError = (int)::GetLastError();
   if (hPipe != INVALID_HANDLE_VALUE) {
     BOOL fSuccess;
 
@@ -337,6 +327,7 @@
     if (fSuccess) {
       fSuccess = write_pipe(hPipe, (char*)result_stream->base(), (int)(result_stream->size()));
     }
+    lastError = (int)::GetLastError();
 
     // Need to flush buffers
     FlushFileBuffers(hPipe);
@@ -345,10 +336,10 @@
     if (fSuccess) {
       log_debug(attach)("wrote result of attach operation %s to pipe %s", name(), pipe());
     } else {
-      log_error(attach)("failure writing result of operation %s to pipe %s", name(), pipe());
+      log_error(attach)("failure (%d) writing result of operation %s to pipe %s", lastError, name(), pipe());
     }
   } else {
-    log_error(attach)("could not open pipe %s to send result of operation %s", pipe(), name());
+    log_error(attach)("could not open (%d) pipe %s to send result of operation %s", lastError, pipe(), name());
   }
 
   DWORD res = ::WaitForSingleObject(Win32AttachListener::mutex(), INFINITE);
@@ -392,6 +383,12 @@
   return Win32AttachListener::init();
 }
 
+// This function is used for Un*x OSes only.
+// We do not need to implement it for Windows.
+bool AttachListener::check_socket_file() {
+  return false;
+}
+
 bool AttachListener::init_at_startup() {
   return true;
 }
--- a/src/hotspot/os/windows/perfMemory_windows.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os/windows/perfMemory_windows.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1561,7 +1561,7 @@
       warning("unexpected file size: size = " SIZE_FORMAT "\n",
               statbuf.st_size);
     }
-    THROW_MSG_0(vmSymbols::java_lang_Exception(),
+    THROW_MSG_0(vmSymbols::java_io_IOException(),
                 "Invalid PerfMemory size");
   }
 
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -279,8 +279,9 @@
 
 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
   if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
-    handle_assert_poison_fault(ucVoid, info->si_addr);
-    return 1;
+    if (handle_assert_poison_fault(ucVoid, info->si_addr)) {
+      return 1;
+    }
   }
 #endif
 
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -301,8 +301,9 @@
 
 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
   if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
-    handle_assert_poison_fault(ucVoid, info->si_addr);
-    return 1;
+    if (handle_assert_poison_fault(ucVoid, info->si_addr)) {
+      return 1;
+    }
   }
 #endif
 
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -271,8 +271,9 @@
 
 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
   if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
-    handle_assert_poison_fault(ucVoid, info->si_addr);
-    return 1;
+    if (handle_assert_poison_fault(ucVoid, info->si_addr)) {
+      return 1;
+    }
   }
 #endif
 
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -270,8 +270,9 @@
 
 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
   if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
-    handle_assert_poison_fault(ucVoid, info->si_addr);
-    return 1;
+    if (handle_assert_poison_fault(ucVoid, info->si_addr)) {
+      return 1;
+    }
   }
 #endif
 
--- a/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -514,8 +514,9 @@
 
 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
   if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
-    handle_assert_poison_fault(ucVoid, info->si_addr);
-    return 1;
+    if (handle_assert_poison_fault(ucVoid, info->si_addr)) {
+      return 1;
+    }
   }
 #endif
 
--- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -57,7 +57,13 @@
 
 inline void OrderAccess::cross_modify_fence() {
   int idx = 0;
+#ifdef AMD64
   __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
+#else
+  // On some x86 systems EBX is a reserved register that cannot be
+  // clobbered, so we must protect it around the CPUID.
+  __asm__ volatile ("xchg %%esi, %%ebx; cpuid; xchg %%esi, %%ebx " : "+a" (idx) : : "esi", "ecx", "edx", "memory");
+#endif
 }
 
 template<>
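
The 32-bit branch added above exists because, in position-independent ia32 code, EBX holds the GOT pointer and the compiler may refuse inline asm that lists it as clobbered, so the patch parks EBX in ESI around the CPUID. Ordinary application code usually reaches for the compiler's <cpuid.h> helper instead, which performs the same EBX save and restore internally; a small sketch for comparison (an alternative illustration, not what the patch uses):

    #include <cpuid.h>    // __get_cpuid (GCC/Clang)
    #include <cstring>
    #include <cstdio>

    int main() {
      unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
      // Leaf 0 returns the highest basic leaf in EAX and the vendor id in
      // EBX/EDX/ECX. __get_cpuid saves and restores EBX itself when the
      // compiler reserves EBX as the PIC base register, so no xchg trick
      // is needed in plain application code.
      if (__get_cpuid(0, &eax, &ebx, &ecx, &edx)) {
        char vendor[13];
        memcpy(vendor + 0, &ebx, 4);     // vendor id is laid out EBX, EDX, ECX
        memcpy(vendor + 4, &edx, 4);
        memcpy(vendor + 8, &ecx, 4);
        vendor[12] = '\0';
        printf("max basic leaf=%u vendor=%s\n", eax, vendor);
      }
      return 0;
    }

Note that __get_cpuid's inline asm is typically not volatile, so its outputs must actually be consumed or the call may be optimized away, which is one reason a hand-written volatile asm remains the right tool for cross_modify_fence().
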
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -303,8 +303,9 @@
 
 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
   if ((sig == SIGSEGV || sig == SIGBUS) && info != NULL && info->si_addr == g_assert_poison) {
-    handle_assert_poison_fault(ucVoid, info->si_addr);
-    return 1;
+    if (handle_assert_poison_fault(ucVoid, info->si_addr)) {
+      return 1;
+    }
   }
 #endif
 
--- a/src/hotspot/share/adlc/formssel.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/adlc/formssel.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1513,7 +1513,7 @@
 
   MatchNode *mnode =
     strcmp(_matrule->_opType, "Set") ? _matrule : _matrule->_rChild;
-  mnode->count_instr_names(names);
+  if (mnode != NULL) mnode->count_instr_names(names);
 
   uint first = 1;
   // Start with the predicate supplied in the .ad file.
@@ -1726,26 +1726,25 @@
   const char *description = NULL;
   const char *value       = NULL;
   // Check if user provided any opcode definitions
-  if( this != NULL ) {
-    // Update 'value' if user provided a definition in the instruction
-    switch (desired_opcode) {
-    case PRIMARY:
-      description = "primary()";
-      if( _primary   != NULL)  { value = _primary;     }
-      break;
-    case SECONDARY:
-      description = "secondary()";
-      if( _secondary != NULL ) { value = _secondary;   }
-      break;
-    case TERTIARY:
-      description = "tertiary()";
-      if( _tertiary  != NULL ) { value = _tertiary;    }
-      break;
-    default:
-      assert( false, "ShouldNotReachHere();");
-      break;
-    }
+  // Update 'value' if user provided a definition in the instruction
+  switch (desired_opcode) {
+  case PRIMARY:
+    description = "primary()";
+    if( _primary   != NULL)  { value = _primary;     }
+    break;
+  case SECONDARY:
+    description = "secondary()";
+    if( _secondary != NULL ) { value = _secondary;   }
+    break;
+  case TERTIARY:
+    description = "tertiary()";
+    if( _tertiary  != NULL ) { value = _tertiary;    }
+    break;
+  default:
+    assert( false, "ShouldNotReachHere();");
+    break;
   }
+
   if (value != NULL) {
     fprintf(fp, "(%s /*%s*/)", value, description);
   }
@@ -3413,7 +3412,6 @@
 // Count occurrences of operands names in the leaves of the instruction
 // match rule.
 void MatchNode::count_instr_names( Dict &names ) {
-  if( this == NULL ) return;
   if( _lChild ) _lChild->count_instr_names(names);
   if( _rChild ) _rChild->count_instr_names(names);
   if( !_lChild && !_rChild ) {
@@ -3513,7 +3511,7 @@
     "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
     "GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
 #if INCLUDE_ZGC
-    "LoadBarrierSlowReg", "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
+    "ZGetAndSetP", "ZCompareAndSwapP", "ZCompareAndExchangeP", "ZWeakCompareAndSwapP",
 #endif
     "ClearArray"
   };
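
The formssel.cpp hunks above, together with the matching guards in the output_c.cpp hunks that follow, move NULL checks out of the callee and into the callers: count_instr_names() no longer tests this == NULL, and print_opcode() is only invoked once _inst._opcode is known to be non-NULL. Calling a member function through a null pointer is undefined behaviour in C++, and an optimizing compiler is entitled to delete a this == NULL test inside the callee, so the check has to live at the call site. A minimal illustration with invented names:

    #include <cstdio>

    struct Node {
      Node* left  = nullptr;
      Node* right = nullptr;
      int   value = 0;

      // Relying on "if (this == nullptr) return 0;" here would be undefined
      // behaviour: the compiler may assume 'this' is never null and drop the test.
      int sum() const {
        int s = value;
        if (left  != nullptr) s += left->sum();    // check before the call...
        if (right != nullptr) s += right->sum();   // ...never inside the callee
        return s;
      }
    };

    int main() {
      Node leaf;  leaf.value = 2;
      Node root;  root.value = 1;  root.left = &leaf;
      printf("sum = %d\n", root.sum());                    // prints 3

      Node* maybe = nullptr;
      int total = (maybe != nullptr) ? maybe->sum() : 0;   // caller-side guard
      printf("total = %d\n", total);                       // prints 0
      return 0;
    }
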
--- a/src/hotspot/share/adlc/output_c.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/adlc/output_c.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2377,7 +2377,7 @@
     _processing_noninput = false;
     // A replacement variable, originally '$'
     if ( Opcode::as_opcode_type(rep_var) != Opcode::NOT_AN_OPCODE ) {
-      if (!_inst._opcode->print_opcode(_fp, Opcode::as_opcode_type(rep_var) )) {
+      if ((_inst._opcode == NULL) || !_inst._opcode->print_opcode(_fp, Opcode::as_opcode_type(rep_var) )) {
         // Missing opcode
         _AD.syntax_err( _inst._linenum,
                         "Missing $%s opcode definition in %s, used by encoding %s\n",
@@ -2433,7 +2433,7 @@
       else if( Opcode::as_opcode_type(inst_rep_var) != Opcode::NOT_AN_OPCODE ) {
         // else check if "primary", "secondary", "tertiary"
         assert( _constant_status == LITERAL_ACCESSED, "Must be processing a literal constant parameter");
-        if (!_inst._opcode->print_opcode(_fp, Opcode::as_opcode_type(inst_rep_var) )) {
+        if ((_inst._opcode == NULL) || !_inst._opcode->print_opcode(_fp, Opcode::as_opcode_type(inst_rep_var) )) {
           // Missing opcode
           _AD.syntax_err( _inst._linenum,
                           "Missing $%s opcode definition in %s\n",
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/aot/aotCompiledMethod.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -168,7 +168,7 @@
   int state() const { return *_state_adr; }
 
   // Non-virtual for speed
-  bool _is_alive() const { return state() < zombie; }
+  bool _is_alive() const { return state() < unloaded; }
 
   virtual bool is_zombie() const { return state() == zombie; }
   virtual bool is_unloaded() const { return state() == unloaded; }
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -294,6 +294,12 @@
     if (entry == entry_for((StubID)id)) return name_for((StubID)id);
   }
 
+  BarrierSetC1* bsc1 = BarrierSet::barrier_set()->barrier_set_c1();
+  const char* name = bsc1->rtcall_name_for_address(entry);
+  if (name != NULL) {
+    return name;
+  }
+
 #define FUNCTION_CASE(a, f) \
   if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
 
--- a/src/hotspot/share/ci/ciEnv.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/ci/ciEnv.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -98,7 +98,7 @@
 
 // ------------------------------------------------------------------
 // ciEnv::ciEnv
-ciEnv::ciEnv(CompileTask* task, int system_dictionary_modification_counter)
+ciEnv::ciEnv(CompileTask* task)
   : _ciEnv_arena(mtCompiler) {
   VM_ENTRY_MARK;
 
@@ -118,7 +118,6 @@
   assert(!firstEnv, "not initialized properly");
 #endif /* !PRODUCT */
 
-  _system_dictionary_modification_counter = system_dictionary_modification_counter;
   _num_inlined_bytecodes = 0;
   assert(task == NULL || thread->task() == task, "sanity");
   if (task != NULL) {
@@ -183,7 +182,6 @@
   firstEnv = false;
 #endif /* !PRODUCT */
 
-  _system_dictionary_modification_counter = 0;
   _num_inlined_bytecodes = 0;
   _task = NULL;
   _log = NULL;
@@ -919,17 +917,6 @@
   return JavaThread::current()->thread_state() == _thread_in_vm;
 }
 
-bool ciEnv::system_dictionary_modification_counter_changed_locked() {
-  assert_locked_or_safepoint(Compile_lock);
-  return _system_dictionary_modification_counter != SystemDictionary::number_of_modifications();
-}
-
-bool ciEnv::system_dictionary_modification_counter_changed() {
-  VM_ENTRY_MARK;
-  MutexLocker ml(Compile_lock, THREAD); // lock with safepoint check
-  return system_dictionary_modification_counter_changed_locked();
-}
-
 // ------------------------------------------------------------------
 // ciEnv::validate_compile_task_dependencies
 //
@@ -938,8 +925,7 @@
 void ciEnv::validate_compile_task_dependencies(ciMethod* target) {
   if (failing())  return;  // no need for further checks
 
-  bool counter_changed = system_dictionary_modification_counter_changed_locked();
-  Dependencies::DepType result = dependencies()->validate_dependencies(_task, counter_changed);
+  Dependencies::DepType result = dependencies()->validate_dependencies(_task);
   if (result != Dependencies::end_marker) {
     if (result == Dependencies::call_site_target_value) {
       _inc_decompile_count_on_failure = false;
--- a/src/hotspot/share/ci/ciEnv.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/ci/ciEnv.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -51,7 +51,6 @@
 private:
   Arena*           _arena;       // Alias for _ciEnv_arena except in init_shared_objects()
   Arena            _ciEnv_arena;
-  int              _system_dictionary_modification_counter;
   ciObjectFactory* _factory;
   OopRecorder*     _oop_recorder;
   DebugInformationRecorder* _debug_info;
@@ -291,9 +290,6 @@
   // Helper routine for determining the validity of a compilation with
   // respect to method dependencies (e.g. concurrent class loading).
   void validate_compile_task_dependencies(ciMethod* target);
-
-  // Call internally when Compile_lock is already held.
-  bool system_dictionary_modification_counter_changed_locked();
 public:
   enum {
     MethodCompilable,
@@ -301,7 +297,7 @@
     MethodCompilable_never
   };
 
-  ciEnv(CompileTask* task, int system_dictionary_modification_counter);
+  ciEnv(CompileTask* task);
   // Used only during initialization of the ci
   ciEnv(Arena* arena);
   ~ciEnv();
@@ -456,9 +452,6 @@
   CompileLog* log() { return _log; }
   void set_log(CompileLog* log) { _log = log; }
 
-  // Check for changes to the system dictionary during compilation
-  bool system_dictionary_modification_counter_changed();
-
   void record_failure(const char* reason);      // Record failure and report later
   void report_failure(const char* reason);      // Report failure immediately
   void record_method_not_compilable(const char* reason, bool all_tiers = true);
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -266,6 +266,19 @@
 }
 #endif // PRODUCT
 
+void ClassLoaderData::clear_claim(int claim) {
+  for (;;) {
+    int old_claim = Atomic::load(&_claim);
+    if ((old_claim & claim) == 0) {
+      return;
+    }
+    int new_claim = old_claim & ~claim;
+    if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
+      return;
+    }
+  }
+}
+
 bool ClassLoaderData::try_claim(int claim) {
   for (;;) {
     int old_claim = Atomic::load(&_claim);
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -206,16 +206,17 @@
 
   // The "claim" is typically used to check if oops_do needs to be applied on
   // the CLD or not. Most GCs only perform strong marking during the marking phase.
-  enum {
-    _claim_none        = 0,
-    _claim_finalizable = 2,
-    _claim_strong      = 3
+  enum Claim {
+    _claim_none         = 0,
+    _claim_finalizable  = 2,
+    _claim_strong       = 3,
+    _claim_other        = 4
   };
   void clear_claim() { _claim = 0; }
+  void clear_claim(int claim);
   bool claimed() const { return _claim != 0; }
+  bool claimed(int claim) const { return (_claim & claim) == claim; }
   bool try_claim(int claim);
-  int get_claim() const { return _claim; }
-  void set_claim(int claim) { _claim = claim; }
 
   // Computes if the CLD is alive or not. This is safe to call in concurrent
   // contexts.
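
The claim protocol above is a small bitmask state machine: try_claim() atomically sets bits and reports whether this thread won them, and the new clear_claim(int) removes a specific bit so it can be reused later (for example _claim_other via ClassLoaderDataGraph::clear_claimed_marks(int)). Below is a minimal standalone sketch of the same CAS loops, written against std::atomic rather than HotSpot's Atomic class; the type and helper names are illustrative, not the HotSpot API.

#include <atomic>
#include <cassert>

// Illustrative claim bits, mirroring _claim_none/_claim_finalizable/_claim_strong/_claim_other.
enum Claim { claim_none = 0, claim_finalizable = 2, claim_strong = 3, claim_other = 4 };

struct ClaimedData {
  std::atomic<int> _claim{0};

  // Returns true if this caller added at least one of the requested bits.
  bool try_claim(int claim) {
    int old_claim = _claim.load();
    for (;;) {
      int new_claim = old_claim | claim;
      if (new_claim == old_claim) {
        return false;                       // all requested bits were already set
      }
      if (_claim.compare_exchange_weak(old_claim, new_claim)) {
        return true;                        // we set at least one bit
      }
      // old_claim was refreshed by compare_exchange_weak on failure; retry.
    }
  }

  // Removes the given bits; a no-op if none of them are set.
  void clear_claim(int claim) {
    int old_claim = _claim.load();
    for (;;) {
      if ((old_claim & claim) == 0) {
        return;
      }
      int new_claim = old_claim & ~claim;
      if (_claim.compare_exchange_weak(old_claim, new_claim)) {
        return;
      }
    }
  }

  bool claimed(int claim) const { return (_claim.load() & claim) == claim; }
};

int main() {
  ClaimedData cld;
  assert(cld.try_claim(claim_strong));      // first strong claim wins
  assert(!cld.try_claim(claim_strong));     // a second strong claim loses
  assert(cld.try_claim(claim_other));       // an independent bit can still be claimed
  cld.clear_claim(claim_other);             // e.g. clear_claimed_marks(_claim_other)
  assert(!cld.claimed(claim_other) && cld.claimed(claim_strong));
  return 0;
}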
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -64,6 +64,11 @@
   }
 }
 
+void ClassLoaderDataGraph::clear_claimed_marks(int claim) {
+  for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) {
+    cld->clear_claim(claim);
+  }
+}
 // Class iterator used by the compiler.  It gets some number of classes at
 // a safepoint to decay invocation counters on the methods.
 class ClassLoaderDataGraphKlassIteratorStatic {
@@ -471,7 +476,7 @@
   // The CLDs in [_head, _saved_head] were all added during last call to remember_new_clds(true);
   ClassLoaderData* curr = _head;
   while (curr != _saved_head) {
-    if (!curr->claimed()) {
+    if (!curr->claimed(ClassLoaderData::_claim_strong)) {
       array->push(curr);
       LogTarget(Debug, class, loader, data) lt;
       if (lt.is_enabled()) {
--- a/src/hotspot/share/classfile/classLoaderDataGraph.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -68,6 +68,7 @@
   static void clean_module_and_package_info();
   static void purge();
   static void clear_claimed_marks();
+  static void clear_claimed_marks(int claim);
   // Iteration through CLDG inside a safepoint; GC support
   static void cld_do(CLDClosure* cl);
   static void cld_unloading_do(CLDClosure* cl);
--- a/src/hotspot/share/classfile/stringTable.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/classfile/stringTable.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -342,7 +342,7 @@
   if (found_string != NULL) {
     return found_string;
   }
-  return do_intern(string_or_null_h, name, len, hash, CHECK_NULL);
+  return do_intern(string_or_null_h, name, len, hash, THREAD);
 }
 
 oop StringTable::do_intern(Handle string_or_null_h, const jchar* name,
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -98,7 +98,6 @@
 SymbolPropertyTable*   SystemDictionary::_invoke_method_table = NULL;
 ProtectionDomainCacheTable*   SystemDictionary::_pd_cache_table = NULL;
 
-int         SystemDictionary::_number_of_modifications = 0;
 oop         SystemDictionary::_system_loader_lock_obj     =  NULL;
 
 InstanceKlass*      SystemDictionary::_well_known_klasses[SystemDictionary::WKID_LIMIT]
@@ -115,6 +114,7 @@
 
 const int defaultProtectionDomainCacheSize = 1009;
 
+OopStorage* SystemDictionary::_vm_global_oop_storage = NULL;
 OopStorage* SystemDictionary::_vm_weak_oop_storage = NULL;
 
 
@@ -1039,11 +1039,7 @@
       // Add to class hierarchy, initialize vtables, and do possible
       // deoptimizations.
       add_to_hierarchy(k, CHECK_NULL); // No exception, but can block
-
       // But, do not add to dictionary.
-
-      // compiled code dependencies need to be validated anyway
-      notice_modification();
     }
 
     // Rewrite and patch constant pool here.
@@ -1849,7 +1845,7 @@
   return unloading_occurred;
 }
 
-void SystemDictionary::oops_do(OopClosure* f) {
+void SystemDictionary::oops_do(OopClosure* f, bool include_handles) {
   f->do_oop(&_java_system_loader);
   f->do_oop(&_java_platform_loader);
   f->do_oop(&_system_loader_lock_obj);
@@ -1857,6 +1853,10 @@
 
   // Visit extra methods
   invoke_method_table()->oops_do(f);
+
+  if (include_handles) {
+    vm_global_oop_storage()->oops_do(f);
+  }
 }
 
 // CDS: scan and relocate all classes referenced by _well_known_klasses[].
@@ -1880,7 +1880,6 @@
 void SystemDictionary::initialize(TRAPS) {
   // Allocate arrays
   _placeholders        = new PlaceholderTable(_placeholder_table_size);
-  _number_of_modifications = 0;
   _loader_constraints  = new LoaderConstraintTable(_loader_constraint_size);
   _resolution_errors   = new ResolutionErrorTable(_resolution_error_size);
   _invoke_method_table = new SymbolPropertyTable(_invoke_method_size);
@@ -2164,8 +2163,6 @@
     InstanceKlass* sd_check = find_class(d_hash, name, dictionary);
     if (sd_check == NULL) {
       dictionary->add_klass(d_hash, name, k);
-
-      notice_modification();
     }
   #ifdef ASSERT
     sd_check = find_class(d_hash, name, dictionary);
@@ -2901,12 +2898,22 @@
 }
 
 void SystemDictionary::initialize_oop_storage() {
+  _vm_global_oop_storage =
+    new OopStorage("VM Global Oop Handles",
+                   VMGlobalAlloc_lock,
+                   VMGlobalActive_lock);
+
   _vm_weak_oop_storage =
     new OopStorage("VM Weak Oop Handles",
                    VMWeakAlloc_lock,
                    VMWeakActive_lock);
 }
 
+OopStorage* SystemDictionary::vm_global_oop_storage() {
+  assert(_vm_global_oop_storage != NULL, "Uninitialized");
+  return _vm_global_oop_storage;
+}
+
 OopStorage* SystemDictionary::vm_weak_oop_storage() {
   assert(_vm_weak_oop_storage != NULL, "Uninitialized");
   return _vm_weak_oop_storage;
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -348,7 +348,9 @@
   static bool do_unloading(GCTimer* gc_timer);
 
   // Applies "f->do_oop" to all root oops in the system dictionary.
-  static void oops_do(OopClosure* f);
+  // If include_handles is true (the default), then the handles in the
+  // storage object returned by vm_global_oop_storage() are included.
+  static void oops_do(OopClosure* f, bool include_handles = true);
 
   // System loader lock
   static oop system_loader_lock()           { return _system_loader_lock_obj; }
@@ -362,13 +364,6 @@
   static void print_on(outputStream* st);
   static void dump(outputStream* st, bool verbose);
 
-  // Monotonically increasing counter which grows as classes are
-  // loaded or modifications such as hot-swapping or setting/removing
-  // of breakpoints are performed
-  static inline int number_of_modifications()     { assert_locked_or_safepoint(Compile_lock); return _number_of_modifications; }
-  // Needed by evolution and breakpoint code
-  static inline void notice_modification()        { assert_locked_or_safepoint(Compile_lock); ++_number_of_modifications;      }
-
   // Verification
   static void verify();
 
@@ -555,11 +550,6 @@
   // Hashtable holding placeholders for classes being loaded.
   static PlaceholderTable*       _placeholders;
 
-  // Monotonically increasing counter which grows with
-  // loading classes as well as hot-swapping and breakpoint setting
-  // and removal.
-  static int                     _number_of_modifications;
-
   // Lock object for system class loader
   static oop                     _system_loader_lock_obj;
 
@@ -575,7 +565,8 @@
   // ProtectionDomain cache
   static ProtectionDomainCacheTable*   _pd_cache_table;
 
-  // VM weak OopStorage object.
+  // VM OopStorage objects.
+  static OopStorage*             _vm_global_oop_storage;
   static OopStorage*             _vm_weak_oop_storage;
 
 protected:
@@ -633,6 +624,7 @@
   }
 
   static void initialize_oop_storage();
+  static OopStorage* vm_global_oop_storage();
   static OopStorage* vm_weak_oop_storage();
 
 protected:
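
oops_do() now takes an include_handles flag so callers can skip the handles kept in the new vm_global_oop_storage(). The following is a rough standalone sketch of that shape only; oop, OopClosure and HandleStorage here are simplified stand-ins, not the real HotSpot classes.

#include <cstdio>
#include <vector>

typedef const char* oop;   // illustrative stand-in, not the HotSpot oop type

struct OopClosure {
  virtual void do_oop(oop* p) = 0;
  virtual ~OopClosure() {}
};

struct HandleStorage {     // plays the role of vm_global_oop_storage()
  std::vector<oop> _handles;
  void oops_do(OopClosure* f) {
    for (size_t i = 0; i < _handles.size(); i++) {
      f->do_oop(&_handles[i]);
    }
  }
};

static oop java_system_loader = "system-loader";
static HandleStorage global_storage;

// Same shape as SystemDictionary::oops_do(f, include_handles): the fixed
// well-known roots are always visited, the global handle storage only when
// the caller asks for it.
void roots_do(OopClosure* f, bool include_handles = true) {
  f->do_oop(&java_system_loader);
  if (include_handles) {
    global_storage.oops_do(f);
  }
}

struct PrintClosure : public OopClosure {
  virtual void do_oop(oop* p) { std::printf("root: %s\n", *p); }
};

int main() {
  global_storage._handles.push_back("resolved-handle");
  PrintClosure pc;
  roots_do(&pc);         // visits the loader root and the handle storage
  roots_do(&pc, false);  // visits only the fixed roots
  return 0;
}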
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -247,9 +247,6 @@
   template(clazz_name,                                "clazz")                                    \
   template(exceptionTypes_name,                       "exceptionTypes")                           \
   template(modifiers_name,                            "modifiers")                                \
-  template(newConstructor_name,                       "newConstructor")                           \
-  template(newField_name,                             "newField")                                 \
-  template(newMethod_name,                            "newMethod")                                \
   template(invokeBasic_name,                          "invokeBasic")                              \
   template(linkToVirtual_name,                        "linkToVirtual")                            \
   template(linkToStatic_name,                         "linkToStatic")                             \
@@ -1088,6 +1085,7 @@
                                                                                                                         \
   /* support for Unsafe */                                                                                              \
   do_class(jdk_internal_misc_Unsafe,               "jdk/internal/misc/Unsafe")                                          \
+  do_class(sun_misc_Unsafe,                        "sun/misc/Unsafe")                                                   \
                                                                                                                         \
   do_intrinsic(_allocateInstance,         jdk_internal_misc_Unsafe,     allocateInstance_name, allocateInstance_signature, F_RN) \
    do_name(     allocateInstance_name,                                  "allocateInstance")                                      \
--- a/src/hotspot/share/code/codeCache.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/code/codeCache.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -771,9 +771,10 @@
 uint8_t CodeCache::_unloading_cycle = 1;
 
 void CodeCache::increment_unloading_cycle() {
-  if (_unloading_cycle == 1) {
-    _unloading_cycle = 2;
-  } else {
+  // 2-bit value (see IsUnloadingState in nmethod.cpp for details)
+  // 0 is reserved for new methods.
+  _unloading_cycle = (_unloading_cycle + 1) % 4;
+  if (_unloading_cycle == 0) {
     _unloading_cycle = 1;
   }
 }
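
The rewritten increment keeps _unloading_cycle within its 2-bit encoding while skipping 0, which stays reserved for nmethods that have not yet been through an unloading cycle. A tiny standalone check of that wrap-around (plain C++, not the CodeCache code itself):

#include <cassert>
#include <cstdint>

// Mirrors the wrap-around: valid cycle values are 1..3, 0 is reserved.
uint8_t next_unloading_cycle(uint8_t cycle) {
  cycle = (cycle + 1) % 4;
  if (cycle == 0) {
    cycle = 1;
  }
  return cycle;
}

int main() {
  uint8_t c = 1;
  // 1 -> 2 -> 3 -> 1 -> 2 -> 3; 0 never appears again.
  uint8_t expected[] = { 2, 3, 1, 2, 3, 1 };
  for (int i = 0; i < 6; i++) {
    c = next_unloading_cycle(c);
    assert(c == expected[i]);
  }
  return 0;
}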
--- a/src/hotspot/share/code/compiledMethod.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/code/compiledMethod.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -208,9 +208,9 @@
          not_used      = 1,  // not entrant, but revivable
          not_entrant   = 2,  // marked for deoptimization but activations may still exist,
                              // will be transformed to zombie when all activations are gone
-         zombie        = 3,  // no activations exist, nmethod is ready for purge
-         unloaded      = 4   // there should be no activations, should not be called,
-                             // will be transformed to zombie immediately
+         unloaded      = 3,  // there should be no activations, should not be called, will be
+                             // transformed to zombie by the sweeper, when not "locked in vm".
+         zombie        = 4   // no activations exist, nmethod is ready for purge
   };
 
   virtual bool  is_in_use() const = 0;
--- a/src/hotspot/share/code/dependencies.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/code/dependencies.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -627,32 +627,10 @@
   guarantee(FIRST_TYPE <= dept && dept < TYPE_LIMIT, "invalid dependency type: %d", (int) dept);
 }
 
-Dependencies::DepType Dependencies::validate_dependencies(CompileTask* task, bool counter_changed, char** failure_detail) {
-  // First, check non-klass dependencies as we might return early and
-  // not check klass dependencies if the system dictionary
-  // modification counter hasn't changed (see below).
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.is_klass_type())  continue;  // skip klass dependencies
-    Klass* witness = deps.check_dependency();
-    if (witness != NULL) {
-      return deps.type();
-    }
-  }
-
-  // Klass dependencies must be checked when the system dictionary
-  // changes.  If logging is enabled all violated dependences will be
-  // recorded in the log.  In debug mode check dependencies even if
-  // the system dictionary hasn't changed to verify that no invalid
-  // dependencies were inserted.  Any violated dependences in this
-  // case are dumped to the tty.
-  if (!counter_changed && !trueInDebug) {
-    return end_marker;
-  }
-
+Dependencies::DepType Dependencies::validate_dependencies(CompileTask* task, char** failure_detail) {
   int klass_violations = 0;
   DepType result = end_marker;
   for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (!deps.is_klass_type())  continue;  // skip non-klass dependencies
     Klass* witness = deps.check_dependency();
     if (witness != NULL) {
       if (klass_violations == 0) {
@@ -667,12 +645,7 @@
         }
       }
       klass_violations++;
-      if (!counter_changed) {
-        // Dependence failed but counter didn't change.  Log a message
-        // describing what failed and allow the assert at the end to
-        // trigger.
-        deps.print_dependency(witness);
-      } else if (xtty == NULL) {
+      if (xtty == NULL) {
         // If we're not logging then a single violation is sufficient,
         // otherwise we want to log all the dependences which were
         // violated.
@@ -681,15 +654,6 @@
     }
   }
 
-  if (klass_violations != 0) {
-#ifdef ASSERT
-    if (task != NULL && !counter_changed && !PrintCompilation) {
-      // Print out the compile task that failed
-      task->print_tty();
-    }
-#endif
-    assert(counter_changed, "failed dependencies, but counter didn't change");
-  }
   return result;
 }
 
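With the modification counter gone, validate_dependencies() no longer has an early-out path: it walks every dependency, remembers the type of the first violation, and only keeps scanning when all violations are being logged. A simplified standalone sketch of that control flow follows; DepType values and the Dependency struct are illustrative stand-ins, not the HotSpot classes.

#include <cstdio>
#include <vector>

// Illustrative stand-ins for Dependencies::DepType and DepStream.
enum DepType { end_marker = 0, evol_method, leaf_type, call_site_target_value };

struct Dependency {
  DepType type;
  bool violated;     // stands in for check_dependency() returning a witness
};

// Same shape as the simplified validate_dependencies(): every dependency is
// checked unconditionally, the first violated type becomes the result, and
// the walk stops after one violation unless all of them are being logged.
DepType validate(const std::vector<Dependency>& deps, bool logging) {
  DepType result = end_marker;
  int violations = 0;
  for (size_t i = 0; i < deps.size(); i++) {
    if (!deps[i].violated) continue;
    if (violations == 0) {
      result = deps[i].type;     // the first violation decides the failure type
    }
    violations++;
    if (logging) {
      std::printf("violated dependency of type %d\n", (int)deps[i].type);
    } else {
      break;                     // a single violation is sufficient when not logging
    }
  }
  return result;
}

int main() {
  std::vector<Dependency> deps;
  Dependency ok       = { leaf_type, false };
  Dependency broken_1 = { call_site_target_value, true };
  Dependency broken_2 = { evol_method, true };
  deps.push_back(ok);
  deps.push_back(broken_1);
  deps.push_back(broken_2);
  DepType quiet  = validate(deps, false);  // stops at the first violation
  DepType logged = validate(deps, true);   // reports both violations
  return (quiet == call_site_target_value && logged == call_site_target_value) ? 0 : 1;
}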
--- a/src/hotspot/share/code/dependencies.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/code/dependencies.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -476,7 +476,7 @@
 
   void copy_to(nmethod* nm);
 
-  DepType validate_dependencies(CompileTask* task, bool counter_changed, char** failure_detail = NULL);
+  DepType validate_dependencies(CompileTask* task, char** failure_detail = NULL);
 
   void log_all_dependencies();
 
--- a/src/hotspot/share/code/nmethod.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1136,6 +1136,20 @@
   mdo->inc_decompile_count();
 }
 
+bool nmethod::try_transition(int new_state_int) {
+  signed char new_state = new_state_int;
+  for (;;) {
+    signed char old_state = Atomic::load(&_state);
+    if (old_state >= new_state) {
+      // Ensure monotonicity of transitions.
+      return false;
+    }
+    if (Atomic::cmpxchg(new_state, &_state, old_state) == old_state) {
+      return true;
+    }
+  }
+}
+
 void nmethod::make_unloaded() {
   post_compiled_method_unload();
 
@@ -1159,7 +1173,9 @@
   }
   // Unlink the osr method, so we do not look this up again
   if (is_osr_method()) {
-    // Invalidate the osr nmethod only once
+    // Invalidate the osr nmethod only once. Note that with concurrent
+    // code cache unloading, OSR nmethods are invalidated before they
+    // are made unloaded. Therefore, this becomes a no-op then.
     if (is_in_use()) {
       invalidate_osr_method();
     }
@@ -1213,12 +1229,14 @@
   set_osr_link(NULL);
   NMethodSweeper::report_state_change(this);
 
-  // The release is only needed for compile-time ordering, as accesses
-  // into the nmethod after the store are not safe due to the sweeper
-  // being allowed to free it when the store is observed, during
-  // concurrent nmethod unloading. Therefore, there is no need for
-  // acquire on the loader side.
-  OrderAccess::release_store(&_state, (signed char)unloaded);
+  bool transition_success = try_transition(unloaded);
+
+  // It is an important invariant that there exists no race between
+  // the sweeper and GC thread competing for making the same nmethod
+  // zombie and unloaded respectively. This is ensured by
+  // can_convert_to_zombie() returning false for any is_unloading()
+  // nmethod, informing the sweeper not to step on any GC toes.
+  assert(transition_success, "Invalid nmethod transition to unloaded");
 
 #if INCLUDE_JVMCI
   // Clear the link between this nmethod and a HotSpotNmethod mirror
@@ -1283,7 +1301,7 @@
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
   assert(!is_zombie(), "should not already be a zombie");
 
-  if (_state == state) {
+  if (Atomic::load(&_state) >= state) {
     // Avoid taking the lock if already in required state.
     // This is safe from races because the state is an end-state,
     // which the nmethod cannot back out of once entered.
@@ -1318,7 +1336,7 @@
     // Enter critical section.  Does not block for safepoint.
     MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag);
 
-    if (_state == state) {
+    if (Atomic::load(&_state) >= state) {
       // another thread already performed this transition so nothing
       // to do, but return false to indicate this.
       return false;
@@ -1354,7 +1372,18 @@
     }
 
     // Change state
-    _state = state;
+    if (!try_transition(state)) {
+      // If the transition fails, it is due to another thread making the nmethod more
+      // dead. In particular, one thread might be making the nmethod unloaded concurrently.
+      // If so, having patched in the jump in the verified entry unnecessarily is fine.
+      // The nmethod is no longer possible to call by Java threads.
+      // Incrementing the decompile count is also fine as the caller of make_not_entrant()
+      // had a valid reason to deoptimize the nmethod.
+      // Marking the nmethod as seen on stack also has no effect, as the nmethod is now
+      // !is_alive(), and the seen on stack value is only used to convert not_entrant
+      // nmethods to zombie in can_convert_to_zombie().
+      return false;
+    }
 
     // Log the transition once
     log_state_change();
@@ -1774,10 +1803,9 @@
   }
 }
 
-void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
+void nmethod::oops_do(OopClosure* f, bool allow_dead) {
   // make sure the oops ready to receive visitors
-  assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
-  assert(!is_unloaded(), "should not call follow on unloaded nmethod");
+  assert(allow_dead || is_alive(), "should not call follow on dead nmethod");
 
   // Prevent extra code cache walk for platforms that don't have immediate oops.
   if (relocInfo::mustIterateImmediateOopsInCode()) {
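
try_transition(), together with the reordering of unloaded before zombie, makes the nmethod state strictly monotonic: racing threads can only move an nmethod toward "more dead", never back. Below is a standalone sketch of the same compare-and-swap pattern using std::atomic; the state names follow the enum in this changeset, and the in_use value of 0 is assumed only for illustration.

#include <atomic>
#include <cassert>

// State values in increasing "deadness" order.
enum { in_use = 0, not_used = 1, not_entrant = 2, unloaded = 3, zombie = 4 };

struct NMethodState {
  std::atomic<signed char> _state{in_use};

  // Succeeds only if the new state is strictly "more dead" than the current one.
  bool try_transition(int new_state_int) {
    signed char new_state = (signed char)new_state_int;
    signed char old_state = _state.load();
    for (;;) {
      if (old_state >= new_state) {
        return false;                      // never go backwards (monotonicity)
      }
      if (_state.compare_exchange_weak(old_state, new_state)) {
        return true;
      }
      // old_state was refreshed on failure; retry.
    }
  }

  bool is_alive() const { return _state.load() < unloaded; }
};

int main() {
  NMethodState nm;
  assert(nm.try_transition(not_entrant));   // the sweeper deoptimizes the method
  assert(nm.try_transition(unloaded));      // the GC unloads it concurrently
  assert(!nm.try_transition(not_entrant));  // a racing "make not entrant" now loses
  assert(!nm.is_alive());
  assert(nm.try_transition(zombie));        // only forward transitions succeed
  return 0;
}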
--- a/src/hotspot/share/code/nmethod.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/code/nmethod.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -212,6 +212,9 @@
   void* operator new(size_t size, int nmethod_size, int comp_level) throw();
 
   const char* reloc_string_for(u_char* begin, u_char* end);
+
+  bool try_transition(int new_state);
+
   // Returns true if this thread changed the state of the nmethod or
   // false if another thread performed the transition.
   bool make_not_entrant_or_zombie(int state);
@@ -339,7 +342,7 @@
   // flag accessing and manipulation
   bool  is_not_installed() const                  { return _state == not_installed; }
   bool  is_in_use() const                         { return _state <= in_use; }
-  bool  is_alive() const                          { return _state < zombie; }
+  bool  is_alive() const                          { return _state < unloaded; }
   bool  is_not_entrant() const                    { return _state == not_entrant; }
   bool  is_zombie() const                         { return _state == zombie; }
   bool  is_unloaded() const                       { return _state == unloaded; }
@@ -473,7 +476,7 @@
 
  public:
   void oops_do(OopClosure* f) { oops_do(f, false); }
-  void oops_do(OopClosure* f, bool allow_zombie);
+  void oops_do(OopClosure* f, bool allow_dead);
 
   bool test_set_oops_do_mark();
   static void oops_do_marking_prologue();
--- a/src/hotspot/share/compiler/compileBroker.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1595,16 +1595,10 @@
   // Final sanity check - the compiler object must exist
   guarantee(comp != NULL, "Compiler object must exist");
 
-  int system_dictionary_modification_counter;
-  {
-    MutexLocker locker(Compile_lock, thread);
-    system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
-  }
-
   {
     // Must switch to native to allocate ci_env
     ThreadToNativeFromVM ttn(thread);
-    ciEnv ci_env(NULL, system_dictionary_modification_counter);
+    ciEnv ci_env((CompileTask*)NULL);
     // Cache Jvmti state
     ci_env.cache_jvmti_state();
     // Cache DTrace flags
@@ -2045,12 +2039,6 @@
   bool failure_reason_on_C_heap = false;
   const char* retry_message = NULL;
 
-  int system_dictionary_modification_counter;
-  {
-    MutexLocker locker(Compile_lock, thread);
-    system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
-  }
-
 #if INCLUDE_JVMCI
   if (UseJVMCICompiler && comp != NULL && comp->is_jvmci()) {
     JVMCICompiler* jvmci = (JVMCICompiler*) comp;
@@ -2064,7 +2052,7 @@
       retry_message = "not retryable";
       compilable = ciEnv::MethodCompilable_never;
     } else {
-      JVMCICompileState compile_state(task, system_dictionary_modification_counter);
+      JVMCICompileState compile_state(task);
       JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__);
       methodHandle method(thread, target_handle);
       env.runtime()->compile_method(&env, jvmci, method, osr_bci);
@@ -2090,7 +2078,7 @@
     NoHandleMark  nhm;
     ThreadToNativeFromVM ttn(thread);
 
-    ciEnv ci_env(task, system_dictionary_modification_counter);
+    ciEnv ci_env(task);
     if (should_break) {
       ci_env.set_break_at_compile(true);
     }
--- a/src/hotspot/share/compiler/disassembler.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/compiler/disassembler.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -315,56 +315,96 @@
   }
 }
 
-decode_env::decode_env(CodeBuffer* code, outputStream* output) {
-  memset(this, 0, sizeof(*this));
-  _output = output ? output : tty;
-  _codeBlob    = NULL;
-  _codeBuffer  = code;
-  _helpPrinted = false;
+decode_env::decode_env(CodeBuffer* code, outputStream* output) :
+  _output(output ? output : tty),
+  _codeBuffer(code),
+  _codeBlob(NULL),
+  _nm(NULL),
+  _strings(),
+  _start(NULL),
+  _end(NULL),
+  _option_buf(),
+  _print_raw(0),
+  _cur_insn(NULL),
+  _bytes_per_line(0),
+  _pre_decode_alignment(0),
+  _post_decode_alignment(0),
+  _print_file_name(false),
+  _print_help(false),
+  _helpPrinted(false) {
 
+  memset(_option_buf, 0, sizeof(_option_buf));
   process_options(_output);
 }
 
-decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c) {
-   memset(this, 0, sizeof(*this)); // Beware, this zeroes bits of fields.
-   _output = output ? output : tty;
-  _codeBlob    = code;
-  _codeBuffer  = NULL;
-  _helpPrinted = false;
-  if (_codeBlob != NULL && _codeBlob->is_nmethod()) {
-    _nm = (nmethod*) code;
-  }
+decode_env::decode_env(CodeBlob* code, outputStream* output, CodeStrings c) :
+  _output(output ? output : tty),
+  _codeBuffer(NULL),
+  _codeBlob(code),
+  _nm(_codeBlob != NULL && _codeBlob->is_nmethod() ? (nmethod*) code : NULL),
+  _strings(),
+  _start(NULL),
+  _end(NULL),
+  _option_buf(),
+  _print_raw(0),
+  _cur_insn(NULL),
+  _bytes_per_line(0),
+  _pre_decode_alignment(0),
+  _post_decode_alignment(0),
+  _print_file_name(false),
+  _print_help(false),
+  _helpPrinted(false) {
+
+  memset(_option_buf, 0, sizeof(_option_buf));
   _strings.copy(c);
-
   process_options(_output);
 }
 
-decode_env::decode_env(nmethod* code, outputStream* output, CodeStrings c) {
-  memset(this, 0, sizeof(*this)); // Beware, this zeroes bits of fields.
-  _output = output ? output : tty;
-  _codeBlob    = NULL;
-  _codeBuffer  = NULL;
-  _nm          = code;
-  _start       = _nm->code_begin();
-  _end         = _nm->code_end();
-  _helpPrinted = false;
+decode_env::decode_env(nmethod* code, outputStream* output, CodeStrings c) :
+  _output(output ? output : tty),
+  _codeBuffer(NULL),
+  _codeBlob(NULL),
+  _nm(code),
+  _strings(),
+  _start(_nm->code_begin()),
+  _end(_nm->code_end()),
+  _option_buf(),
+  _print_raw(0),
+  _cur_insn(NULL),
+  _bytes_per_line(0),
+  _pre_decode_alignment(0),
+  _post_decode_alignment(0),
+  _print_file_name(false),
+  _print_help(false),
+  _helpPrinted(false) {
+
+  memset(_option_buf, 0, sizeof(_option_buf));
   _strings.copy(c);
-
   process_options(_output);
 }
 
 // Constructor for a 'decode_env' to decode a memory range [start, end)
 // of unknown origin, assuming it contains code.
-decode_env::decode_env(address start, address end, outputStream* output) {
+decode_env::decode_env(address start, address end, outputStream* output) :
+  _output(output ? output : tty),
+  _codeBuffer(NULL),
+  _codeBlob(NULL),
+  _nm(NULL),
+  _strings(),
+  _start(start),
+  _end(end),
+  _option_buf(),
+  _print_raw(0),
+  _cur_insn(NULL),
+  _bytes_per_line(0),
+  _pre_decode_alignment(0),
+  _post_decode_alignment(0),
+  _print_file_name(false),
+  _print_help(false),
+  _helpPrinted(false) {
+
   assert(start < end, "Range must have a positive size, [" PTR_FORMAT ".." PTR_FORMAT ").", p2i(start), p2i(end));
-  memset(this, 0, sizeof(*this));
-  _output = output ? output : tty;
-  _codeBlob    = NULL;
-  _codeBuffer  = NULL;
-  _start       = start;
-  _end         = end;
-  _helpPrinted = false;
-
+  memset(_option_buf, 0, sizeof(_option_buf));
   process_options(_output);
 }
 
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -62,7 +62,7 @@
   }
 
   size_t used_in_bytes() {
-    return _space->used_stable();
+    return _space->used();
   }
 };
 
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -372,8 +372,6 @@
     )
   }
   _dictionary->set_par_lock(&_parDictionaryAllocLock);
-
-  _used_stable = 0;
 }
 
 // Like CompactibleSpace forward() but always calls cross_threshold() to
@@ -579,14 +577,6 @@
   return capacity() - free();
 }
 
-size_t CompactibleFreeListSpace::used_stable() const {
-  return _used_stable;
-}
-
-void CompactibleFreeListSpace::recalculate_used_stable() {
-  _used_stable = used();
-}
-
 size_t CompactibleFreeListSpace::free() const {
   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   // if you do this while the structures are in flux you
@@ -1384,9 +1374,6 @@
     debug_only(fc->mangleAllocated(size));
   }
 
-  // After allocation, recalculate used space and update used_stable
-  recalculate_used_stable();
-
   return res;
 }
 
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -192,9 +192,6 @@
   // Used to keep track of limit of sweep for the space
   HeapWord* _sweep_limit;
 
-  // Stable value of used().
-  size_t _used_stable;
-
   // Used to make the young collector update the mod union table
   MemRegionClosure* _preconsumptionDirtyCardClosure;
 
@@ -415,17 +412,6 @@
   // which overestimates the region by returning the entire
   // committed region (this is safe, but inefficient).
 
-  // Returns monotonically increasing stable used space bytes for CMS.
-  // This is required for jstat and other memory monitoring tools
-  // that might otherwise see inconsistent used space values during a garbage
-  // collection, promotion or allocation into compactibleFreeListSpace.
-  // The value returned by this function might be smaller than the
-  // actual value.
-  size_t used_stable() const;
-  // Recalculate and cache the current stable used() value. Only to be called
-  // in places where we can be sure that the result is stable.
-  void recalculate_used_stable();
-
   // Returns a subregion of the space containing all the objects in
   // the space.
   MemRegion used_region() const {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -692,10 +692,6 @@
   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 }
 
-size_t ConcurrentMarkSweepGeneration::used_stable() const {
-  return cmsSpace()->used_stable();
-}
-
 size_t ConcurrentMarkSweepGeneration::max_available() const {
   return free() + _virtual_space.uncommitted_size();
 }
@@ -1527,8 +1523,6 @@
   FreelistLocker z(this);
   MetaspaceGC::compute_new_size();
   _cmsGen->compute_new_size_free_list();
-  // recalculate CMS used space after CMS collection
-  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 // A work method used by the foreground collector to do
@@ -2057,7 +2051,6 @@
 
   _capacity_at_prologue = capacity();
   _used_at_prologue = used();
-  _cmsSpace->recalculate_used_stable();
 
   // We enable promotion tracking so that card-scanning can recognize
   // which objects have been promoted during this GC and skip them.
@@ -2130,7 +2123,6 @@
   _eden_chunk_index = 0;
 
   size_t cms_used   = _cmsGen->cmsSpace()->used();
-  _cmsGen->cmsSpace()->recalculate_used_stable();
 
   // update performance counters - this uses a special version of
   // update_counters() that allows the utilization to be passed as a
@@ -2824,8 +2816,6 @@
     rp->enable_discovery();
     _collectorState = Marking;
   }
-
-  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 void CMSCollector::checkpointRootsInitialWork() {
@@ -4187,7 +4177,6 @@
     MutexLocker y(bitMapLock(),
                   Mutex::_no_safepoint_check_flag);
     checkpointRootsFinalWork();
-    _cmsGen->cmsSpace()->recalculate_used_stable();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -5347,14 +5336,9 @@
     // further below.
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
-
       // Update heap occupancy information which is used as
       // input to soft ref clearing policy at the next gc.
       Universe::update_heap_info_at_gc();
-
-      // recalculate CMS used space after CMS collection
-      _cmsGen->cmsSpace()->recalculate_used_stable();
-
       _collectorState = Resizing;
     }
   }
@@ -5443,7 +5427,6 @@
     // Gather statistics on the young generation collection.
     collector()->stats().record_gc0_end(used());
   }
-  _cmsSpace->recalculate_used_stable();
 }
 
 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -1112,7 +1112,6 @@
   double occupancy() const { return ((double)used())/((double)capacity()); }
   size_t contiguous_available() const;
   size_t unsafe_max_alloc_nogc() const;
-  size_t used_stable() const;
 
   // over-rides
   MemRegion used_region_at_save_marks() const;
--- a/src/hotspot/share/gc/cms/gSpaceCounters.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/cms/gSpaceCounters.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -59,7 +59,7 @@
   }
 
   inline void update_used() {
-    _used->set_value(_gen->used_stable());
+    _used->set_value(_gen->used());
   }
 
   // special version of update_used() to allow the used value to be
@@ -103,7 +103,7 @@
     GenerationUsedHelper(Generation* g) : _gen(g) { }
 
     inline jlong take_sample() {
-      return _gen->used_stable();
+      return _gen->used();
     }
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1CardTableEntryClosure.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1CARDTABLEENTRYCLOSURE_HPP
+#define SHARE_GC_G1_G1CARDTABLEENTRYCLOSURE_HPP
+
+#include "gc/shared/cardTable.hpp"
+#include "memory/allocation.hpp"
+
+// A closure class for processing card table entries.  Note that we don't
+// require these closure objects to be stack-allocated.
+class G1CardTableEntryClosure: public CHeapObj<mtGC> {
+public:
+  typedef CardTable::CardValue CardValue;
+
+  // Process the card whose card table entry is "card_ptr".  If this
+  // returns "false", terminate the iteration early.
+  virtual bool do_card_ptr(CardValue* card_ptr, uint worker_id) = 0;
+};
+
+#endif // SHARE_GC_G1_G1CARDTABLEENTRYCLOSURE_HPP
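
The header above only declares an abstract visitor over card table entries. A self-contained illustration of implementing and driving such a closure, using a stand-in CardValue encoding rather than the real CardTable:

#include <cstddef>
#include <cstdio>

typedef unsigned char CardValue;   // stand-in for CardTable::CardValue
const CardValue dirty_card = 0;    // illustrative encoding only

struct CardTableEntryClosure {
  // Process one card table entry; returning false stops the iteration early.
  virtual bool do_card_ptr(CardValue* card_ptr, unsigned worker_id) = 0;
  virtual ~CardTableEntryClosure() {}
};

struct CountDirtyClosure : public CardTableEntryClosure {
  size_t _dirtied;
  CountDirtyClosure() : _dirtied(0) {}
  virtual bool do_card_ptr(CardValue* card_ptr, unsigned /*worker_id*/) {
    if (*card_ptr == dirty_card) {
      _dirtied++;
    }
    return true;                   // never terminate early
  }
};

// Drives a closure over a buffer of card pointers, the way the queue-set code
// applies a G1CardTableEntryClosure to each completed buffer.
static void apply_to_buffer(CardTableEntryClosure* cl, CardValue** buf, size_t len) {
  for (size_t i = 0; i < len; i++) {
    if (!cl->do_card_ptr(buf[i], 0 /* worker_id */)) {
      return;
    }
  }
}

int main() {
  CardValue cards[4] = { 0, 1, 0, 1 };
  CardValue* buffer[4] = { &cards[0], &cards[1], &cards[2], &cards[3] };
  CountDirtyClosure cl;
  apply_to_buffer(&cl, buffer, 4);
  std::printf("dirty cards: %zu\n", cl._dirtied);  // prints 2
  return 0;
}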
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -31,6 +31,7 @@
 #include "gc/g1/g1Allocator.inline.hpp"
 #include "gc/g1/g1Arguments.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1CardTableEntryClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorState.hpp"
@@ -47,8 +48,10 @@
 #include "gc/g1/g1HotCardCache.hpp"
 #include "gc/g1/g1MemoryPool.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
+#include "gc/g1/g1ParallelCleaning.hpp"
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
 #include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1RedirtyCardsQueue.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1RootClosures.hpp"
@@ -72,7 +75,6 @@
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/oopStorageParState.hpp"
-#include "gc/shared/parallelCleaning.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/referenceProcessor.inline.hpp"
@@ -1078,7 +1080,9 @@
 
   // Discard all remembered set updates.
   G1BarrierSet::dirty_card_queue_set().abandon_logs();
-  assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
+  assert(G1BarrierSet::dirty_card_queue_set().num_completed_buffers() == 0,
+         "DCQS should be empty");
+  redirty_cards_queue_set().verify_empty();
 }
 
 void G1CollectedHeap::verify_after_full_collection() {
@@ -1517,7 +1521,7 @@
   _collection_set(this, _policy),
   _hot_card_cache(NULL),
   _rem_set(NULL),
-  _dirty_card_queue_set(false),
+  _redirty_cards_queue_set(),
   _cm(NULL),
   _cm_thread(NULL),
   _cr(NULL),
@@ -1687,8 +1691,8 @@
                                                   &bs->dirty_card_queue_buffer_allocator(),
                                                   true); // init_free_ids
 
-  dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
-                                    &bs->dirty_card_queue_buffer_allocator());
+  // Use same buffer allocator as dirty card qset, to allow merging.
+  _redirty_cards_queue_set.initialize(&bs->dirty_card_queue_buffer_allocator());
 
   // Create the hot card cache.
   _hot_card_cache = new G1HotCardCache(this);
@@ -1953,7 +1957,7 @@
   while (dcqs.apply_closure_during_gc(cl, worker_i)) {
     n_completed_buffers++;
   }
-  assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!");
+  assert(dcqs.num_completed_buffers() == 0, "Completed buffers exist!");
   phase_times()->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_i, n_completed_buffers, G1GCPhaseTimes::MergeLBProcessedBuffers);
 }
 
@@ -2609,10 +2613,9 @@
   Threads::threads_do(&count_from_threads);
 
   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-  size_t buffer_size = dcqs.buffer_size();
-  size_t buffer_num = dcqs.completed_buffers_num();
-
-  return buffer_size * buffer_num + count_from_threads._cards;
+  dcqs.verify_num_entries_in_completed_buffers();
+
+  return dcqs.num_entries_in_completed_buffers() + count_from_threads._cards;
 }
 
 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
@@ -3162,7 +3165,7 @@
 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
                                         bool class_unloading_occurred) {
   uint num_workers = workers()->active_workers();
-  ParallelCleaningTask unlink_task(is_alive, num_workers, class_unloading_occurred, false);
+  G1ParallelCleaningTask unlink_task(is_alive, num_workers, class_unloading_occurred, false);
   workers()->run_task(&unlink_task);
 }
 
@@ -3213,18 +3216,43 @@
 
 class G1RedirtyLoggedCardsTask : public AbstractGangTask {
  private:
-  G1DirtyCardQueueSet* _queue;
+  G1RedirtyCardsQueueSet* _qset;
   G1CollectedHeap* _g1h;
+  BufferNode* volatile _nodes;
+
+  void apply(G1CardTableEntryClosure* cl, BufferNode* node, uint worker_id) {
+    void** buf = BufferNode::make_buffer_from_node(node);
+    size_t limit = _qset->buffer_size();
+    for (size_t i = node->index(); i < limit; ++i) {
+      CardTable::CardValue* card_ptr = static_cast<CardTable::CardValue*>(buf[i]);
+      bool result = cl->do_card_ptr(card_ptr, worker_id);
+      assert(result, "Closure should always return true");
+    }
+  }
+
+  void par_apply(G1CardTableEntryClosure* cl, uint worker_id) {
+    BufferNode* next = Atomic::load(&_nodes);
+    while (next != NULL) {
+      BufferNode* node = next;
+      next = Atomic::cmpxchg(node->next(), &_nodes, node);
+      if (next == node) {
+        apply(cl, node, worker_id);
+        next = node->next();
+      }
+    }
+  }
+
  public:
-  G1RedirtyLoggedCardsTask(G1DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
-    _queue(queue), _g1h(g1h) { }
+  G1RedirtyLoggedCardsTask(G1RedirtyCardsQueueSet* qset, G1CollectedHeap* g1h) :
+    AbstractGangTask("Redirty Cards"),
+    _qset(qset), _g1h(g1h), _nodes(qset->all_completed_buffers()) { }
 
   virtual void work(uint worker_id) {
     G1GCPhaseTimes* p = _g1h->phase_times();
     G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::RedirtyCards, worker_id);
 
     RedirtyLoggedCardTableEntryClosure cl(_g1h);
-    _queue->par_apply_closure_to_all_completed_buffers(&cl);
+    par_apply(&cl, worker_id);
 
     p->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
   }
@@ -3233,13 +3261,12 @@
 void G1CollectedHeap::redirty_logged_cards() {
   double redirty_logged_cards_start = os::elapsedTime();
 
-  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
-  dirty_card_queue_set().reset_for_par_iteration();
+  G1RedirtyLoggedCardsTask redirty_task(&redirty_cards_queue_set(), this);
   workers()->run_task(&redirty_task);
 
   G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
-  dcq.merge_bufferlists(&dirty_card_queue_set());
-  assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
+  dcq.merge_bufferlists(&redirty_cards_queue_set());
+  redirty_cards_queue_set().verify_empty();
 
   phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
 }
@@ -3571,7 +3598,7 @@
   // Should G1EvacuationFailureALot be in effect for this GC?
   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
 
-  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
+  redirty_cards_queue_set().verify_empty();
 }
 
 class G1EvacuateRegionsBaseTask : public AbstractGangTask {
@@ -3683,7 +3710,7 @@
 
   {
     Ticks start = Ticks::now();
-    rem_set()->merge_heap_roots(false /* remset_only */, G1GCPhaseTimes::MergeRS);
+    rem_set()->merge_heap_roots(true /* initial_evacuation */);
     p->record_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
   }
 
@@ -3759,7 +3786,7 @@
 
     {
       Ticks start = Ticks::now();
-      rem_set()->merge_heap_roots(true /* remset_only */, G1GCPhaseTimes::OptMergeRS);
+      rem_set()->merge_heap_roots(false /* initial_evacuation */);
       phase_times()->record_or_add_optional_merge_heap_roots_time((Ticks::now() - start).seconds() * 1000.0);
     }
 
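G1RedirtyLoggedCardsTask now hands buffer nodes out to workers lock-free: each worker tries to advance a shared cursor past the node it wants with a compare-and-swap, and only the winner processes that node. The following is a standalone sketch of the same claiming loop using std::atomic and two plain threads; BufferNode and ParallelList here are simplified stand-ins, not the HotSpot types.

#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

// A singly linked node carrying some per-buffer payload.
struct BufferNode {
  int _value;
  BufferNode* _next;
};

// Mirrors the shape of par_apply(): each worker repeatedly tries to move the
// shared cursor past the node it wants; the worker whose compare-exchange
// succeeds owns that node exclusively.
struct ParallelList {
  std::atomic<BufferNode*> _nodes;

  explicit ParallelList(BufferNode* head) : _nodes(head) {}

  template <typename Fn>
  void par_apply(Fn fn) {
    BufferNode* next = _nodes.load();
    while (next != nullptr) {
      BufferNode* node = next;
      // Try to advance the shared cursor from 'node' to 'node->_next'.
      if (_nodes.compare_exchange_strong(next, node->_next)) {
        fn(node);                 // we claimed this node
        next = node->_next;
      }
      // On failure 'next' was refreshed with the current cursor; retry from there.
    }
  }
};

int main() {
  const int n = 1000;
  std::vector<BufferNode> nodes(n);
  for (int i = 0; i < n; i++) {
    nodes[i]._value = 1;
    nodes[i]._next = (i + 1 < n) ? &nodes[i + 1] : nullptr;
  }
  ParallelList list(&nodes[0]);

  std::atomic<int> processed{0};
  auto worker = [&]() { list.par_apply([&](BufferNode* bn) { processed += bn->_value; }); };
  std::thread t1(worker), t2(worker);
  t1.join();
  t2.join();
  assert(processed.load() == n);  // every node processed exactly once
  return 0;
}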
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -31,7 +31,6 @@
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.hpp"
-#include "gc/g1/g1DirtyCardQueue.hpp"
 #include "gc/g1/g1EdenRegions.hpp"
 #include "gc/g1/g1EvacFailure.hpp"
 #include "gc/g1/g1EvacStats.hpp"
@@ -42,6 +41,7 @@
 #include "gc/g1/g1HRPrinter.hpp"
 #include "gc/g1/g1HeapRegionAttr.hpp"
 #include "gc/g1/g1MonitoringSupport.hpp"
+#include "gc/g1/g1RedirtyCardsQueue.hpp"
 #include "gc/g1/g1SurvivorRegions.hpp"
 #include "gc/g1/g1YCTypes.hpp"
 #include "gc/g1/heapRegionManager.hpp"
@@ -73,6 +73,7 @@
 class SpaceClosure;
 class CompactibleSpaceClosure;
 class Space;
+class G1CardTableEntryClosure;
 class G1CollectionSet;
 class G1Policy;
 class G1HotCardCache;
@@ -353,6 +354,7 @@
     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
   } while (0)
 
+#ifdef ASSERT
 #define assert_used_and_recalculate_used_equal(g1h)                           \
   do {                                                                        \
     size_t cur_used_bytes = g1h->used();                                      \
@@ -361,6 +363,9 @@
            " same as recalculated used(" SIZE_FORMAT ").",                    \
            cur_used_bytes, recal_used_bytes);                                 \
   } while (0)
+#else
+#define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
+#endif
 
   const char* young_gc_name() const;
 
@@ -771,7 +776,7 @@
 
   // A set of cards that cover the objects for which the Rsets should be updated
   // concurrently after the collection.
-  G1DirtyCardQueueSet _dirty_card_queue_set;
+  G1RedirtyCardsQueueSet _redirty_cards_queue_set;
 
   // After a collection pause, convert the regions in the collection set into free
   // regions.
@@ -931,7 +936,9 @@
   uint num_task_queues() const;
 
   // A set of cards where updates happened during the GC
-  G1DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
+  G1RedirtyCardsQueueSet& redirty_cards_queue_set() {
+    return _redirty_cards_queue_set;
+  }
 
   // Create a G1CollectedHeap.
   // Must call the initialize method afterwards.
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -397,7 +397,7 @@
     dcqs.set_max_completed_buffers(red_zone());
   }
 
-  size_t curr_queue_size = dcqs.completed_buffers_num();
+  size_t curr_queue_size = dcqs.num_completed_buffers();
   if ((dcqs.max_completed_buffers() > 0) &&
       (curr_queue_size >= yellow_zone())) {
     dcqs.set_completed_buffers_padding(curr_queue_size);
@@ -430,7 +430,7 @@
 bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
 
-  size_t curr_buffer_num = dcqs.completed_buffers_num();
+  size_t curr_buffer_num = dcqs.num_completed_buffers();
   // If the number of the buffers falls down into the yellow zone,
   // that means that the transition period after the evacuation pause has ended.
   // Since the value written to the DCQS is the same for all threads, there is no
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -104,7 +104,7 @@
     size_t buffers_processed = 0;
     log_debug(gc, refine)("Activated worker %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
                           _worker_id, _cr->activation_threshold(_worker_id),
-                           G1BarrierSet::dirty_card_queue_set().completed_buffers_num());
+                           G1BarrierSet::dirty_card_queue_set().num_completed_buffers());
 
     {
       SuspendibleThreadSetJoiner sts_join;
@@ -126,7 +126,7 @@
     log_debug(gc, refine)("Deactivated worker %d, off threshold: " SIZE_FORMAT
                           ", current: " SIZE_FORMAT ", processed: " SIZE_FORMAT,
                           _worker_id, _cr->deactivation_threshold(_worker_id),
-                          G1BarrierSet::dirty_card_queue_set().completed_buffers_num(),
+                          G1BarrierSet::dirty_card_queue_set().num_completed_buffers(),
                           buffers_processed);
 
     if (os::supports_vtime()) {
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -23,9 +23,11 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/g1CardTableEntryClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1DirtyCardQueue.hpp"
 #include "gc/g1/g1FreeIdSet.hpp"
+#include "gc/g1/g1RedirtyCardsQueue.hpp"
 #include "gc/g1/g1RemSet.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
@@ -82,7 +84,7 @@
   _cbl_mon(NULL),
   _completed_buffers_head(NULL),
   _completed_buffers_tail(NULL),
-  _n_completed_buffers(0),
+  _num_entries_in_completed_buffers(0),
   _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
   _process_completed_buffers(false),
   _notify_when_complete(notify_when_complete),
@@ -90,8 +92,7 @@
   _completed_buffers_padding(0),
   _free_ids(NULL),
   _processed_buffers_mut(0),
-  _processed_buffers_rs_thread(0),
-  _cur_par_buffer_node(NULL)
+  _processed_buffers_rs_thread(0)
 {
   _all_active = true;
 }
@@ -132,42 +133,56 @@
     _completed_buffers_tail->set_next(cbn);
     _completed_buffers_tail = cbn;
   }
-  _n_completed_buffers++;
+  _num_entries_in_completed_buffers += buffer_size() - cbn->index();
 
   if (!process_completed_buffers() &&
-      (_n_completed_buffers > process_completed_buffers_threshold())) {
+      (num_completed_buffers() > process_completed_buffers_threshold())) {
     set_process_completed_buffers(true);
     if (_notify_when_complete) {
       _cbl_mon->notify_all();
     }
   }
-  assert_completed_buffers_list_len_correct_locked();
+  verify_num_entries_in_completed_buffers();
 }
 
 BufferNode* G1DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
   MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
 
-  if (_n_completed_buffers <= stop_at) {
+  if (num_completed_buffers() <= stop_at) {
     return NULL;
   }
 
-  assert(_n_completed_buffers > 0, "invariant");
+  assert(num_completed_buffers() > 0, "invariant");
   assert(_completed_buffers_head != NULL, "invariant");
   assert(_completed_buffers_tail != NULL, "invariant");
 
   BufferNode* bn = _completed_buffers_head;
-  _n_completed_buffers--;
+  _num_entries_in_completed_buffers -= buffer_size() - bn->index();
   _completed_buffers_head = bn->next();
   if (_completed_buffers_head == NULL) {
-    assert(_n_completed_buffers == 0, "invariant");
+    assert(num_completed_buffers() == 0, "invariant");
     _completed_buffers_tail = NULL;
     set_process_completed_buffers(false);
   }
-  assert_completed_buffers_list_len_correct_locked();
+  verify_num_entries_in_completed_buffers();
   bn->set_next(NULL);
   return bn;
 }
 
+#ifdef ASSERT
+void G1DirtyCardQueueSet::verify_num_entries_in_completed_buffers() const {
+  size_t actual = 0;
+  BufferNode* cur = _completed_buffers_head;
+  while (cur != NULL) {
+    actual += buffer_size() - cur->index();
+    cur = cur->next();
+  }
+  assert(actual == _num_entries_in_completed_buffers,
+         "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
+         _num_entries_in_completed_buffers, actual);
+}
+#endif
+
 void G1DirtyCardQueueSet::abandon_completed_buffers() {
   BufferNode* buffers_to_delete = NULL;
   {
@@ -175,7 +190,7 @@
     buffers_to_delete = _completed_buffers_head;
     _completed_buffers_head = NULL;
     _completed_buffers_tail = NULL;
-    _n_completed_buffers = 0;
+    _num_entries_in_completed_buffers = 0;
     set_process_completed_buffers(false);
   }
   while (buffers_to_delete != NULL) {
@@ -188,59 +203,41 @@
 
 void G1DirtyCardQueueSet::notify_if_necessary() {
   MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  if (_n_completed_buffers > process_completed_buffers_threshold()) {
+  if (num_completed_buffers() > process_completed_buffers_threshold()) {
     set_process_completed_buffers(true);
     if (_notify_when_complete)
       _cbl_mon->notify();
   }
 }
 
-#ifdef ASSERT
-void G1DirtyCardQueueSet::assert_completed_buffers_list_len_correct_locked() {
-  assert_lock_strong(_cbl_mon);
-  size_t n = 0;
-  for (BufferNode* bn = _completed_buffers_head; bn != NULL; bn = bn->next()) {
-    ++n;
-  }
-  assert(n == _n_completed_buffers,
-         "Completed buffer length is wrong: counted: " SIZE_FORMAT
-         ", expected: " SIZE_FORMAT, n, _n_completed_buffers);
-}
-#endif // ASSERT
-
 // Merge lists of buffers. Notify the processing threads.
 // The source queue is emptied as a result. The queues
 // must share the monitor.
-void G1DirtyCardQueueSet::merge_bufferlists(G1DirtyCardQueueSet *src) {
-  assert(_cbl_mon == src->_cbl_mon, "Should share the same lock");
+void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) {
+  assert(allocator() == src->allocator(), "precondition");
+  const G1RedirtyCardsBufferList from = src->take_all_completed_buffers();
+  if (from._head == NULL) return;
+
   MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
   if (_completed_buffers_tail == NULL) {
     assert(_completed_buffers_head == NULL, "Well-formedness");
-    _completed_buffers_head = src->_completed_buffers_head;
-    _completed_buffers_tail = src->_completed_buffers_tail;
+    _completed_buffers_head = from._head;
+    _completed_buffers_tail = from._tail;
   } else {
     assert(_completed_buffers_head != NULL, "Well formedness");
-    if (src->_completed_buffers_head != NULL) {
-      _completed_buffers_tail->set_next(src->_completed_buffers_head);
-      _completed_buffers_tail = src->_completed_buffers_tail;
-    }
+    _completed_buffers_tail->set_next(from._head);
+    _completed_buffers_tail = from._tail;
   }
-  _n_completed_buffers += src->_n_completed_buffers;
-
-  src->_n_completed_buffers = 0;
-  src->_completed_buffers_head = NULL;
-  src->_completed_buffers_tail = NULL;
-  src->set_process_completed_buffers(false);
+  _num_entries_in_completed_buffers += from._entry_count;
 
   assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
          _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
          "Sanity");
-  assert_completed_buffers_list_len_correct_locked();
+  verify_num_entries_in_completed_buffers();
 }
 
 bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                                                   BufferNode* node,
-                                                  bool consume,
                                                   uint worker_i) {
   if (cl == NULL) return true;
   bool result = true;
@@ -255,10 +252,8 @@
       break;
     }
   }
-  if (consume) {
-    assert(i <= buffer_size(), "invariant");
-    node->set_index(i);
-  }
+  assert(i <= buffer_size(), "invariant");
+  node->set_index(i);
   return result;
 }
 
@@ -284,7 +279,7 @@
     // add of padding could overflow, which is treated as unlimited.
     size_t max_buffers = max_completed_buffers();
     size_t limit = max_buffers + completed_buffers_padding();
-    if ((completed_buffers_num() > limit) && (limit >= max_buffers)) {
+    if ((num_completed_buffers() > limit) && (limit >= max_buffers)) {
       if (mut_process_buffer(node)) {
         return true;
       }
@@ -299,7 +294,7 @@
 
   uint worker_i = _free_ids->claim_par_id(); // temporarily claim an id
   G1RefineCardConcurrentlyClosure cl;
-  bool result = apply_closure_to_buffer(&cl, node, true, worker_i);
+  bool result = apply_closure_to_buffer(&cl, node, worker_i);
   _free_ids->release_par_id(worker_i); // release the id
 
   if (result) {
@@ -328,7 +323,7 @@
   if (nd == NULL) {
     return false;
   } else {
-    if (apply_closure_to_buffer(cl, nd, true, worker_i)) {
+    if (apply_closure_to_buffer(cl, nd, worker_i)) {
       assert_fully_consumed(nd, buffer_size());
       // Done with fully processed buffer.
       deallocate_buffer(nd);
@@ -342,21 +337,6 @@
   }
 }
 
-void G1DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl) {
-  BufferNode* nd = _cur_par_buffer_node;
-  while (nd != NULL) {
-    BufferNode* next = nd->next();
-    BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
-    if (actual == nd) {
-      bool b = apply_closure_to_buffer(cl, nd, false);
-      guarantee(b, "Should not stop early.");
-      nd = next;
-    } else {
-      nd = actual;
-    }
-  }
-}
-
 void G1DirtyCardQueueSet::abandon_logs() {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
   abandon_completed_buffers();
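
The dirty card queue set now tracks the number of card entries instead of the number of buffers: enqueueing a buffer adds buffer_size() - index() (the entries it actually holds), dequeueing subtracts the same amount, and num_completed_buffers() is recovered with a rounding-up division. A small worked check of that arithmetic, using a hypothetical buffer size of 256:

#include <cassert>
#include <cstddef>

// buffer_size entries fit in one buffer; entries occupy [index, buffer_size).
static const size_t buffer_size = 256;

static size_t entries_in(size_t index) {
  return buffer_size - index;            // what enqueueing one buffer adds
}

static size_t num_completed_buffers(size_t num_entries) {
  // Rounding-up division, as in G1DirtyCardQueueSet::num_completed_buffers().
  return (num_entries + buffer_size - 1) / buffer_size;
}

int main() {
  size_t entries = 0;
  entries += entries_in(0);     // a full buffer: 256 entries
  entries += entries_in(200);   // a partially filled buffer: 56 entries
  assert(entries == 312);
  // 312 entries still occupy two buffers, matching the old per-buffer count.
  assert(num_completed_buffers(entries) == 2);
  assert(num_completed_buffers(0) == 0);
  return 0;
}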
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -25,26 +25,16 @@
 #ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
 #define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
 
-#include "gc/shared/cardTable.hpp"
 #include "gc/shared/ptrQueue.hpp"
 #include "memory/allocation.hpp"
 
+class G1CardTableEntryClosure;
 class G1DirtyCardQueueSet;
 class G1FreeIdSet;
+class G1RedirtyCardsQueueSet;
 class Thread;
 class Monitor;
 
-// A closure class for processing card table entries.  Note that we don't
-// require these closure objects to be stack-allocated.
-class G1CardTableEntryClosure: public CHeapObj<mtGC> {
-public:
-  typedef CardTable::CardValue CardValue;
-
-  // Process the card whose card table entry is "card_ptr".  If returns
-  // "false", terminate the iteration early.
-  virtual bool do_card_ptr(CardValue* card_ptr, uint worker_i) = 0;
-};
-
 // A ptrQueue whose elements are "oops", pointers to object heads.
 class G1DirtyCardQueue: public PtrQueue {
 protected:
@@ -79,7 +69,9 @@
   Monitor* _cbl_mon;  // Protects the fields below.
   BufferNode* _completed_buffers_head;
   BufferNode* _completed_buffers_tail;
-  volatile size_t _n_completed_buffers;
+
+  // Number of actual entries in the list of completed buffers.
+  volatile size_t _num_entries_in_completed_buffers;
 
   size_t _process_completed_buffers_threshold;
   volatile bool _process_completed_buffers;
@@ -87,20 +79,18 @@
   // If true, notify_all on _cbl_mon when the threshold is reached.
   bool _notify_when_complete;
 
-  void assert_completed_buffers_list_len_correct_locked() NOT_DEBUG_RETURN;
-
   void abandon_completed_buffers();
 
   // Apply the closure to the elements of "node" from its index to
   // buffer_size.  If all closure applications return true, then
   // returns true.  Stops processing after the first closure
   // application that returns false, and returns false from this
-  // function.  If "consume" is true, the node's index is updated to
-  // exclude the processed elements, e.g. up to the element for which
-  // the closure returned false.
+  // function.  The node's index is updated to exclude the processed
+  // elements, e.g. up to the element for which the closure returned
+  // false, or one past the last element if the closure always
+  // returned true.
   bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                                BufferNode* node,
-                               bool consume,
                                uint worker_i = 0);
 
   // If there are more than stop_at completed buffers, pop one, apply
@@ -135,9 +125,6 @@
   jint _processed_buffers_mut;
   jint _processed_buffers_rs_thread;
 
-  // Current buffer node used for parallel iteration.
-  BufferNode* volatile _cur_par_buffer_node;
-
 public:
   G1DirtyCardQueueSet(bool notify_when_complete = true);
   ~G1DirtyCardQueueSet();
@@ -163,8 +150,17 @@
   // return a completed buffer from the list.  Otherwise, return NULL.
   BufferNode* get_completed_buffer(size_t stop_at = 0);
 
-  // The number of buffers in the list.  Racy...
-  size_t completed_buffers_num() const { return _n_completed_buffers; }
+  // The number of buffers in the list. Derived as an approximation from the number
+  // of entries in the buffers. Racy.
+  size_t num_completed_buffers() const {
+    return (num_entries_in_completed_buffers() + buffer_size() - 1) / buffer_size();
+  }
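+  // For example, with a (hypothetical) buffer_size() of 256 entries, a list
+  // holding 300 entries is reported as (300 + 255) / 256 = 2 buffers, and an
+  // empty list as 0.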
+  // The number of entries in completed buffers. Read without synchronization.
+  size_t num_entries_in_completed_buffers() const { return _num_entries_in_completed_buffers; }
+
+  // Verify that _num_entries_in_completed_buffers is equal to the sum of actual entries
+  // in the completed buffers.
+  void verify_num_entries_in_completed_buffers() const NOT_DEBUG_RETURN;
 
   bool process_completed_buffers() { return _process_completed_buffers; }
   void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }
@@ -183,7 +179,7 @@
   // Notify the consumer if the number of buffers crossed the threshold
   void notify_if_necessary();
 
-  void merge_bufferlists(G1DirtyCardQueueSet* src);
+  void merge_bufferlists(G1RedirtyCardsQueueSet* src);
 
   // Apply G1RefineCardConcurrentlyClosure to completed buffers until there are stop_at
   // completed buffers remaining.
@@ -193,12 +189,6 @@
   // must never return false. Must only be called during GC.
   bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);
 
-  void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; }
-  // Applies the current closure to all completed buffers, non-consumptively.
-  // Can be used in parallel, all callers using the iteration state initialized
-  // by reset_for_par_iteration.
-  void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);
-
   // If a full collection is happening, reset partial logs, and release
   // completed ones: the full collection will make them all irrelevant.
   void abandon_logs();
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -26,10 +26,10 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
-#include "gc/g1/g1DirtyCardQueue.hpp"
 #include "gc/g1/g1EvacFailure.hpp"
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
+#include "gc/g1/g1RedirtyCardsQueue.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
@@ -40,7 +40,7 @@
 class UpdateLogBuffersDeferred : public BasicOopIterateClosure {
 private:
   G1CollectedHeap* _g1h;
-  G1DirtyCardQueue* _dcq;
+  G1RedirtyCardsQueue* _rdcq;
   G1CardTable*    _ct;
 
   // Remember the last enqueued card to avoid enqueuing the same card over and over;
@@ -48,8 +48,8 @@
   size_t _last_enqueued_card;
 
 public:
-  UpdateLogBuffersDeferred(G1DirtyCardQueue* dcq) :
-    _g1h(G1CollectedHeap::heap()), _dcq(dcq), _ct(_g1h->card_table()), _last_enqueued_card(SIZE_MAX) {}
+  UpdateLogBuffersDeferred(G1RedirtyCardsQueue* rdcq) :
+    _g1h(G1CollectedHeap::heap()), _rdcq(rdcq), _ct(_g1h->card_table()), _last_enqueued_card(SIZE_MAX) {}
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
@@ -67,7 +67,7 @@
     }
     size_t card_index = _ct->index_for(p);
     if (card_index != _last_enqueued_card) {
-      _dcq->enqueue(_ct->byte_for_index(card_index));
+      _rdcq->enqueue(_ct->byte_for_index(card_index));
       _last_enqueued_card = card_index;
     }
   }
@@ -199,15 +199,15 @@
   G1CollectedHeap* _g1h;
   uint _worker_id;
 
-  G1DirtyCardQueue _dcq;
+  G1RedirtyCardsQueue _rdcq;
   UpdateLogBuffersDeferred _log_buffer_cl;
 
 public:
   RemoveSelfForwardPtrHRClosure(uint worker_id) :
     _g1h(G1CollectedHeap::heap()),
     _worker_id(worker_id),
-    _dcq(&_g1h->dirty_card_queue_set()),
-    _log_buffer_cl(&_dcq) {
+    _rdcq(&_g1h->redirty_cards_queue_set()),
+    _log_buffer_cl(&_rdcq) {
   }
 
   size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr,
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -61,11 +61,12 @@
   _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
   _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
   AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scan (ms):");)
-  JVMCI_ONLY(_gc_par_phases[JVMCIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMCI Root Scan (ms):");)
   _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
   _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms):");
   _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms):");
 
+  _gc_par_phases[MergeER] = new WorkerDataArray<double>(max_gc_threads, "Eager Reclaim (ms):");
+
   _gc_par_phases[MergeRS] = new WorkerDataArray<double>(max_gc_threads, "Remembered Sets (ms):");
   _merge_rs_merged_sparse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Sparse:");
   _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_sparse, MergeRSMergedSparse);
@@ -85,8 +86,14 @@
   _gc_par_phases[MergeLB] = new WorkerDataArray<double>(max_gc_threads, "Log Buffers (ms):");
   if (G1HotCardCache::default_use_cache()) {
     _gc_par_phases[MergeHCC] = new WorkerDataArray<double>(max_gc_threads, "Hot Card Cache (ms):");
+    _merge_hcc_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
+    _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_dirty_cards, MergeHCCDirtyCards);
+    _merge_hcc_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
+    _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_skipped_cards, MergeHCCSkippedCards);
   } else {
     _gc_par_phases[MergeHCC] = NULL;
+    _merge_hcc_dirty_cards = NULL;
+    _merge_hcc_skipped_cards = NULL;
   }
   _gc_par_phases[ScanHR] = new WorkerDataArray<double>(max_gc_threads, "Scan Heap Roots (ms):");
   _gc_par_phases[OptScanHR] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan Heap Roots (ms):");
@@ -166,6 +173,8 @@
   _cur_strong_code_root_purge_time_ms = 0.0;
   _cur_merge_heap_roots_time_ms = 0.0;
   _cur_optional_merge_heap_roots_time_ms = 0.0;
+  _cur_prepare_merge_heap_roots_time_ms = 0.0;
+  _cur_optional_prepare_merge_heap_roots_time_ms = 0.0;
   _cur_evac_fail_recalc_used = 0.0;
   _cur_evac_fail_remove_self_forwards = 0.0;
   _cur_string_deduplication_time_ms = 0.0;
@@ -248,9 +257,10 @@
       // Make sure all slots are uninitialized since this thread did not seem to have been started
       ASSERT_PHASE_UNINITIALIZED(GCWorkerEnd);
       ASSERT_PHASE_UNINITIALIZED(ExtRootScan);
-      ASSERT_PHASE_UNINITIALIZED(MergeHCC);
+      ASSERT_PHASE_UNINITIALIZED(MergeER);
       ASSERT_PHASE_UNINITIALIZED(MergeRS);
       ASSERT_PHASE_UNINITIALIZED(OptMergeRS);
+      ASSERT_PHASE_UNINITIALIZED(MergeHCC);
       ASSERT_PHASE_UNINITIALIZED(MergeLB);
       ASSERT_PHASE_UNINITIALIZED(ScanHR);
       ASSERT_PHASE_UNINITIALIZED(CodeRoots);
@@ -413,6 +423,8 @@
   const double sum_ms = _cur_optional_evac_ms + _cur_optional_merge_heap_roots_time_ms;
   if (sum_ms > 0) {
     info_time("Merge Optional Heap Roots", _cur_optional_merge_heap_roots_time_ms);
+
+    debug_time("Prepare Optional Merge Heap Roots", _cur_optional_prepare_merge_heap_roots_time_ms);
     debug_phase(_gc_par_phases[OptMergeRS]);
 
     info_time("Evacuate Optional Collection Set", _cur_optional_evac_ms);
@@ -427,6 +439,8 @@
 double G1GCPhaseTimes::print_evacuate_initial_collection_set() const {
   info_time("Merge Heap Roots", _cur_merge_heap_roots_time_ms);
 
+  debug_time("Prepare Merge Heap Roots", _cur_prepare_merge_heap_roots_time_ms);
+  debug_phase(_gc_par_phases[MergeER]);
   debug_phase(_gc_par_phases[MergeRS]);
   if (G1HotCardCache::default_use_cache()) {
     debug_phase(_gc_par_phases[MergeHCC]);
@@ -554,10 +568,10 @@
       "CLDGRoots",
       "JVMTIRoots",
       AOT_ONLY("AOTCodeRoots" COMMA)
-      JVMCI_ONLY("JVMCIRoots" COMMA)
       "CMRefRoots",
       "WaitForStrongCLD",
       "WeakCLDRoots",
+      "MergeER",
       "MergeRS",
       "OptMergeRS",
       "MergeLB",
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -56,10 +56,10 @@
     CLDGRoots,
     JVMTIRoots,
     AOT_ONLY(AOTCodeRoots COMMA)
-    JVMCI_ONLY(JVMCIRoots COMMA)
     CMRefRoots,
     WaitForStrongCLD,
     WeakCLDRoots,
+    MergeER,
     MergeRS,
     OptMergeRS,
     MergeLB,
@@ -100,6 +100,11 @@
     ScanHRUsedMemory
   };
 
+  enum GCMergeHCCWorkItems {
+    MergeHCCDirtyCards,
+    MergeHCCSkippedCards
+  };
+
   enum GCMergeLBWorkItems {
     MergeLBProcessedBuffers,
     MergeLBDirtyCards,
@@ -121,6 +126,9 @@
   WorkerDataArray<size_t>* _merge_rs_merged_fine;
   WorkerDataArray<size_t>* _merge_rs_merged_coarse;
 
+  WorkerDataArray<size_t>* _merge_hcc_dirty_cards;
+  WorkerDataArray<size_t>* _merge_hcc_skipped_cards;
+
   WorkerDataArray<size_t>* _merge_lb_processed_buffers;
   WorkerDataArray<size_t>* _merge_lb_dirty_cards;
   WorkerDataArray<size_t>* _merge_lb_skipped_cards;
@@ -164,6 +172,9 @@
   double _cur_merge_heap_roots_time_ms;
   double _cur_optional_merge_heap_roots_time_ms;
 
+  double _cur_prepare_merge_heap_roots_time_ms;
+  double _cur_optional_prepare_merge_heap_roots_time_ms;
+
   double _cur_prepare_tlab_time_ms;
   double _cur_resize_tlab_time_ms;
 
@@ -308,6 +319,14 @@
     _cur_optional_merge_heap_roots_time_ms += ms;
   }
 
+  void record_prepare_merge_heap_roots_time(double ms) {
+    _cur_prepare_merge_heap_roots_time_ms += ms;
+  }
+
+  void record_or_add_optional_prepare_merge_heap_roots_time(double ms) {
+    _cur_optional_prepare_merge_heap_roots_time_ms += ms;
+  }
+
   void record_evac_fail_recalc_used_time(double ms) {
     _cur_evac_fail_recalc_used = ms;
   }
--- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -35,7 +35,6 @@
   _old_length = g1_heap->old_regions_count();
   _archive_length = g1_heap->archive_regions_count();
   _humongous_length = g1_heap->humongous_regions_count();
-  _metaspace_used_bytes = MetaspaceUtils::used_bytes();
 }
 
 G1HeapTransition::G1HeapTransition(G1CollectedHeap* g1_heap) : _g1_heap(g1_heap), _before(g1_heap) { }
@@ -131,5 +130,5 @@
   log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
       usage._humongous_used / K, ((after._humongous_length * HeapRegion::GrainBytes) - usage._humongous_used) / K);
 
-  MetaspaceUtils::print_metaspace_change(_before._metaspace_used_bytes);
+  MetaspaceUtils::print_metaspace_change(_before._meta_sizes);
 }
--- a/src/hotspot/share/gc/g1/g1HeapTransition.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -26,6 +26,7 @@
 #define SHARE_GC_G1_G1HEAPTRANSITION_HPP
 
 #include "gc/shared/plab.hpp"
+#include "memory/metaspace/metaspaceSizesSnapshot.hpp"
 
 class G1CollectedHeap;
 
@@ -36,7 +37,7 @@
     size_t _old_length;
     size_t _archive_length;
     size_t _humongous_length;
-    size_t _metaspace_used_bytes;
+    const metaspace::MetaspaceSizesSnapshot _meta_sizes;
 
     Data(G1CollectedHeap* g1_heap);
   };
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/g1CardTableEntryClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1DirtyCardQueue.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -43,7 +43,7 @@
                                            size_t optional_cset_length)
   : _g1h(g1h),
     _refs(g1h->task_queue(worker_id)),
-    _dcq(&g1h->dirty_card_queue_set()),
+    _rdcq(&g1h->redirty_cards_queue_set()),
     _ct(g1h->card_table()),
     _closures(NULL),
     _plab_allocator(NULL),
@@ -88,7 +88,7 @@
 
 // Pass locally gathered statistics to global state.
 void G1ParScanThreadState::flush(size_t* surviving_young_words) {
-  _dcq.flush();
+  _rdcq.flush();
   // Update allocation statistics.
   _plab_allocator->flush_and_retire_stats();
   _g1h->policy()->record_age_table(&_age_table);
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -27,7 +27,7 @@
 
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1CollectedHeap.hpp"
-#include "gc/g1/g1DirtyCardQueue.hpp"
+#include "gc/g1/g1RedirtyCardsQueue.hpp"
 #include "gc/g1/g1OopClosures.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/g1RemSet.hpp"
@@ -46,7 +46,7 @@
 class G1ParScanThreadState : public CHeapObj<mtGC> {
   G1CollectedHeap* _g1h;
   RefToScanQueue* _refs;
-  G1DirtyCardQueue _dcq;
+  G1RedirtyCardsQueue _rdcq;
   G1CardTable* _ct;
   G1EvacuationRootClosures* _closures;
 
@@ -81,7 +81,7 @@
 
 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
 
-  G1DirtyCardQueue& dirty_card_queue()           { return _dcq; }
+  G1RedirtyCardsQueue& redirty_cards_queue()     { return _rdcq; }
   G1CardTable* ct()                              { return _ct; }
 
   G1HeapRegionAttr dest(G1HeapRegionAttr original) const {
@@ -133,7 +133,7 @@
     size_t card_index = ct()->index_for(p);
     // If the card hasn't been added to the buffer, do it.
     if (_last_enqueued_card != card_index) {
-      dirty_card_queue().enqueue(ct()->byte_for_index(card_index));
+      redirty_cards_queue().enqueue(ct()->byte_for_index(card_index));
       _last_enqueued_card = card_index;
     }
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ParallelCleaning.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/g1/g1ParallelCleaning.hpp"
+#if INCLUDE_JVMCI
+#include "jvmci/jvmci.hpp"
+#endif
+
+#if INCLUDE_JVMCI
+JVMCICleaningTask::JVMCICleaningTask() :
+  _cleaning_claimed(0) {
+}
+
+bool JVMCICleaningTask::claim_cleaning_task() {
+  if (_cleaning_claimed) {
+    return false;
+  }
+
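+  // Atomic::cmpxchg returns the previous value of _cleaning_claimed, so only the
+  // single caller that swaps 0 -> 1 claims the task; the unsynchronized check
+  // above merely filters out later callers cheaply.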
+  return Atomic::cmpxchg(1, &_cleaning_claimed, 0) == 0;
+}
+
+void JVMCICleaningTask::work(bool unloading_occurred) {
+  // One worker will clean JVMCI metadata handles.
+  if (unloading_occurred && EnableJVMCI && claim_cleaning_task()) {
+    JVMCI::do_unloading(unloading_occurred);
+  }
+}
+#endif // INCLUDE_JVMCI
+
+G1ParallelCleaningTask::G1ParallelCleaningTask(BoolObjectClosure* is_alive,
+                                               uint num_workers,
+                                               bool unloading_occurred,
+                                               bool resize_dedup_table) :
+  AbstractGangTask("G1 Parallel Cleaning"),
+  _unloading_occurred(unloading_occurred),
+  _string_dedup_task(is_alive, NULL, resize_dedup_table),
+  _code_cache_task(num_workers, is_alive, unloading_occurred),
+  JVMCI_ONLY(_jvmci_cleaning_task() COMMA)
+  _klass_cleaning_task() {
+}
+
+// The parallel work done by all worker threads.
+void G1ParallelCleaningTask::work(uint worker_id) {
+  // Clean JVMCI metadata handles.
+  // Execute this task first because it is a serial task.
+  JVMCI_ONLY(_jvmci_cleaning_task.work(_unloading_occurred);)
+
+  // Do first pass of code cache cleaning.
+  _code_cache_task.work(worker_id);
+
+  // Clean the string dedup data structures.
+  _string_dedup_task.work(worker_id);
+
+  // Clean all klasses that were not unloaded.
+  // The weak metadata in klass doesn't need to be
+  // processed if there was no unloading.
+  if (_unloading_occurred) {
+    _klass_cleaning_task.work();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1ParallelCleaning.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1PARALLELCLEANING_HPP
+#define SHARE_GC_G1_G1PARALLELCLEANING_HPP
+
+#include "gc/shared/parallelCleaning.hpp"
+
+#if INCLUDE_JVMCI
+class JVMCICleaningTask : public StackObj {
+  volatile int       _cleaning_claimed;
+
+public:
+  JVMCICleaningTask();
+  // Clean JVMCI metadata handles.
+  void work(bool unloading_occurred);
+
+private:
+  bool claim_cleaning_task();
+};
+#endif
+
+// Do cleanup of some weakly held data in the same parallel task.
+// Assumes a non-moving context.
+class G1ParallelCleaningTask : public AbstractGangTask {
+private:
+  bool                    _unloading_occurred;
+  StringDedupCleaningTask _string_dedup_task;
+  CodeCacheUnloadingTask  _code_cache_task;
+#if INCLUDE_JVMCI
+  JVMCICleaningTask       _jvmci_cleaning_task;
+#endif
+  KlassCleaningTask       _klass_cleaning_task;
+
+public:
+  // The constructor is run in the VMThread.
+  G1ParallelCleaningTask(BoolObjectClosure* is_alive,
+                         uint num_workers,
+                         bool unloading_occurred,
+                         bool resize_dedup_table);
+
+  void work(uint worker_id);
+};
+
+#endif // SHARE_GC_G1_G1PARALLELCLEANING_HPP
--- a/src/hotspot/share/gc/g1/g1Policy.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -684,7 +684,9 @@
     if (remset_cards_scanned > 10) {
       double avg_time_remset_scan = ((average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR)) *
                                      remset_cards_scanned / total_cards_scanned) +
-                                    average_time_ms(G1GCPhaseTimes::MergeRS);
+                                     average_time_ms(G1GCPhaseTimes::MergeER) +
+                                     average_time_ms(G1GCPhaseTimes::MergeRS) +
+                                     average_time_ms(G1GCPhaseTimes::OptMergeRS);
 
       cost_per_remset_card_ms = avg_time_remset_scan / remset_cards_scanned;
       _analytics->report_cost_per_remset_card_ms(cost_per_remset_card_ms, this_pause_was_young_only);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1RedirtyCardsQueue.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
+
+// G1RedirtyCardsBufferList
+
+G1RedirtyCardsBufferList::G1RedirtyCardsBufferList() :
+  _head(NULL), _tail(NULL), _entry_count(0) {}
+
+G1RedirtyCardsBufferList::G1RedirtyCardsBufferList(BufferNode* head,
+                                                   BufferNode* tail,
+                                                   size_t entry_count) :
+  _head(head), _tail(tail), _entry_count(entry_count)
+{
+  assert((_head == NULL) == (_tail == NULL), "invariant");
+  assert((_head == NULL) == (_entry_count == 0), "invariant");
+}
+
+// G1RedirtyCardsQueueBase::LocalQSet
+
+G1RedirtyCardsQueueBase::LocalQSet::LocalQSet(G1RedirtyCardsQueueSet* shared_qset) :
+  PtrQueueSet(),
+  _shared_qset(shared_qset),
+  _buffers()
+{
+  PtrQueueSet::initialize(_shared_qset->allocator());
+}
+
+G1RedirtyCardsQueueBase::LocalQSet::~LocalQSet() {
+  assert(_buffers._head == NULL, "unflushed qset");
+  assert(_buffers._tail == NULL, "invariant");
+  assert(_buffers._entry_count == 0, "invariant");
+}
+
+void G1RedirtyCardsQueueBase::LocalQSet::enqueue_completed_buffer(BufferNode* node) {
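+  // Buffers are filled from buffer_size() down towards 0, so a node with index N
+  // holds buffer_size() - N entries; e.g. with a (hypothetical) 256-entry buffer,
+  // a node enqueued at index 40 adds 216 entries to the running count.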
+  _buffers._entry_count += buffer_size() - node->index();
+  node->set_next(_buffers._head);
+  _buffers._head = node;
+  if (_buffers._tail == NULL) {
+    _buffers._tail = node;
+  }
+}
+
+G1RedirtyCardsBufferList G1RedirtyCardsQueueBase::LocalQSet::take_all_completed_buffers() {
+  G1RedirtyCardsBufferList result = _buffers;
+  _buffers = G1RedirtyCardsBufferList();
+  return result;
+}
+
+void G1RedirtyCardsQueueBase::LocalQSet::flush() {
+  _shared_qset->merge_bufferlist(this);
+}
+
+// G1RedirtyCardsQueue
+
+G1RedirtyCardsQueue::G1RedirtyCardsQueue(G1RedirtyCardsQueueSet* qset) :
+  G1RedirtyCardsQueueBase(qset), // Init _local_qset before passing to PtrQueue.
+  PtrQueue(&_local_qset, true /* active (always) */)
+{}
+
+G1RedirtyCardsQueue::~G1RedirtyCardsQueue() {
+  flush();
+}
+
+void G1RedirtyCardsQueue::handle_completed_buffer() {
+  enqueue_completed_buffer();
+}
+
+void G1RedirtyCardsQueue::flush() {
+  flush_impl();
+  _local_qset.flush();
+}
+
+// G1RedirtyCardsQueueSet
+
+G1RedirtyCardsQueueSet::G1RedirtyCardsQueueSet() :
+  PtrQueueSet(),
+  _list(),
+  _entry_count(0),
+  _tail(NULL)
+  DEBUG_ONLY(COMMA _collecting(true))
+{}
+
+G1RedirtyCardsQueueSet::~G1RedirtyCardsQueueSet() {
+  verify_empty();
+}
+
+#ifdef ASSERT
+void G1RedirtyCardsQueueSet::verify_empty() const {
+  assert(_list.empty(), "precondition");
+  assert(_tail == NULL, "invariant");
+  assert(_entry_count == 0, "invariant");
+}
+#endif // ASSERT
+
+BufferNode* G1RedirtyCardsQueueSet::all_completed_buffers() const {
+  DEBUG_ONLY(_collecting = false;)
+  return _list.top();
+}
+
+G1RedirtyCardsBufferList G1RedirtyCardsQueueSet::take_all_completed_buffers() {
+  DEBUG_ONLY(_collecting = false;)
+  G1RedirtyCardsBufferList result(_list.pop_all(), _tail, _entry_count);
+  _tail = NULL;
+  _entry_count = 0;
+  DEBUG_ONLY(_collecting = true;)
+  return result;
+}
+
+void G1RedirtyCardsQueueSet::update_tail(BufferNode* node) {
+  // Node is the tail of a (possibly single element) list just prepended to
+  // _list.  If, after that prepend, node's follower is NULL, then node is
+  // also the tail of _list, so record it as such.
+  if (node->next() == NULL) {
+    assert(_tail == NULL, "invariant");
+    _tail = node;
+  }
+}
+
+void G1RedirtyCardsQueueSet::enqueue_completed_buffer(BufferNode* node) {
+  assert(_collecting, "precondition");
+  Atomic::add(buffer_size() - node->index(), &_entry_count);
+  _list.push(*node);
+  update_tail(node);
+}
+
+void G1RedirtyCardsQueueSet::merge_bufferlist(LocalQSet* src) {
+  assert(_collecting, "precondition");
+  const G1RedirtyCardsBufferList from = src->take_all_completed_buffers();
+  if (from._head != NULL) {
+    assert(from._tail != NULL, "invariant");
+    Atomic::add(from._entry_count, &_entry_count);
+    _list.prepend(*from._head, *from._tail);
+    update_tail(from._tail);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.hpp	Thu Jul 25 11:31:07 2019 +0530
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1REDIRTYCARDSQUEUE_HPP
+#define SHARE_GC_G1_G1REDIRTYCARDSQUEUE_HPP
+
+#include "gc/shared/ptrQueue.hpp"
+#include "memory/allocation.hpp"
+#include "memory/padded.hpp"
+
+class G1CardTableEntryClosure;
+class G1RedirtyCardsQueue;
+class G1RedirtyCardsQueueSet;
+
+struct G1RedirtyCardsBufferList {
+  BufferNode* _head;
+  BufferNode* _tail;
+  size_t _entry_count;
+
+  G1RedirtyCardsBufferList();
+  G1RedirtyCardsBufferList(BufferNode* head, BufferNode* tail, size_t entry_count);
+};
+
+// Provide G1RedirtyCardsQueue with a thread-local qset: an uncontended
+// staging area for completed buffers that is flushed to the shared qset
+// en masse.  Uses the "base from member" idiom so that the local qset is
+// constructed before being passed to the PtrQueue constructor.
+class G1RedirtyCardsQueueBase {
+  friend class G1RedirtyCardsQueue;
+  friend class G1RedirtyCardsQueueSet;
+
+  class LocalQSet : public PtrQueueSet {
+    G1RedirtyCardsQueueSet* _shared_qset;
+    G1RedirtyCardsBufferList _buffers;
+
+  public:
+    LocalQSet(G1RedirtyCardsQueueSet* shared_qset);
+    ~LocalQSet();
+
+    // Add the buffer to the local list.
+    virtual void enqueue_completed_buffer(BufferNode* node);
+
+    // Transfer all completed buffers to the shared qset.
+    void flush();
+
+    G1RedirtyCardsBufferList take_all_completed_buffers();
+  };
+
+  G1RedirtyCardsQueueBase(G1RedirtyCardsQueueSet* shared_qset) :
+    _local_qset(shared_qset) {}
+
+  ~G1RedirtyCardsQueueBase() {}
+
+  LocalQSet _local_qset;
+};
+
+// Worker-local queues of card table entries.
+class G1RedirtyCardsQueue : private G1RedirtyCardsQueueBase, public PtrQueue {
+protected:
+  virtual void handle_completed_buffer();
+
+public:
+  G1RedirtyCardsQueue(G1RedirtyCardsQueueSet* qset);
+
+  // Flushes the queue.
+  ~G1RedirtyCardsQueue();
+
+  // Flushes all enqueued cards to the qset.
+  void flush();
+};
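+
+// A sketch of the intended per-worker usage (shared_qset and card_ptr stand in
+// for an initialized G1RedirtyCardsQueueSet and a card table entry pointer):
+//
+//   G1RedirtyCardsQueue queue(&shared_qset);
+//   queue.enqueue(card_ptr);   // repeated for each card to be redirtied
+//   queue.flush();             // or rely on the destructor, which also flushes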
+
+// Card table entries to be redirtied so that the cards can be reprocessed later.
+// Has two phases, collecting and processing.  During the collecting
+// phase buffers are added to the set.  Once collecting is complete and
+// processing starts, buffers can no longer be added.  Taking all the
+// collected (and processed) buffers reverts to collecting, allowing
+// the set to be reused for another round of redirtying.
+class G1RedirtyCardsQueueSet : public PtrQueueSet {
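+  // The padding keeps _list, _entry_count and _tail on separate cache lines
+  // (DEFAULT_CACHE_LINE_SIZE) to reduce false sharing between concurrent
+  // enqueuers.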
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
+  BufferNode::Stack _list;
+  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(size_t));
+  volatile size_t _entry_count;
+  DEFINE_PAD_MINUS_SIZE(3, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode*));
+  BufferNode* _tail;
+  DEBUG_ONLY(mutable bool _collecting;)
+
+  typedef G1RedirtyCardsQueueBase::LocalQSet LocalQSet;
+
+  void update_tail(BufferNode* node);
+
+public:
+  G1RedirtyCardsQueueSet();
+  ~G1RedirtyCardsQueueSet();
+
+  using PtrQueueSet::initialize;
+
+  void verify_empty() const NOT_DEBUG_RETURN;
+
+  // Collect buffers.  These functions are thread-safe.
+  // precondition: Must not be concurrent with buffer processing.
+  virtual void enqueue_completed_buffer(BufferNode* node);
+  void merge_bufferlist(LocalQSet* src);
+
+  // Processing phase operations.
+  // precondition: Must not be concurrent with buffer collection.
+  BufferNode* all_completed_buffers() const;
+  G1RedirtyCardsBufferList take_all_completed_buffers();
+};
+
+#endif // SHARE_GC_G1_G1REDIRTYCARDSQUEUE_HPP
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Tue Jul 23 22:21:16 2019 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Thu Jul 25 11:31:07 2019 +0530
@@ -26,6 +26,7 @@
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc/g1/g1CardTable.inline.hpp"
+#include "gc/g1/g1CardTableEntryClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
 #include "gc/g1/g1DirtyCardQueue.hpp"
@@ -93,24 +94,35 @@
   // to (>=) HeapRegion::CardsPerRegion (completely scanned).
   uint volatile* _card_table_scan_state;
 
-  // Random power of two number of cards we want to claim per thread. This corresponds
-  // to a 64k of memory work chunk area for every thread.
-  // We use the same claim size as Parallel GC. No particular measurements have been
-  // performed to determine an optimal number.
-  static const uint CardsPerChunk = 128;
+  // Return "optimal" number of chunks per region we want to use for claiming areas
+  // within a region to claim. Dependent on the region size as proxy for the heap
+  // size, we limit the total number of chunks to limit memory usage and maintenance
+  // effort of that table vs. granularity of distributing scanning work.
+  // Testing showed that 8 for 1M/2M region, 16 for 4M/8M regions, 32 for 16/32M regions
+  // seems to be such a good trade-off.
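+  // For example, the formula below gives 1u << (20 / 2 - 7) = 8 chunks for 1M
+  // regions, 1u << (23 / 2 - 7) = 16 for 8M regions and 1u << (25 / 2 - 7) = 32
+  // for 32M regions.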
+  static uint get_chunks_per_region(uint log_region_size) {
+    // Limit the expected input values to current known possible values of the
+    // (log) region size. Adjust as necessary after testing if changing the permissible
+    // values for region size.
+    assert(log_region_size >= 20 && log_region_size <= 25,
+           "expected value in [20,25], but got %u", log_region_size);
+    return 1u << (log_region_size / 2 - 7);
+  }
 
-  uint _scan_chunks_per_region;
+  uint _scan_chunks_per_region;         // Number of chunks per region.
+  uint8_t _log_scan_chunks_per_region;  // Log of number of chunks per region.
   bool* _region_scan_chunks;
-  uint8_t _scan_chunks_shift;
+  size_t _num_total_scan_chunks;        // Total number of elements in _region_scan_chunks.
+  uint8_t _scan_chunks_shift;           // For conversion between card index and chunk index.
 public:
   uint scan_chunk_size() const { return (uint)1 << _scan_chunks_shift; }
 
   // Returns whether the chunk corresponding to the given region/card in region contains a
   // dirty card, i.e. actually needs scanning.
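+  // For example, assuming (for illustration) 1M regions and 512-byte cards, there
+  // are 2048 cards and 8 chunks per region, so region 5 / card 700 maps to chunk
+  // (5 << 3) + (700 >> 8) = 42 in _region_scan_chunks.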
   bool chunk_needs_scan(uint const region_idx, uint const card_in_region) const {
-    size_t const idx = (size_t)region_idx * _scan_chunks_per_region + (card_in_region >> _scan_chunks_shift);
-    assert(idx < (_max_regions * _scan_chunks_per_region), "Index " SIZE_FORMAT " out of bounds " SIZE_FORMAT,
-           idx, _max_regions * _scan_chunks_per_region);
+    size_t const idx = ((size_t)region_idx << _log_scan_chunks_per_region) + (card_in_region >> _scan_chunks_shift);
+    assert(idx < _num_total_scan_chunks, "Index " SIZE_FORMAT " out of bounds " SIZE_FORMAT,
+           idx, _num_total_scan_chunks);
     return _region_scan_chunks[idx];
   }
 
@@ -183,259 +195,6 @@
     }
   };
 
-  // Returns whether the given region contains cards we need to scan. The remembered
-  // set and other sources may contain cards that
-  // - are in uncommitted regions
-  // - are located in the collection set
-  // - are located in free regions
-  // as we do not clean up remembered sets before merging heap roots.
-  bool contains_cards_to_process(uint const region_idx) const {
-    HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx);
-    return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous_or_archive());
-  }
-
-  class G1MergeCardSetClosure : public HeapRegionClosure {
-    G1RemSetScanState* _scan_state;
-    G1CardTable* _ct;
-
-    uint _merged_sparse;
-    uint _merged_fine;
-    uint _merged_coarse;
-
-    // Returns if the region contains cards we need to scan. If so, remember that
-    // region in the current set of dirty regions.
-    bool remember_if_interesting(uint const region_idx) {
-      if (!_scan_state->contains_cards_to_process(region_idx)) {
-        return false;
-      }
-      _scan_state->add_dirty_region(region_idx);
-      return true;
-    }
-  public:
-    G1MergeCardSetClosure(G1RemSetScanState* scan_state) :
-      _scan_state(scan_state),
-      _ct(G1CollectedHeap::heap()->card_table()),
-      _merged_sparse(0),
-      _merged_fine(0),
-      _merged_coarse(0) { }
-
-    void next_coarse_prt(uint const region_idx) {
-      if (!remember_if_interesting(region_idx)) {
-        return;
-      }
-
-      _merged_coarse++;
-
-      size_t region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
-      _ct->mark_region_dirty(region_base_idx, HeapRegion::CardsPerRegion);
-      _scan_state->set_chunk_region_dirty(region_base_idx);
-    }
-
-    void next_fine_prt(uint const region_idx, BitMap* bm) {
-      if (!remember_if_interesting(region_idx)) {
-        return;
-      }
-
-      _merged_fine++;
-
-      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
-      BitMap::idx_t cur = bm->get_next_one_offset(0);
-      while (cur != bm->size()) {
-        _ct->mark_clean_as_dirty(region_base_idx + cur);
-        _scan_state->set_chunk_dirty(region_base_idx + cur);
-        cur = bm->get_next_one_offset(cur + 1);
-      }
-    }
-
-    void next_sparse_prt(uint const region_idx, SparsePRTEntry::card_elem_t* cards, uint const num_cards) {
-      if (!remember_if_interesting(region_idx)) {
-        return;
-      }
-
-      _merged_sparse++;
-
-      size_t const region_base_idx = (size_t)region_idx << HeapRegion::LogCardsPerRegion;
-      for (uint i = 0; i < num_cards; i++) {
-        size_t card_idx = region_base_idx + cards[i];
-        _ct->mark_clean_as_dirty(card_idx);
-        _scan_state->set_chunk_dirty(card_idx);
-      }
-    }
-
-    virtual bool do_heap_region(HeapRegion* r) {
-      assert(r->in_collection_set() || r->is_starts_humongous(), "must be");
-
-      HeapRegionRemSet* rem_set = r->rem_set();
-      if (!rem_set->is_empty()) {
-        rem_set->iterate_prts(*this);
-      }
-
-      return false;
-    }
-
-    size_t merged_sparse() const { return _merged_sparse; }
-    size_t merged_fine() const { return _merged_fine; }
-    size_t merged_coarse() const { return _merged_coarse; }
-  };
-
-  // Visitor for the remembered sets of humongous candidate regions to merge their
-  // remembered set into the card table.
-  class G1FlushHumongousCandidateRemSets : public HeapRegionClosure {
-    G1MergeCardSetClosure _cl;
-
-  public:
-    G1FlushHumongousCandidateRemSets(G1RemSetScanState* scan_state) : _cl(scan_state) { }
-
-    virtual bool do_heap_region(HeapRegion* r) {
-      G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-      if (!r->is_starts_humongous() ||
-          !g1h->region_attr(r->hrm_index()).is_humongous() ||
-          r->rem_set()->is_empty()) {
-        return false;
-      }
-
-      guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
-                "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
-
-      _cl.do_heap_region(r);
-
-      // We should only clear the card based remembered set here as we will not
-      // implicitly rebuild anything else during eager reclaim. Note that at the moment
-      // (and probably never) we do not enter this path if there are other kind of
-      // remembered sets for this region.
-      r->rem_set()->clear_locked(true /* only_cardset */);
-      // Clear_locked() above sets the state to Empty. However we want to continue
-      // collecting remembered set entries for humongous regions that were not
-      // reclaimed.
-      r->rem_set()->set_state_complete();
-#ifdef ASSERT
-      G1HeapRegionAttr region_attr = g1h->region_attr(r->hrm_index());
-      assert(region_attr.needs_remset_update(), "must be");
-#endif
-      assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
-
-      return false;
-    }
-
-    size_t merged_sparse() const { return _cl.merged_sparse(); }
-    size_t merged_fine() const { return _cl.merged_fine(); }
-    size_t merged_coarse() const { return _cl.merged_coarse(); }
-  };
-
-  // Visitor for the log buffer entries to merge them into the card table.
-  class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure {
-    G1RemSetScanState* _scan_state;
-    G1CardTable* _ct;
-
-    size_t _cards_dirty;
-    size_t _cards_skipped;
-  public:
-    G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) :
-      _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
-    {}
-
-    bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
-      // The only time we care about recording cards that
-      // contain references that point into the collection set
-      // is during RSet updating within an evacuation pause.
-      // In this case worker_id should be the id of a GC worker thread.
-      assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
-
-      uint const region_idx = _ct->region_idx_for(card_ptr);
-
-      // The second clause must come after - the log buffers might contain cards to uncommited
-      // regions.
-      // This code may count duplicate entries in the log buffers (even if rare) multiple
-      // times.
-      if (_scan_state->contains_cards_to_process(region_idx) && (*card_ptr == G1CardTable::dirty_card_val())) {
-        _scan_state->add_dirty_region(region_idx);
-        _scan_state->set_chunk_dirty(_ct->index_for_cardvalue(card_ptr));
-        _cards_dirty++;
-      } else {
-        // We may have had dirty cards in the (initial) collection set (or the
-        // young regions which are always in the initial collection set). We do
-        // not fix their cards here: we already added these regions to the set of
-        // regions to clear the card table at the end during the prepare() phase.
-        _cards_skipped++;
-      }
-      return true;
-    }
-
-    size_t cards_dirty() const { return _cards_dirty; }
-    size_t cards_skipped() const { return _cards_skipped; }
-  };
-
-  class G1MergeHeapRootsTask : public AbstractGangTask {
-    HeapRegionClaimer _hr_claimer;
-    G1RemSetScanState* _scan_state;
-    bool _remembered_set_only;
-
-    G1GCPhaseTimes::GCParPhases _merge_phase;
-
-    volatile bool _fast_reclaim_handled;
-
-  public:
-    G1MergeHeapRootsTask(G1RemSetScanState* scan_state, uint num_workers, bool remembered_set_only, G1GCPhaseTimes::GCParPhases merge_phase) :
-      AbstractGangTask("G1 Merge Heap Roots"),
-      _hr_claimer(num_workers),
-      _scan_state(scan_state),
-      _remembered_set_only(remembered_set_only),
-      _merge_phase(merge_phase),
-      _fast_reclaim_handled(false) { }
-
-    virtual void work(uint worker_id) {
-      G1CollectedHeap* g1h = G1CollectedHeap::heap();
-      G1GCPhaseTimes* p = g1h->phase_times();
-
-      // We schedule flushing the remembered sets of humongous fast reclaim candidates
-      // onto the card table first to allow the remaining parallelized tasks hide it.
-      if (!_remembered_set_only &&
-          p->fast_reclaim_humongous_candidates() > 0 &&
-          !_fast_reclaim_handled &&
-          !Atomic::cmpxchg(true, &_fast_reclaim_handled, false)) {
-
-        G1FlushHumongousCandidateRemSets cl(_scan_state);
-        g1h->heap_region_iterate(&cl);
-
-        p->record_or_add_thread_work_item(_merge_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
-        p->record_or_add_thread_work_item(_merge_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
-        p->record_or_add_thread_work_item(_merge_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
-      }
-
-      // Merge remembered sets of current candidates.
-      {
-        G1GCParPhaseTimesTracker x(p, _merge_phase, worker_id, !_remembered_set_only /* must_record */);
-        G1MergeCardSetClosure cl(_scan_state);
-        g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);
-
-        p->record_or_add_thread_work_item(_merge_phase, worker_id, cl.merged_sparse(), G1GCPhaseTimes::MergeRSMergedSparse);
-        p->record_or_add_thread_work_item(_merge_phase, worker_id, cl.merged_fine(), G1GCPhaseTimes::MergeRSMergedFine);
-        p->record_or_add_thread_work_item(_merge_phase, worker_id, cl.merged_coarse(), G1GCPhaseTimes::MergeRSMergedCoarse);
-      }
-
-      // Apply closure to log entries in the HCC.
-      if (!_remembered_set_only && G1HotCardCache::default_use_cache()) {
-        assert(_merge_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
-        G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeHCC, worker_id);
-        G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
-        g1h->iterate_hcc_closure(&cl, worker_id);
-      }
-
-      // Now apply the closure to all remaining log entries.
-      if (!_remembered_set_only) {
-        assert(_merge_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
-        G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeLB, worker_id);
-
-        G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
-        g1h->iterate_dirty_card_closure(&cl, worker_id);
-
-        p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeLBDirtyCards);
-        p->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeLBSkippedCards);
-      }
-    }
-  };
-
   // Creates a snapshot of the current _top values at the start of collection to
   // filter out card marks that we do not want to scan.
   class G1ResetScanTopClosure : public HeapRegionClosure {
@@ -538,8 +297,10 @@
     _max_regions(0),
     _collection_set_iter_state(NULL),
     _card_table_scan_state(NULL),
-    _scan_chunks_per_region((uint)(HeapRegion::CardsPerRegion / CardsPerChunk)),
+    _scan_chunks_per_region(get_chunks_per_region(HeapRegion::LogOfHRGrainBytes)),
+    _log_scan_chunks_per_region(log2_uint(_scan_chunks_per_region)),
     _region_scan_chunks(NULL),
+    _num_total_scan_chunks(0),
     _scan_chunks_shift(0),
     _all_dirty_regions(NULL),
     _next_dirty_regions(NULL),
@@ -558,7 +319,8 @@
     _max_regions = max_regions;
     _collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_regions, mtGC);
     _card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
-    _region_scan_chunks = NEW_C_HEAP_ARRAY(bool, max_regions * _scan_chunks_per_region, mtGC);
+    _num_total_scan_chunks = max_regions * _scan_chunks_per_region;
+    _region_scan_chunks = NEW_C_HEAP_ARRAY(bool, _num_total_scan_chunks, mtGC);
 
     _scan_chunks_shift = (uint8_t)log2_intptr(HeapRegion::CardsPerRegion / _scan_chunks_per_region);
     _scan_top = NEW_C_HEAP_ARRAY(HeapWord*, max_regions, mtGC);
@@ -571,61 +333,46 @@
     }
 
     _all_dirty_regions = new G1DirtyRegions(_max_regions);