changeset 18250:6a8b17cf0cd9

Merge
author chegar
date Thu, 16 May 2013 11:47:51 +0100
parents aec7e8963c3e f09ab0c41618
children 3743160a4cb8
files hotspot/agent/doc/c2replay.html langtools/make/Makefile-classic nashorn/src/jdk/nashorn/internal/codegen/Frame.java nashorn/src/jdk/nashorn/internal/ir/DoWhileNode.java nashorn/src/jdk/nashorn/internal/ir/LabeledNode.java
diffstat 455 files changed, 18994 insertions(+), 13052 deletions(-) [+]
--- a/.hgtags	Wed May 08 11:22:25 2013 +0100
+++ b/.hgtags	Thu May 16 11:47:51 2013 +0100
@@ -209,3 +209,5 @@
 1872c12529090e1c1dbf567f02ad7ae6231b8f0c jdk8-b85
 da9a4c9312816451884aa6db6f18be51a07bff13 jdk8-b86
 5ebf6c63714de2c9dcf831074086d31daec819df jdk8-b87
+e517701a4d0e25ae9c7945bca6e1762a8c5d8aa6 jdk8-b88
+4dec41b3c5e3bb616f0c6f15830d940905aa5d16 jdk8-b89
--- a/.hgtags-top-repo	Wed May 08 11:22:25 2013 +0100
+++ b/.hgtags-top-repo	Thu May 16 11:47:51 2013 +0100
@@ -209,3 +209,5 @@
 7fc358f5943676b82f1dccd3152b1ac07d92e38b jdk8-b85
 df9b5240f0a76c91cfe1a5b39da4d08df56e05be jdk8-b86
 b9415faa7066a4d3b16d466556d5428446918d95 jdk8-b87
+e1a929afcfc492470d50be0b6b0e8dc77d3760b9 jdk8-b88
+892a0196d10c67f3a12f0eefb0bb536e423d8868 jdk8-b89
--- a/common/makefiles/NativeCompilation.gmk	Wed May 08 11:22:25 2013 +0100
+++ b/common/makefiles/NativeCompilation.gmk	Thu May 16 11:47:51 2013 +0100
@@ -411,6 +411,8 @@
             $1_EXTRA_LDFLAGS+="-implib:$$($1_OBJECT_DIR)/$$($1_LIBRARY).lib"
         endif
 
+        $1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)        
+
         ifneq (,$$($1_DEBUG_SYMBOLS))
             ifeq ($(ENABLE_DEBUG_SYMBOLS), true)
                 ifeq ($(OPENJDK_TARGET_OS), windows)
@@ -549,6 +551,8 @@
             endif
         endif
 
+        $1_EXTRA_LDFLAGS_SUFFIX += $(GLOBAL_LDFLAGS_SUFFIX)
+
         $$($1_TARGET) : $$($1_EXPECTED_OBJS) $$($1_RES) $$($1_GEN_MANIFEST)
 	    	$$(call LINKING_EXE_MSG,$$($1_BASENAME))
 		$$($1_LDEXE) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $(EXE_OUT_OPTION)$$($1_TARGET) \
--- a/common/makefiles/javadoc/CORE_PKGS.gmk	Wed May 08 11:22:25 2013 +0100
+++ b/common/makefiles/javadoc/CORE_PKGS.gmk	Thu May 16 11:47:51 2013 +0100
@@ -142,6 +142,7 @@
   java.util.prefs                                \
   java.util.regex                                \
   java.util.spi                                  \
+  java.util.stream                               \
   java.util.zip                                  \
   javax.accessibility                            \
   javax.activation                               \
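Note: the java.util.stream entry added above pulls the new JDK 8 bulk-operations API into the core javadoc set. A minimal, illustrative sketch of the kind of code that package documents (class name and pipeline invented for illustration only):

    import java.util.Arrays;
    import java.util.List;

    public class StreamSketch {
        public static void main(String[] args) {
            List<String> repos = Arrays.asList("corba", "hotspot", "langtools", "nashorn");
            // Filter, transform and aggregate in one stream pipeline.
            long count = repos.stream()
                              .filter(n -> n.length() > 5)
                              .map(String::toUpperCase)
                              .count();
            System.out.println(count + " long repository names");
        }
    }
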
--- a/common/makefiles/javadoc/Javadoc.gmk	Wed May 08 11:22:25 2013 +0100
+++ b/common/makefiles/javadoc/Javadoc.gmk	Thu May 16 11:47:51 2013 +0100
@@ -390,6 +390,17 @@
 	  $(call OptionPair,-tag,specdefault:X)				; \
 	  $(call OptionPair,-tag,Note:X)				; \
 	  $(call OptionPair,-tag,ToDo:X)				; \
+          $(call OptionPair,-tag,apiNote:a:API Note:)                   ; \
+          $(call OptionPair,-tag,implSpec:a:Implementation Requirements:) ; \
+          $(call OptionPair,-tag,implNote:a:Implementation Note:)       ; \
+          $(call OptionPair,-tag,param)                                 ; \
+          $(call OptionPair,-tag,return)                                ; \
+          $(call OptionPair,-tag,throws)                                ; \
+          $(call OptionPair,-tag,since)                                 ; \
+          $(call OptionPair,-tag,version)                               ; \
+          $(call OptionPair,-tag,serialData)                            ; \
+          $(call OptionPair,-tag,factory)                               ; \
+          $(call OptionPair,-tag,see)                                   ; \
           $(call OptionPair,-tag,$(TAG_JLS)) 				; \
 	  $(call OptionOnly,-splitIndex)				; \
 	  $(call OptionPair,-overview,$(COREAPI_OVERVIEW))		; \
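Note: the -tag options registered above teach javadoc the new JDK 8 informational tags (@apiNote, @implSpec, @implNote) and re-declare the standard tags so the ordering stays sensible. A hedged sketch of a doc comment that exercises the new tags (method and wording invented for illustration):

    /**
     * Returns the sum of two ints.
     *
     * @apiNote Intended for small examples; prefer {@code Math.addExact}
     *          when overflow must be detected.
     * @implSpec The default implementation simply returns {@code a + b}.
     * @implNote No caching or other optimisation is performed.
     *
     * @param a the first operand
     * @param b the second operand
     * @return the sum of {@code a} and {@code b}
     * @since 1.8
     */
    public static int add(int a, int b) {
        return a + b;
    }
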
--- a/corba/.hgtags	Wed May 08 11:22:25 2013 +0100
+++ b/corba/.hgtags	Thu May 16 11:47:51 2013 +0100
@@ -209,3 +209,5 @@
 9583a6431596bac1959d2d8828f5ea217843dd12 jdk8-b85
 44a8ce4a759f2668ff434661a93ff462ea472478 jdk8-b86
 f1709874d55a06bc3d5dfa02dbcdfbc59f4cba34 jdk8-b87
+4e3a881ebb1ee96ce0872508b0066d74f310dbfa jdk8-b88
+fe4150590ee597f4e125fea950aa3b352622cc2d jdk8-b89
--- a/corba/src/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/UnionGen.java	Wed May 08 11:22:25 2013 +0100
+++ b/corba/src/share/classes/com/sun/tools/corba/se/idl/toJavaPortable/UnionGen.java	Thu May 16 11:47:51 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -258,6 +258,19 @@
   {
     Vector labels = vectorizeLabels (u.branches (), true);
 
+    if (Util.javaName(utype).equals ("boolean")) {
+        stream.println( "" ) ;
+        stream.println( "  private void verifyDefault (boolean discriminator)" ) ;
+        stream.println( "  {" ) ;
+        if (labels.contains ("true"))
+            stream.println ("    if ( discriminator )");
+        else
+            stream.println ("    if ( !discriminator )");
+        stream.println( "        throw new org.omg.CORBA.BAD_OPERATION();" ) ;
+        stream.println( "  }" ) ;
+        return;
+    }
+
     stream.println( "" ) ;
     stream.println( "  private void verifyDefault( " + Util.javaName(utype) +
         " value )" ) ;
@@ -763,7 +776,7 @@
             stream.println (indent + "if (" + disName + ')');
 
             if (firstBranch == null)
-                stream.println (indent + "  throw new org.omg.CORBA.BAD_OPERATION ();");
+                stream.println (indent + "  value._default(" + disName + ");");
             else {
                 stream.println (indent + '{');
                 index = readBranch (index, indent + "  ", firstBranch.typedef.name (),
@@ -774,7 +787,7 @@
             stream.println (indent + "else");
 
             if (secondBranch == null)
-                stream.println (indent + "  throw new org.omg.CORBA.BAD_OPERATION ();");
+                stream.println (indent + "  value._default(" + disName + ");");
             else {
                 stream.println (indent + '{');
                 index = readBranch (index, indent + "  ", secondBranch.typedef.name (),
@@ -924,23 +937,25 @@
         firstBranch = secondBranch;
         secondBranch = tmp;
       }
-      stream.println (indent + "if (" + disName + ')');
-      if (firstBranch == null)
-        stream.println (indent + "  throw new org.omg.CORBA.BAD_OPERATION ();");
-      else
-      {
-        stream.println (indent + '{');
-        index = writeBranch (index, indent + "  ", name, firstBranch.typedef, stream);
-        stream.println (indent + '}');
-      }
-      stream.println (indent + "else");
-      if (secondBranch == null)
-        stream.println (indent + "  throw new org.omg.CORBA.BAD_OPERATION ();");
-      else
-      {
-        stream.println (indent + '{');
-        index = writeBranch (index, indent + "  ", name, secondBranch.typedef, stream);
-        stream.println (indent + '}');
+      if (firstBranch != null && secondBranch != null) {
+          stream.println (indent + "if (" + disName + ')');
+          stream.println (indent + '{');
+          index = writeBranch (index, indent + "  ", name, firstBranch.typedef, stream);
+          stream.println (indent + '}');
+          stream.println (indent + "else");
+          stream.println (indent + '{');
+          index = writeBranch (index, indent + "  ", name, secondBranch.typedef, stream);
+          stream.println (indent + '}');
+      } else if (firstBranch != null) {
+          stream.println (indent + "if (" + disName + ')');
+          stream.println (indent + '{');
+          index = writeBranch (index, indent + "  ", name, firstBranch.typedef, stream);
+          stream.println (indent + '}');
+      } else {
+          stream.println (indent + "if (!" + disName + ')');
+          stream.println (indent + '{');
+          index = writeBranch (index, indent + "  ", name, secondBranch.typedef, stream);
+          stream.println (indent + '}');
       }
     }
     return index;
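Note: the UnionGen change above makes the generator emit a dedicated verifyDefault(boolean) helper for IDL unions with a boolean discriminator, and route a missing branch through value._default(disc) instead of unconditionally throwing BAD_OPERATION. Roughly, the emitted code now has the following shape (union and member names are hypothetical; the exact output depends on the IDL being compiled, here a union whose only explicit label is "true"):

    // Illustrative shape of generated code, not part of this changeset.
    public final class BoolUnion implements org.omg.CORBA.portable.IDLEntity {
        private boolean __discriminator;

        private void verifyDefault (boolean discriminator)
        {
            if ( discriminator )
                throw new org.omg.CORBA.BAD_OPERATION();
        }

        // Called by the generated Helper.read(...) when the discriminator
        // selects no explicit branch.
        public void _default (boolean discriminator)
        {
            verifyDefault (discriminator);
            __discriminator = discriminator;
        }
    }
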
--- a/hotspot/.hgtags	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/.hgtags	Thu May 16 11:47:51 2013 +0100
@@ -337,3 +337,7 @@
 d4c2667846607042370760e23f64c3ab9350e60d jdk8-b87
 01d5f04e64dc2d64625b2db2056f5ed4de918a45 hs25-b29
 c4af77d2045476c56fbf3f914b336bb1b7cd18af hs25-b30
+8482058e74bc8c1a890e6f3be3eff192dba6ce67 jdk8-b88
+4ec91349972255650f97bedfd07e6423e02428cf hs25-b31
+9c1fe0b419b40a9ecdd1653cc9af1b6d67a12c46 jdk8-b89
+69494caf57908ba2c8efa9eaaa472b4d1875588a hs25-b32
--- a/hotspot/agent/doc/c2replay.html	Wed May 08 11:22:25 2013 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-<html>
-<head>
-<title>
-C2 Replay
-</title>
-</head>
-<body>
-
-<h1>C2 compiler replay</h1>
-<p>
-The C2 compiler replay is a function to repeat the compiling process from a crashed java process in compiled method<br>
-This function only exists in debug version of VM
-</p>
-<h2>Usage</h2>
-<pre> 
-First, use SA to attach to the core file, if suceeded, do
-       clhsdb>dumpreplaydata <address> | -a | <thread_id> [> replay.txt]
-       create file replay.txt, address is address of Method, or nmethod(CodeBlob)
-       clhsdb>buildreplayjars [all | boot | app]
-       create files:
-         all:
-           app.jar, boot.jar
-         boot:
-           boot.jar
-         app:
-           app.jar
-       exit SA now.
-Second, use the obtained replay text file, replay.txt and jar files, app.jar and boot.jar, using debug version of java
-       java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=<datafile> -XX:+ReplayCompiles ....
-       This will replay the compiling process.
-
-       With ReplayCompiles, the replay will recompile all the methods in app.jar, and in boot.jar to emulate the process in java app.
-
-notes:
-       1) Most time, we don't need the boot.jar which is the classes loaded from JDK. It will be only modified when an agent(JVMDI) is running and modifies the classes.
-       2) If encounter error as "<flag>" not found, that means the SA is using a VMStructs which is different from the one with corefile. In this case, SA has a utility tool vmstructsdump which is located at agent/src/os/<os>/proc/<os_platform>
-
-       Use this tool to dump VM type library:
-       vmstructsdump libjvm.so > <type_name>.db
-
-       set env SA_TYPEDB=<type_name>.db (refer different shell for set envs)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/agent/doc/cireplay.html	Thu May 16 11:47:51 2013 +0100
@@ -0,0 +1,41 @@
+<html>
+<head>
+<title>
+Replay
+</title>
+</head>
+<body>
+
+<h1>Compiler replay</h1>
+<p>
+The compiler replay is a function to repeat the compiling process from a crashed java process in compiled method<br>
+This function only exists in debug version of VM
+</p>
+<h2>Usage</h2>
+<pre>
+First, use SA to attach to the core file, if succeeded, do
+       hsdb&gt; dumpreplaydata &lt;address&gt; | -a | &lt;thread_id&gt; [&gt; replay.txt]
+       create file replay.txt, address is address of Method, or nmethod(CodeBlob)
+       hsdb&gt; buildreplayjars [all | boot | app]
+       create files:
+         all:
+           app.jar, boot.jar
+         boot:
+           boot.jar
+         app:
+           app.jar
+       exit SA now.
+Second, use the obtained replay text file, replay.txt and jar files, app.jar and boot.jar, using debug version of java
+       java -Xbootclasspath/p:boot.jar -cp app.jar -XX:ReplayDataFile=&lt;datafile&gt; -XX:+ReplayCompiles ....
+       This will replay the compiling process.
+
+       With ReplayCompiles, the replay will recompile all the methods in app.jar, and in boot.jar to emulate the process in java app.
+
+notes:
+       1) Most time, we don't need the boot.jar which is the classes loaded from JDK. It will be only modified when an agent(JVMDI) is running and modifies the classes.
+       2) If encounter error as "&lt;flag&gt;" not found, that means the SA is using a VMStructs which is different from the one with corefile. In this case, SA has a utility tool vmstructsdump which is located at agent/src/os/&lt;os&gt;/proc/&lt;os_platform&gt;
+
+       Use this tool to dump VM type library:
+       vmstructsdump libjvm.so &gt; &lt;type_name&gt;.db
+
+       set env SA_TYPEDB=&lt;type_name&gt;.db (refer different shell for set envs)
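Note: the usage described in the new page boils down to re-running a debug VM with the replay file and jars produced by the SA. As a rough illustration only (file names are the placeholders from the page, and a debug build of java is assumed to be on the PATH), the same invocation could be driven from Java:

    import java.io.IOException;

    public class ReplayLauncher {
        public static void main(String[] args) throws IOException, InterruptedException {
            // replay.txt, boot.jar and app.jar come from the dumpreplaydata
            // and buildreplayjars steps described above.
            ProcessBuilder pb = new ProcessBuilder(
                "java",
                "-Xbootclasspath/p:boot.jar",
                "-cp", "app.jar",
                "-XX:ReplayDataFile=replay.txt",
                "-XX:+ReplayCompiles");
            pb.inheritIO();
            int exit = pb.start().waitFor();
            System.out.println("replay finished with exit code " + exit);
        }
    }
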
--- a/hotspot/agent/doc/clhsdb.html	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/doc/clhsdb.html	Thu May 16 11:47:51 2013 +0100
@@ -15,7 +15,7 @@
 <p>
 There is also JavaScript based SA command line interface called <a href="jsdb.html">jsdb</a>.
 But, CLHSDB supports Unix shell-like (or dbx/gdb-like) command line interface with
-support for output redirection/appending (familiar >, >>), command history and so on.
+support for output redirection/appending (familiar &gt;, &gt;&gt;), command history and so on.
 Each CLHSDB command can have zero or more arguments and optionally end with output redirection
 (or append) to a file. Commands may be stored in a file and run using <b>source</b> command.
 <b>help</b> command prints usage message for all supported commands (or a specific command)
@@ -49,7 +49,7 @@
   dumpheap [ file ] <font color="red">dump heap in hprof binary format</font>
   dumpideal -a | id <font color="red">dump ideal graph like debug flag -XX:+PrintIdeal</font>
   dumpilt -a | id <font color="red">dump inline tree for C2 compilation</font>
-  dumpreplaydata <address> | -a | <thread_id> [>replay.txt] <font color="red">dump replay data into a file</font>
+  dumpreplaydata &lt;address&gt; | -a | &lt;thread_id&gt; [&gt;replay.txt] <font color="red">dump replay data into a file</font>
   echo [ true | false ] <font color="red">turn on/off command echo mode</font>
   examine [ address/count ] | [ address,address] <font color="red">show contents of memory from given address</font>
   field [ type [ name fieldtype isStatic offset address ] ] <font color="red">print info about a field of HotSpot type</font>
@@ -96,11 +96,11 @@
 
 <h3>JavaScript integration</h3>
 
-<p>Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set 
+<p>Few CLHSDB commands are already implemented in JavaScript. It is possible to extend CLHSDB command set
 by implementing more commands in a JavaScript file and by loading it by <b>jsload</b> command. <b>jseval</b>
 command may be used to evaluate arbitrary JavaScript expression from a string. Any JavaScript function
 may be exposed as a CLHSDB command by registering it using JavaScript <b><code>registerCommand</code></b>
-function. This function accepts command name, usage and name of the JavaScript implementation function 
+function. This function accepts command name, usage and name of the JavaScript implementation function
 as arguments.
 </p>
 
@@ -127,11 +127,11 @@
 </code>
 </pre>
 
-<h3>C2 Compilation Replay</h3>
+<h3>Compilation Replay</h3>
 <p>
 When a java process crashes in compiled method, usually a core file is saved.
-The C2 replay function can reproduce the compiling process in the core.
-<a href="c2replay.html">c2replay.html</a>
+The replay function can reproduce the compiling process in the core.
+<a href="cireplay.html">cireplay.html</a>
 
 </body>
 </html>
--- a/hotspot/agent/src/os/bsd/MacosxDebuggerLocal.m	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/os/bsd/MacosxDebuggerLocal.m	Thu May 16 11:47:51 2013 +0100
@@ -204,7 +204,7 @@
   jstring objectName, jstring symbolName) 
 {
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  if (ph->core != NULL) {
+  if (ph != NULL && ph->core != NULL) {
     return lookupByNameIncore(env, ph, this_obj, objectName, symbolName);
   }
 
@@ -238,10 +238,13 @@
   const char* sym = NULL;
 
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
-  if (sym == NULL) return 0;
-  return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
+  if (ph != NULL && ph->core != NULL) {
+    sym = symbol_for_pc(ph, (uintptr_t) addr, &offset);
+    if (sym == NULL) return 0;
+    return (*env)->CallObjectMethod(env, this_obj, createClosestSymbol_ID,
                           (*env)->NewStringUTF(env, sym), (jlong)offset);
+  }
+  return 0;
 }
 
 /** called from Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_readBytesFromProcess0 */
@@ -279,7 +282,7 @@
   jbyteArray array;
 
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  if (ph->core != NULL) {
+  if (ph != NULL && ph->core != NULL) {
     return readBytesFromCore(env, ph, this_obj, addr, numBytes);
   }
 
@@ -394,9 +397,9 @@
 /* For core file only, called from
  * Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_getThreadIntegerRegisterSet0
  */
-jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, long lwp_id) {
+jlongArray getThreadIntegerRegisterSetFromCore(JNIEnv *env, jobject this_obj, long lwp_id, struct ps_prochandle* ph) {
   if (!_threads_filled)  {
-    if (!fill_java_threads(env, this_obj, get_proc_handle(env, this_obj))) {
+    if (!fill_java_threads(env, this_obj, ph)) {
       throw_new_debugger_exception(env, "Failed to fill in threads");
       return 0;
     } else {
@@ -409,7 +412,6 @@
   jlongArray array;
   jlong *regs;
 
-  struct ps_prochandle* ph = get_proc_handle(env, this_obj);
   if (get_lwp_regs(ph, lwp_id, &gregs) != true) {
     THROW_NEW_DEBUGGER_EXCEPTION_("get_thread_regs failed for a lwp", 0);
   }
@@ -521,8 +523,8 @@
   print_debug("getThreadRegisterSet0 called\n");
 
   struct ps_prochandle* ph = get_proc_handle(env, this_obj);
-  if (ph->core != NULL) {
-    return getThreadIntegerRegisterSetFromCore(env, this_obj, thread_id);
+  if (ph != NULL && ph->core != NULL) {
+    return getThreadIntegerRegisterSetFromCore(env, this_obj, thread_id, ph);
   }
 
   kern_return_t result;
@@ -705,8 +707,8 @@
   task_t gTask = 0;
   result = task_for_pid(mach_task_self(), jpid, &gTask);
   if (result != KERN_SUCCESS) {
-    print_error("attach: task_for_pid(%d) failed (%d)\n", (int)jpid, result);
-    THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process");
+    print_error("attach: task_for_pid(%d) failed: '%s' (%d)\n", (int)jpid, mach_error_string(result), result);
+    THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process. Could be caused by an incorrect pid or lack of privileges.");
   }
   putTask(env, this_obj, gTask);
 
--- a/hotspot/agent/src/os/bsd/ps_core.c	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/os/bsd/ps_core.c	Thu May 16 11:47:51 2013 +0100
@@ -199,10 +199,10 @@
 //---------------------------------------------------------------
 // Part of the class sharing workaround:
 //
-// With class sharing, pages are mapped from classes[_g].jsa file.
+// With class sharing, pages are mapped from classes.jsa file.
 // The read-only class sharing pages are mapped as MAP_SHARED,
 // PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes[_g].jsa.
+// With this workaround, these pages are read from classes.jsa.
 
 // FIXME: !HACK ALERT!
 // The format of sharing achive file header is needed to read shared heap
@@ -298,14 +298,12 @@
   lib_info* lib = ph->libs;
   while (lib != NULL) {
     // we are iterating over shared objects from the core dump. look for
-    // libjvm[_g].so.
+    // libjvm.so.
     const char *jvm_name = 0;
 #ifdef __APPLE__
-    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.dylib")) != 0)
+    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0)
 #else
-    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
-        (jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0)
+    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0)
 #endif // __APPLE__
     {
       char classes_jsa[PATH_MAX];
@@ -389,7 +387,7 @@
       }
 
       ph->core->classes_jsa_fd = fd;
-      // add read-only maps from classes[_g].jsa to the list of maps
+      // add read-only maps from classes.jsa to the list of maps
       for (m = 0; m < NUM_SHARED_MAPS; m++) {
         if (header._space[m]._read_only) {
           base = (uintptr_t) header._space[m]._base;
--- a/hotspot/agent/src/os/linux/ps_core.c	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/os/linux/ps_core.c	Thu May 16 11:47:51 2013 +0100
@@ -195,10 +195,10 @@
 //---------------------------------------------------------------
 // Part of the class sharing workaround:
 //
-// With class sharing, pages are mapped from classes[_g].jsa file.
+// With class sharing, pages are mapped from classes.jsa file.
 // The read-only class sharing pages are mapped as MAP_SHARED,
 // PROT_READ pages. These pages are not dumped into core dump.
-// With this workaround, these pages are read from classes[_g].jsa.
+// With this workaround, these pages are read from classes.jsa.
 
 // FIXME: !HACK ALERT!
 // The format of sharing achive file header is needed to read shared heap
@@ -284,10 +284,9 @@
    lib_info* lib = ph->libs;
    while (lib != NULL) {
       // we are iterating over shared objects from the core dump. look for
-      // libjvm[_g].so.
+      // libjvm.so.
       const char *jvm_name = 0;
-      if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0 ||
-          (jvm_name = strstr(lib->name, "/libjvm_g.so")) != 0) {
+      if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) {
          char classes_jsa[PATH_MAX];
          struct FileMapHeader header;
          size_t n = 0;
@@ -371,7 +370,7 @@
          }
 
          ph->core->classes_jsa_fd = fd;
-         // add read-only maps from classes[_g].jsa to the list of maps
+         // add read-only maps from classes.jsa to the list of maps
          for (m = 0; m < NUM_SHARED_MAPS; m++) {
             if (header._space[m]._read_only) {
                base = (uintptr_t) header._space[m]._base;
--- a/hotspot/agent/src/os/solaris/proc/saproc.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/os/solaris/proc/saproc.cpp	Thu May 16 11:47:51 2013 +0100
@@ -589,8 +589,7 @@
   JNIEnv*   env = dbg->env;
   jobject this_obj = dbg->this_obj;
   const char* jvm_name = 0;
-  if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL ||
-      (jvm_name = strstr(obj_name, "libjvm_g.so")) != NULL) {
+  if ((jvm_name = strstr(obj_name, "libjvm.so")) != NULL) {
     jvm_name = obj_name;
   } else {
     return 0;
@@ -598,7 +597,7 @@
 
   struct ps_prochandle* ph = (struct ps_prochandle*) env->GetLongField(this_obj, p_ps_prochandle_ID);
 
-  // initialize classes[_g].jsa file descriptor field.
+  // initialize classes.jsa file descriptor field.
   dbg->env->SetIntField(this_obj, classes_jsa_fd_ID, -1);
 
   // check whether class sharing is on by reading variable "UseSharedSpaces"
@@ -641,7 +640,7 @@
 
   print_debug("looking for %s\n", classes_jsa);
 
-  // open the classes[_g].jsa
+  // open the classes.jsa
   int fd = libsaproc_open(classes_jsa, O_RDONLY);
   if (fd < 0) {
     char errMsg[ERR_MSG_SIZE];
@@ -651,7 +650,7 @@
     print_debug("opened shared archive file %s\n", classes_jsa);
   }
 
-  // parse classes[_g].jsa
+  // parse classes.jsa
   struct FileMapHeader* pheader = (struct FileMapHeader*) malloc(sizeof(struct FileMapHeader));
   if (pheader == NULL) {
     close(fd);
@@ -798,8 +797,8 @@
   if (! isProcess) {
     /*
      * With class sharing, shared perm. gen heap is allocated in with MAP_SHARED|PROT_READ.
-     * These pages are mapped from the file "classes[_g].jsa". MAP_SHARED pages are not dumped
-     * in Solaris core.To read shared heap pages, we have to read classes[_g].jsa file.
+     * These pages are mapped from the file "classes.jsa". MAP_SHARED pages are not dumped
+     * in Solaris core.To read shared heap pages, we have to read classes.jsa file.
      */
     Pobject_iter(ph, init_classsharing_workaround, &dbg);
     exception = env->ExceptionOccurred();
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java	Thu May 16 11:47:51 2013 +0100
@@ -24,20 +24,29 @@
 
 package sun.jvm.hotspot;
 
-import java.io.PrintStream;
-import java.net.*;
-import java.rmi.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.bsd.*;
-import sun.jvm.hotspot.debugger.proc.*;
-import sun.jvm.hotspot.debugger.remote.*;
-import sun.jvm.hotspot.debugger.windbg.*;
-import sun.jvm.hotspot.debugger.linux.*;
-import sun.jvm.hotspot.memory.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
+import java.rmi.RemoteException;
+
+import sun.jvm.hotspot.debugger.Debugger;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.JVMDebugger;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.MachineDescriptionAMD64;
+import sun.jvm.hotspot.debugger.MachineDescriptionIA64;
+import sun.jvm.hotspot.debugger.MachineDescriptionIntelX86;
+import sun.jvm.hotspot.debugger.MachineDescriptionSPARC32Bit;
+import sun.jvm.hotspot.debugger.MachineDescriptionSPARC64Bit;
+import sun.jvm.hotspot.debugger.NoSuchSymbolException;
+import sun.jvm.hotspot.debugger.bsd.BsdDebuggerLocal;
+import sun.jvm.hotspot.debugger.linux.LinuxDebuggerLocal;
+import sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal;
+import sun.jvm.hotspot.debugger.remote.RemoteDebugger;
+import sun.jvm.hotspot.debugger.remote.RemoteDebuggerClient;
+import sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer;
+import sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.utilities.PlatformInfo;
+import sun.jvm.hotspot.utilities.UnsupportedPlatformException;
 
 /** <P> This class wraps much of the basic functionality and is the
  * highest-level factory for VM data structures. It makes it simple
@@ -475,7 +484,7 @@
     }
 
     private void setupJVMLibNamesSolaris() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so", "gamma_g" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }
 
     //
@@ -507,7 +516,7 @@
     }
 
     private void setupJVMLibNamesWin32() {
-        jvmLibNames = new String[] { "jvm.dll", "jvm_g.dll" };
+        jvmLibNames = new String[] { "jvm.dll" };
     }
 
     //
@@ -547,7 +556,7 @@
     }
 
     private void setupJVMLibNamesLinux() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }
 
     //
@@ -572,7 +581,7 @@
     }
 
     private void setupJVMLibNamesBsd() {
-        jvmLibNames = new String[] { "libjvm.so", "libjvm_g.so" };
+        jvmLibNames = new String[] { "libjvm.so" };
     }
 
     //
@@ -595,7 +604,7 @@
     }
 
     private void setupJVMLibNamesDarwin() {
-        jvmLibNames = new String[] { "libjvm.dylib", "libjvm_g.dylib" };
+        jvmLibNames = new String[] { "libjvm.dylib" };
     }
 
     /** Convenience routine which should be called by per-platform
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/LinuxVtblAccess.java	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/LinuxVtblAccess.java	Thu May 16 11:47:51 2013 +0100
@@ -24,9 +24,9 @@
 
 package sun.jvm.hotspot;
 
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.types.basic.*;
+import sun.jvm.hotspot.debugger.SymbolLookup;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.basic.BasicVtblAccess;
 
 public class LinuxVtblAccess extends BasicVtblAccess {
   private String vt;
@@ -35,8 +35,7 @@
                          String[] dllNames) {
     super(symbolLookup, dllNames);
 
-    if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null ||
-        symbolLookup.lookup("libjvm_g.so", "__vt_10JavaThread") != null) {
+    if (symbolLookup.lookup("libjvm.so", "__vt_10JavaThread") != null) {
        // old C++ ABI
        vt = "__vt_";
     } else {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java	Thu May 16 11:47:51 2013 +0100
@@ -93,10 +93,11 @@
     CompileTask task = task();
     Method method = task.method();
     int entryBci = task.osrBci();
+    int compLevel = task.compLevel();
     Klass holder = method.getMethodHolder();
     out.println("compile " + holder.getName().asString() + " " +
                 OopUtilities.escapeString(method.getName().asString()) + " " +
                 method.getSignature().asString() + " " +
-                entryBci);
+                entryBci + " " + compLevel);
   }
 }
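Note: with the change above, each dumped replay entry carries the compilation level after the entry bci. A minimal sketch of the resulting line format (class, method and values invented for illustration):

    public class ReplayLineSketch {
        public static void main(String[] args) {
            // Mirrors the println in ciEnv.dumpReplayData, with made-up values.
            String holder    = "java/lang/String";
            String method    = "hashCode";
            String signature = "()I";
            int entryBci  = -1;  // -1 denotes a normal (non-OSR) compile
            int compLevel = 4;   // e.g. the top tier in a tiered VM
            System.out.println("compile " + holder + " " + method + " " +
                               signature + " " + entryBci + " " + compLevel);
        }
    }
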
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Thu May 16 11:47:51 2013 +0100
@@ -78,6 +78,8 @@
       current sweep traversal index. */
   private static CIntegerField stackTraversalMarkField;
 
+  private static CIntegerField compLevelField;
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -113,7 +115,7 @@
     osrEntryPointField          = type.getAddressField("_osr_entry_point");
     lockCountField              = type.getJIntField("_lock_count");
     stackTraversalMarkField     = type.getCIntegerField("_stack_traversal_mark");
-
+    compLevelField              = type.getCIntegerField("_comp_level");
     pcDescSize = db.lookupType("PcDesc").getSize();
   }
 
@@ -530,7 +532,7 @@
     out.println("compile " + holder.getName().asString() + " " +
                 OopUtilities.escapeString(method.getName().asString()) + " " +
                 method.getSignature().asString() + " " +
-                getEntryBCI());
+                getEntryBCI() + " " + getCompLevel());
 
   }
 
@@ -551,4 +553,5 @@
   private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); }
   private int getNulChkTableOffset()  { return (int) nulChkTableOffsetField .getValue(addr); }
   private int getNMethodEndOffset()   { return (int) nmethodEndOffsetField  .getValue(addr); }
+  private int getCompLevel()          { return (int) compLevelField         .getValue(addr); }
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java	Thu May 16 11:47:51 2013 +0100
@@ -46,10 +46,12 @@
     Type type      = db.lookupType("CompileTask");
     methodField = type.getAddressField("_method");
     osrBciField = new CIntField(type.getCIntegerField("_osr_bci"), 0);
+    compLevelField = new CIntField(type.getCIntegerField("_comp_level"), 0);
   }
 
   private static AddressField methodField;
   private static CIntField osrBciField;
+  private static CIntField compLevelField;
 
   public CompileTask(Address addr) {
     super(addr);
@@ -63,4 +65,8 @@
   public int osrBci() {
     return (int)osrBciField.getValue(getAddress());
   }
+
+  public int compLevel() {
+      return (int)compLevelField.getValue(getAddress());
+  }
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebuggerLocal.java	Thu May 16 11:47:51 2013 +0100
@@ -24,17 +24,28 @@
 
 package sun.jvm.hotspot.debugger.bsd;
 
-import java.io.*;
-import java.net.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.utilities.*;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.DebuggerBase;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.DebuggerUtilities;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.NotInHeapException;
+import sun.jvm.hotspot.debugger.OopHandle;
+import sun.jvm.hotspot.debugger.ReadResult;
+import sun.jvm.hotspot.debugger.ThreadProxy;
+import sun.jvm.hotspot.debugger.UnalignedAddressException;
+import sun.jvm.hotspot.debugger.UnmappedAddressException;
+import sun.jvm.hotspot.debugger.cdbg.CDebugger;
+import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
+import sun.jvm.hotspot.debugger.cdbg.LoadObject;
+import sun.jvm.hotspot.runtime.JavaThread;
+import sun.jvm.hotspot.runtime.Threads;
 import sun.jvm.hotspot.runtime.VM;
-import sun.jvm.hotspot.runtime.Threads;
-import sun.jvm.hotspot.runtime.JavaThread;
-import java.lang.reflect.*;
+import sun.jvm.hotspot.utilities.PlatformInfo;
 
 /** <P> An implementation of the JVMDebugger interface. The basic debug
     facilities are implemented through ptrace interface in the JNI code
@@ -246,10 +257,8 @@
     /* called from attach methods */
     private void findABIVersion() throws DebuggerException {
         String libjvmName = isDarwin ? "libjvm.dylib" : "libjvm.so";
-        String libjvm_gName = isDarwin? "libjvm_g.dylib" : "libjvm_g.so";
         String javaThreadVt = isDarwin ? "_vt_10JavaThread" : "__vt_10JavaThread";
-        if (lookupByName0(libjvmName, javaThreadVt) != 0 ||
-            lookupByName0(libjvm_gName, javaThreadVt) != 0) {
+        if (lookupByName0(libjvmName, javaThreadVt) != 0) {
             // old C++ ABI
             useGCC32ABI = false;
         } else {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java	Thu May 16 11:47:51 2013 +0100
@@ -24,14 +24,25 @@
 
 package sun.jvm.hotspot.debugger.linux;
 
-import java.io.*;
-import java.net.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.debugger.x86.*;
-import sun.jvm.hotspot.debugger.cdbg.*;
-import sun.jvm.hotspot.utilities.*;
-import java.lang.reflect.*;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.DebuggerBase;
+import sun.jvm.hotspot.debugger.DebuggerException;
+import sun.jvm.hotspot.debugger.DebuggerUtilities;
+import sun.jvm.hotspot.debugger.MachineDescription;
+import sun.jvm.hotspot.debugger.NotInHeapException;
+import sun.jvm.hotspot.debugger.OopHandle;
+import sun.jvm.hotspot.debugger.ReadResult;
+import sun.jvm.hotspot.debugger.ThreadProxy;
+import sun.jvm.hotspot.debugger.UnalignedAddressException;
+import sun.jvm.hotspot.debugger.UnmappedAddressException;
+import sun.jvm.hotspot.debugger.cdbg.CDebugger;
+import sun.jvm.hotspot.debugger.cdbg.ClosestSymbol;
+import sun.jvm.hotspot.debugger.cdbg.LoadObject;
+import sun.jvm.hotspot.utilities.PlatformInfo;
 
 /** <P> An implementation of the JVMDebugger interface. The basic debug
     facilities are implemented through ptrace interface in the JNI code
@@ -238,8 +249,7 @@
 
     /* called from attach methods */
     private void findABIVersion() throws DebuggerException {
-        if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0 ||
-            lookupByName0("libjvm_g.so", "__vt_10JavaThread") != 0) {
+        if (lookupByName0("libjvm.so", "__vt_10JavaThread") != 0) {
             // old C++ ABI
             useGCC32ABI = false;
         } else {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java	Thu May 16 11:47:51 2013 +0100
@@ -117,8 +117,6 @@
                 mode = MODE_HEAP_SUMMARY;
             } else if (modeFlag.equals("-histo")) {
                 mode = MODE_HISTOGRAM;
-            } else if (modeFlag.equals("-permstat")) {
-                mode = MODE_CLSTATS;
             } else if (modeFlag.equals("-clstats")) {
                 mode = MODE_CLSTATS;
             } else if (modeFlag.equals("-finalizerinfo")) {
--- a/hotspot/make/excludeSrc.make	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/make/excludeSrc.make	Thu May 16 11:47:51 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -81,22 +81,25 @@
 	cmsAdaptiveSizePolicy.cpp cmsCollectorPolicy.cpp \
 	cmsGCAdaptivePolicyCounters.cpp cmsLockVerifier.cpp compactibleFreeListSpace.cpp \
 	concurrentMarkSweepGeneration.cpp concurrentMarkSweepThread.cpp \
-	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp collectionSetChooser.cpp \
-	concurrentG1Refine.cpp concurrentG1RefineThread.cpp concurrentMark.cpp concurrentMarkThread.cpp \
-	dirtyCardQueue.cpp g1AllocRegion.cpp g1BlockOffsetTable.cpp g1CollectedHeap.cpp g1GCPhaseTimes.cpp \
-	g1CollectorPolicy.cpp g1ErgoVerbose.cpp g1_globals.cpp g1HRPrinter.cpp g1MarkSweep.cpp \
-	g1MMUTracker.cpp g1MonitoringSupport.cpp g1RemSet.cpp g1SATBCardTableModRefBS.cpp heapRegion.cpp \
-	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp ptrQueue.cpp \
-	satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp adjoiningGenerations.cpp \
-	adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp cardTableExtension.cpp \
-	gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp parallelScavengeHeap.cpp parMarkBitMap.cpp \
-	pcTasks.cpp psAdaptiveSizePolicy.cpp psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp \
-	psGenerationCounters.cpp psMarkSweep.cpp psMarkSweepDecorator.cpp psOldGen.cpp psParallelCompact.cpp \
-	psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp psTasks.cpp psVirtualspace.cpp \
-	psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp parCardTableModRefBS.cpp \
-	parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp gSpaceCounters.cpp allocationStats.cpp \
-	spaceCounters.cpp gcAdaptivePolicyCounters.cpp mutableNUMASpace.cpp immutableSpace.cpp \
-	immutableSpace.cpp g1MemoryPool.cpp psMemoryPool.cpp yieldingWorkGroup.cpp g1Log.cpp
+	freeChunk.cpp adaptiveFreeList.cpp promotionInfo.cpp vmCMSOperations.cpp \
+	collectionSetChooser.cpp concurrentG1Refine.cpp concurrentG1RefineThread.cpp \
+	concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \
+	g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
+	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
+	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
+	g1RemSet.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
+	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
+	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
+	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
+	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
+	parallelScavengeHeap.cpp parMarkBitMap.cpp pcTasks.cpp psAdaptiveSizePolicy.cpp \
+	psCompactionManager.cpp psGCAdaptivePolicyCounters.cpp psGenerationCounters.cpp \
+	psMarkSweep.cpp psMarkSweepDecorator.cpp psMemoryPool.cpp psOldGen.cpp \
+	psParallelCompact.cpp psPromotionLAB.cpp psPromotionManager.cpp psScavenge.cpp \
+	psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
+	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
+	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
+	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
 endif 
 
 ifeq ($(INCLUDE_NMT), false)
--- a/hotspot/make/hotspot_version	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/make/hotspot_version	Thu May 16 11:47:51 2013 +0100
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=30
+HS_BUILD_NUMBER=32
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/windows/makefiles/compile.make	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/make/windows/makefiles/compile.make	Thu May 16 11:47:51 2013 +0100
@@ -52,7 +52,7 @@
 # improving the quality of crash log stack traces involving jvm.dll.
 
 # These are always used in all compiles
-CXX_FLAGS=/nologo /W3 /WX
+CXX_FLAGS=$(EXTRA_CFLAGS) /nologo /W3 /WX
 
 # Let's add debug information when Full Debug Symbols is enabled
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
--- a/hotspot/make/windows/makefiles/defs.make	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/make/windows/makefiles/defs.make	Thu May 16 11:47:51 2013 +0100
@@ -193,7 +193,7 @@
   MAKE_ARGS += JDK_BUILD_NUMBER=$(COOKED_BUILD_NUMBER)
 endif
 
-NMAKE= MAKEFLAGS= MFLAGS= nmake -NOLOGO
+NMAKE= MAKEFLAGS= MFLAGS= EXTRA_CFLAGS="$(EXTRA_CFLAGS)" nmake -NOLOGO
 ifndef SYSTEM_UNAME
   SYSTEM_UNAME := $(shell uname)
   export SYSTEM_UNAME
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp	Thu May 16 11:47:51 2013 +0100
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
+#include "code/icBuffer.hpp"
+#include "code/nmethod.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+#ifdef COMPILER2
+#include "opto/matcher.hpp"
+#endif
+
+// Release the CompiledICHolder* associated with this call site is there is one.
+void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  if (is_icholder_entry(call->destination())) {
+    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
+    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
+  }
+}
+
+bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  return is_icholder_entry(call->destination());
+}
+
+//-----------------------------------------------------------------------------
+// High-level access to an inline cache. Guaranteed to be MT-safe.
+
+CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
+  : _ic_call(call)
+{
+  address ic_call = call->instruction_address();
+
+  assert(ic_call != NULL, "ic_call address must be set");
+  assert(nm != NULL, "must pass nmethod");
+  assert(nm->contains(ic_call), "must be in nmethod");
+
+  // Search for the ic_call at the given address.
+  RelocIterator iter(nm, ic_call, ic_call+1);
+  bool ret = iter.next();
+  assert(ret == true, "relocInfo must exist at this address");
+  assert(iter.addr() == ic_call, "must find ic_call");
+  if (iter.type() == relocInfo::virtual_call_type) {
+    virtual_call_Relocation* r = iter.virtual_call_reloc();
+    _is_optimized = false;
+    _value = nativeMovConstReg_at(r->cached_value());
+  } else {
+    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
+    _is_optimized = true;
+    _value = NULL;
+  }
+}
+
+// ----------------------------------------------------------------------------
+
+#define __ _masm.
+void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+#ifdef COMPILER2
+  // Stub is fixed up when the corresponding call is converted from calling
+  // compiled code to calling interpreted code.
+  // set (empty), G5
+  // jmp -1
+
+  address mark = cbuf.insts_mark();  // Get mark within main instrs section.
+
+  MacroAssembler _masm(&cbuf);
+
+  address base =
+  __ start_a_stub(to_interp_stub_size()*2);
+  if (base == NULL) return;  // CodeBuffer::expand failed.
+
+  // Static stub relocation stores the instruction address of the call.
+  __ relocate(static_stub_Relocation::spec(mark));
+
+  __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode()));
+
+  __ set_inst_mark();
+  AddressLiteral addrlit(-1);
+  __ JUMP(addrlit, G3, 0);
+
+  __ delayed()->nop();
+
+  // Update current stubs pointer and restore code_end.
+  __ end_a_stub();
+#else
+  ShouldNotReachHere();
+#endif
+}
+#undef __
+
+int CompiledStaticCall::to_interp_stub_size() {
+  // This doesn't need to be accurate but it must be larger or equal to
+  // the real size of the stub.
+  return (NativeMovConstReg::instruction_size +  // sethi/setlo;
+          NativeJump::instruction_size + // sethi; jmp; nop
+          (TraceJumps ? 20 * BytesPerInstWord : 0) );
+}
+
+// Relocation entries for call stub, compiled java to interpreter.
+int CompiledStaticCall::reloc_to_interp_stub() {
+  return 10;  // 4 in emit_java_to_interp + 1 in Java_Static_Call
+}
+
+void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  address stub = find_stub();
+  guarantee(stub != NULL, "stub not found");
+
+  if (TraceICs) {
+    ResourceMark rm;
+    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+                  instruction_address(),
+                  callee->name_and_sig_as_C_string());
+  }
+
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+
+  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
+         "a) MT-unsafe modification of inline cache");
+  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
+         "b) MT-unsafe modification of inline cache");
+
+  // Update stub.
+  method_holder->set_data((intptr_t)callee());
+  jump->set_jump_destination(entry);
+
+  // Update jump to call.
+  set_destination_mt_safe(stub);
+}
+
+void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
+  // Reset stub.
+  address stub = static_stub->addr();
+  assert(stub != NULL, "stub not found");
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+  method_holder->set_data(0);
+  jump->set_jump_destination((address)-1);
+}
+
+//-----------------------------------------------------------------------------
+// Non-product mode code
+#ifndef PRODUCT
+
+void CompiledStaticCall::verify() {
+  // Verify call.
+  NativeCall::verify();
+  if (os::is_MP()) {
+    verify_alignment();
+  }
+
+  // Verify stub.
+  address stub = find_stub();
+  assert(stub != NULL, "no stub found for static call");
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+
+  // Verify state.
+  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
+}
+
+#endif // !PRODUCT
--- a/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Thu May 16 11:47:51 2013 +0100
@@ -30,4 +30,6 @@
 
 const int StackAlignmentInBytes = (2*wordSize);
 
+#define SUPPORTS_NATIVE_CX8
+
 #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP
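Note: defining SUPPORTS_NATIVE_CX8 above tells the runtime that the platform has a native 8-byte compare-and-swap, so 64-bit atomics need no lock-based fallback. The visible beneficiary at the Java level is code like the following (a generic illustration, not part of this changeset):

    import java.util.concurrent.atomic.AtomicLong;

    public class Cx8Sketch {
        private static final AtomicLong counter = new AtomicLong();

        public static void main(String[] args) {
            // On platforms with native 8-byte CAS, compareAndSet on a long
            // uses the hardware instruction rather than a lock-based fallback.
            long old = counter.get();
            boolean swapped = counter.compareAndSet(old, old + 1);
            System.out.println("swapped: " + swapped + ", value: " + counter.get());
        }
    }
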
--- a/hotspot/src/cpu/sparc/vm/jni_sparc.h	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/jni_sparc.h	Thu May 16 11:47:51 2013 +0100
@@ -23,7 +23,12 @@
  * questions.
  */
 
-#if defined(__GNUC__) && (__GNUC__ >= 4)
+// Note: please do not change these without also changing jni_md.h in the JDK
+// repository
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
   #define JNIEXPORT     __attribute__((visibility("default")))
   #define JNIIMPORT     __attribute__((visibility("default")))
 #else
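Note: the JNIEXPORT change above keeps native entry points visible to the dynamic linker when libraries are built with hidden default visibility on newer gcc and clang, hence the __has_attribute probe. Nothing changes on the Java side; a native method is still declared as in this hedged sketch (class and library names are placeholders):

    public class NativeSketch {
        static {
            // Loads libnativesketch.so / nativesketch.dll, whose exported JNI
            // symbol must carry default visibility (JNIEXPORT).
            System.loadLibrary("nativesketch");
        }

        // Implemented natively as:
        //   JNIEXPORT jint JNICALL Java_NativeSketch_add(JNIEnv*, jclass, jint, jint)
        public static native int add(int a, int b);

        public static void main(String[] args) {
            System.out.println(add(2, 3));
        }
    }
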
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Thu May 16 11:47:51 2013 +0100
@@ -1656,53 +1656,6 @@
 }
 
 //=============================================================================
-
-// emit call stub, compiled java to interpretor
-void emit_java_to_interp(CodeBuffer &cbuf ) {
-
-  // Stub is fixed up when the corresponding call is converted from calling
-  // compiled code to calling interpreted code.
-  // set (empty), G5
-  // jmp -1
-
-  address mark = cbuf.insts_mark();  // get mark within main instrs section
-
-  MacroAssembler _masm(&cbuf);
-
-  address base =
-  __ start_a_stub(Compile::MAX_stubs_size);
-  if (base == NULL)  return;  // CodeBuffer::expand failed
-
-  // static stub relocation stores the instruction address of the call
-  __ relocate(static_stub_Relocation::spec(mark));
-
-  __ set_metadata(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode()));
-
-  __ set_inst_mark();
-  AddressLiteral addrlit(-1);
-  __ JUMP(addrlit, G3, 0);
-
-  __ delayed()->nop();
-
-  // Update current stubs pointer and restore code_end.
-  __ end_a_stub();
-}
-
-// size of call stub, compiled java to interpretor
-uint size_java_to_interp() {
-  // This doesn't need to be accurate but it must be larger or equal to
-  // the real size of the stub.
-  return (NativeMovConstReg::instruction_size +  // sethi/setlo;
-          NativeJump::instruction_size + // sethi; jmp; nop
-          (TraceJumps ? 20 * BytesPerInstWord : 0) );
-}
-// relocation entries for call stub, compiled java to interpretor
-uint reloc_java_to_interp() {
-  return 10;  // 4 in emit_java_to_interp + 1 in Java_Static_Call
-}
-
-
-//=============================================================================
 #ifndef PRODUCT
 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   st->print_cr("\nUEP:");
@@ -2576,15 +2529,15 @@
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
-    if ( !_method ) {
+    if (!_method) {
       emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
     } else if (_optimized_virtual) {
       emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
     } else {
       emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
     }
-    if( _method ) {  // Emit stub for static call
-      emit_java_to_interp(cbuf);
+    if (_method) {  // Emit stub for static call.
+      CompiledStaticCall::emit_to_interp_stub(cbuf);
     }
   %}
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/compiledIC_x86.cpp	Thu May 16 11:47:51 2013 +0100
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
+#include "code/icBuffer.hpp"
+#include "code/nmethod.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+
+// Release the CompiledICHolder* associated with this call site is there is one.
+void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  if (is_icholder_entry(call->destination())) {
+    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
+    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
+  }
+}
+
+bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  return is_icholder_entry(call->destination());
+}
+
+//-----------------------------------------------------------------------------
+// High-level access to an inline cache. Guaranteed to be MT-safe.
+
+CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
+  : _ic_call(call)
+{
+  address ic_call = call->instruction_address();
+
+  assert(ic_call != NULL, "ic_call address must be set");
+  assert(nm != NULL, "must pass nmethod");
+  assert(nm->contains(ic_call), "must be in nmethod");
+
+  // Search for the ic_call at the given address.
+  RelocIterator iter(nm, ic_call, ic_call+1);
+  bool ret = iter.next();
+  assert(ret == true, "relocInfo must exist at this address");
+  assert(iter.addr() == ic_call, "must find ic_call");
+  if (iter.type() == relocInfo::virtual_call_type) {
+    virtual_call_Relocation* r = iter.virtual_call_reloc();
+    _is_optimized = false;
+    _value = nativeMovConstReg_at(r->cached_value());
+  } else {
+    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
+    _is_optimized = true;
+    _value = NULL;
+  }
+}
+
+// ----------------------------------------------------------------------------
+
+#define __ _masm.
+void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+  // Stub is fixed up when the corresponding call is converted from
+  // calling compiled code to calling interpreted code.
+  // movq rbx, 0
+  // jmp -5 # to self
+
+  address mark = cbuf.insts_mark();  // Get mark within main instrs section.
+
+  // Note that the code buffer's insts_mark is always relative to insts.
+  // That's why we must use the macroassembler to generate a stub.
+  MacroAssembler _masm(&cbuf);
+
+  address base =
+  __ start_a_stub(to_interp_stub_size()*2);
+  if (base == NULL) return;  // CodeBuffer::expand failed.
+  // Static stub relocation stores the instruction address of the call.
+  __ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
+  // Static stub relocation also tags the Method* in the code-stream.
+  __ mov_metadata(rbx, (Metadata*) NULL);  // Method is zapped till fixup time.
+  // This is recognized as unresolved by relocs/nativeinst/ic code.
+  __ jump(RuntimeAddress(__ pc()));
+
+  // Update current stubs pointer and restore insts_end.
+  __ end_a_stub();
+}
+#undef __
+
+int CompiledStaticCall::to_interp_stub_size() {
+  return NOT_LP64(10)    // movl; jmp
+         LP64_ONLY(15);  // movq (1+1+8); jmp (1+4)
+}
+
+// Relocation entries for call stub, compiled java to interpreter.
+int CompiledStaticCall::reloc_to_interp_stub() {
+  return 4; // 3 in emit_to_interp_stub + 1 in emit_call
+}
+
+void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  address stub = find_stub();
+  guarantee(stub != NULL, "stub not found");
+
+  if (TraceICs) {
+    ResourceMark rm;
+    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+                  instruction_address(),
+                  callee->name_and_sig_as_C_string());
+  }
+
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+
+  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
+         "a) MT-unsafe modification of inline cache");
+  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
+         "b) MT-unsafe modification of inline cache");
+
+  // Update stub.
+  method_holder->set_data((intptr_t)callee());
+  jump->set_jump_destination(entry);
+
+  // Update jump to call.
+  set_destination_mt_safe(stub);
+}
+
+void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
+  // Reset stub.
+  address stub = static_stub->addr();
+  assert(stub != NULL, "stub not found");
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+  method_holder->set_data(0);
+  jump->set_jump_destination((address)-1);
+}
+
+//-----------------------------------------------------------------------------
+// Non-product mode code
+#ifndef PRODUCT
+
+void CompiledStaticCall::verify() {
+  // Verify call.
+  NativeCall::verify();
+  if (os::is_MP()) {
+    verify_alignment();
+  }
+
+  // Verify stub.
+  address stub = find_stub();
+  assert(stub != NULL, "no stub found for static call");
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+
+  // Verify state.
+  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
+}
+
+#endif // !PRODUCT
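
The 10- and 15-byte sizes returned by to_interp_stub_size() follow directly from the encodings sketched in the stub comments (a mov of an immediate into rbx followed by a rel32 jump). A minimal standalone sketch of that arithmetic, assuming only the usual x86 instruction encodings and nothing from HotSpot itself:

// stub_size_sketch.cpp: illustrative only, not part of this changeset.
#include <cassert>
#include <cstdio>

int main() {
  // 32-bit stub: mov ebx, imm32 (1-byte opcode + 4-byte immediate),
  // then jmp rel32 (1-byte opcode + 4-byte displacement).
  const int mov_imm32 = 1 + 4;
  const int jmp_rel32 = 1 + 4;
  assert(mov_imm32 + jmp_rel32 == 10);    // matches NOT_LP64(10)

  // 64-bit stub: movq rbx, imm64 (REX.W prefix + opcode + 8-byte immediate),
  // then the same jmp rel32.
  const int movq_imm64 = 1 + 1 + 8;
  assert(movq_imm64 + jmp_rel32 == 15);   // matches LP64_ONLY(15)

  std::printf("32-bit stub: %d bytes, 64-bit stub: %d bytes\n",
              mov_imm32 + jmp_rel32, movq_imm64 + jmp_rel32);
  return 0;
}
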
--- a/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/globalDefinitions_x86.hpp	Thu May 16 11:47:51 2013 +0100
@@ -27,4 +27,6 @@
 
 const int StackAlignmentInBytes  = 16;
 
+#define SUPPORTS_NATIVE_CX8
+
 #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
--- a/hotspot/src/cpu/x86/vm/jni_x86.h	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/jni_x86.h	Thu May 16 11:47:51 2013 +0100
@@ -28,7 +28,13 @@
 
 #if defined(SOLARIS) || defined(LINUX) || defined(_ALLBSD_SOURCE)
 
-#if defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2)
+
+// Note: please do not change these without also changing jni_md.h in the JDK
+// repository
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
   #define JNIEXPORT     __attribute__((visibility("default")))
   #define JNIIMPORT     __attribute__((visibility("default")))
 #else
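
The guard added above is the standard portability idiom for __has_attribute, which not every compiler predefines; defining it to 0 when absent keeps the #if well formed and simply falls through to the plain declarations. A minimal self-contained sketch of the same pattern outside jni_x86.h (the MY_EXPORT macro is invented for illustration):

// has_attribute_sketch.cpp: illustrative only.
// Compilers that lack __has_attribute see it expand to 0, so the
// fallback branch is taken instead of a preprocessor error.
#ifndef __has_attribute
  #define __has_attribute(x) 0
#endif

#if __has_attribute(visibility)
  #define MY_EXPORT __attribute__((visibility("default")))
#else
  #define MY_EXPORT
#endif

#include <cstdio>

MY_EXPORT int answer() { return 42; }

int main() { std::printf("%d\n", answer()); return 0; }
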
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Thu May 16 11:47:51 2013 +0100
@@ -1257,43 +1257,6 @@
 }
 
 //=============================================================================
-
-// emit call stub, compiled java to interpreter
-void emit_java_to_interp(CodeBuffer &cbuf ) {
-  // Stub is fixed up when the corresponding call is converted from calling
-  // compiled code to calling interpreted code.
-  // mov rbx,0
-  // jmp -1
-
-  address mark = cbuf.insts_mark();  // get mark within main instrs section
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a stub.
-  MacroAssembler _masm(&cbuf);
-
-  address base =
-  __ start_a_stub(Compile::MAX_stubs_size);
-  if (base == NULL)  return;  // CodeBuffer::expand failed
-  // static stub relocation stores the instruction address of the call
-  __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32);
-  // static stub relocation also tags the Method* in the code-stream.
-  __ mov_metadata(rbx, (Metadata*)NULL);  // method is zapped till fixup time
-  // This is recognized as unresolved by relocs/nativeInst/ic code
-  __ jump(RuntimeAddress(__ pc()));
-
-  __ end_a_stub();
-  // Update current stubs pointer and restore insts_end.
-}
-// size of call stub, compiled java to interpretor
-uint size_java_to_interp() {
-  return 10;  // movl; jmp
-}
-// relocation entries for call stub, compiled java to interpretor
-uint reloc_java_to_interp() {
-  return 4;  // 3 in emit_java_to_interp + 1 in Java_Static_Call
-}
-
-//=============================================================================
 #ifndef PRODUCT
 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
   st->print_cr(  "CMP    EAX,[ECX+4]\t# Inline cache check");
@@ -1909,8 +1872,8 @@
       emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4),
                      static_call_Relocation::spec(), RELOC_IMM32 );
     }
-    if (_method) {  // Emit stub for static call
-      emit_java_to_interp(cbuf);
+    if (_method) {  // Emit stub for static call.
+      CompiledStaticCall::emit_to_interp_stub(cbuf);
     }
   %}
 
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Thu May 16 11:47:51 2013 +0100
@@ -1388,48 +1388,6 @@
 }
 
 //=============================================================================
-
-// emit call stub, compiled java to interpreter
-void emit_java_to_interp(CodeBuffer& cbuf)
-{
-  // Stub is fixed up when the corresponding call is converted from
-  // calling compiled code to calling interpreted code.
-  // movq rbx, 0
-  // jmp -5 # to self
-
-  address mark = cbuf.insts_mark();  // get mark within main instrs section
-
-  // Note that the code buffer's insts_mark is always relative to insts.
-  // That's why we must use the macroassembler to generate a stub.
-  MacroAssembler _masm(&cbuf);
-
-  address base =
-  __ start_a_stub(Compile::MAX_stubs_size);
-  if (base == NULL)  return;  // CodeBuffer::expand failed
-  // static stub relocation stores the instruction address of the call
-  __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
-  // static stub relocation also tags the Method* in the code-stream.
-  __ mov_metadata(rbx, (Metadata*) NULL);  // method is zapped till fixup time
-  // This is recognized as unresolved by relocs/nativeinst/ic code
-  __ jump(RuntimeAddress(__ pc()));
-
-  // Update current stubs pointer and restore insts_end.
-  __ end_a_stub();
-}
-
-// size of call stub, compiled java to interpretor
-uint size_java_to_interp()
-{
-  return 15;  // movq (1+1+8); jmp (1+4)
-}
-
-// relocation entries for call stub, compiled java to interpretor
-uint reloc_java_to_interp()
-{
-  return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
-}
-
-//=============================================================================
 #ifndef PRODUCT
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
@@ -2078,8 +2036,8 @@
                      RELOC_DISP32);
     }
     if (_method) {
-      // Emit stub for static call
-      emit_java_to_interp(cbuf);
+      // Emit stub for static call.
+      CompiledStaticCall::emit_to_interp_stub(cbuf);
     }
   %}
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/zero/vm/compiledIC_zero.cpp	Thu May 16 11:47:51 2013 +0100
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/codeCache.hpp"
+#include "code/compiledIC.hpp"
+#include "code/icBuffer.hpp"
+#include "code/nmethod.hpp"
+#include "code/vtableStubs.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/linkResolver.hpp"
+#include "memory/metadataFactory.hpp"
+#include "memory/oopFactory.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/symbol.hpp"
+#include "runtime/icache.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "utilities/events.hpp"
+
+
+// Release the CompiledICHolder* associated with this call site if there is one.
+void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  if (is_icholder_entry(call->destination())) {
+    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
+    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
+  }
+}
+
+bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  return is_icholder_entry(call->destination());
+}
+
+//-----------------------------------------------------------------------------
+// High-level access to an inline cache. Guaranteed to be MT-safe.
+
+CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
+  : _ic_call(call)
+{
+  address ic_call = call->instruction_address();
+
+  assert(ic_call != NULL, "ic_call address must be set");
+  assert(nm != NULL, "must pass nmethod");
+  assert(nm->contains(ic_call), "must be in nmethod");
+
+  // Search for the ic_call at the given address.
+  RelocIterator iter(nm, ic_call, ic_call+1);
+  bool ret = iter.next();
+  assert(ret == true, "relocInfo must exist at this address");
+  assert(iter.addr() == ic_call, "must find ic_call");
+  if (iter.type() == relocInfo::virtual_call_type) {
+    virtual_call_Relocation* r = iter.virtual_call_reloc();
+    _is_optimized = false;
+    _value = nativeMovConstReg_at(r->cached_value());
+  } else {
+    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
+    _is_optimized = true;
+    _value = NULL;
+  }
+}
+
+// ----------------------------------------------------------------------------
+
+void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+  ShouldNotReachHere(); // Only needed for COMPILER2.
+}
+
+int CompiledStaticCall::to_interp_stub_size() {
+  ShouldNotReachHere(); // Only needed for COMPILER2.
+  return 0;
+}
+
+// Relocation entries for call stub, compiled java to interpreter.
+int CompiledStaticCall::reloc_to_interp_stub() {
+  ShouldNotReachHere(); // Only needed for COMPILER2.
+  return 0;
+}
+
+void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  ShouldNotReachHere(); // Only needed for COMPILER2.
+}
+
+void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+  ShouldNotReachHere(); // Only needed for COMPILER2.
+}
+
+//-----------------------------------------------------------------------------
+// Non-product mode code.
+#ifndef PRODUCT
+
+void CompiledStaticCall::verify() {
+  ShouldNotReachHere(); // Only needed for COMPILER2.
+}
+
+#endif // !PRODUCT
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu May 16 11:47:51 2013 +0100
@@ -212,7 +212,13 @@
 
   // Update the invocation counter
   if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
-    InvocationCounter *counter = method->invocation_counter();
+    MethodCounters* mcs = method->method_counters();
+    if (mcs == NULL) {
+      CALL_VM_NOCHECK(mcs = InterpreterRuntime::build_method_counters(thread, method));
+      if (HAS_PENDING_EXCEPTION)
+        goto unwind_and_return;
+    }
+    InvocationCounter *counter = mcs->invocation_counter();
     counter->increment();
     if (counter->reached_InvocationLimit()) {
       CALL_VM_NOCHECK(
--- a/hotspot/src/cpu/zero/vm/jni_zero.h	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/cpu/zero/vm/jni_zero.h	Thu May 16 11:47:51 2013 +0100
@@ -25,7 +25,13 @@
  */
 
 
-#if defined(__GNUC__) && (__GNUC__ >= 4)
+
+// Note: please do not change these without also changing jni_md.h in the JDK
+// repository
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
   #define JNIEXPORT     __attribute__((visibility("default")))
   #define JNIIMPORT     __attribute__((visibility("default")))
 #else
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu May 16 11:47:51 2013 +0100
@@ -1230,10 +1230,6 @@
   return retval;
 }
 
-const char* os::get_current_directory(char *buf, int buflen) {
-  return getcwd(buf, buflen);
-}
-
 // check if addr is inside libjvm.so
 bool os::address_is_in_vm(address addr) {
   static address libjvm_base_addr;
@@ -2080,9 +2076,10 @@
     flags |= MAP_FIXED;
   }
 
-  // Map uncommitted pages PROT_READ and PROT_WRITE, change access
-  // to PROT_EXEC if executable when we commit the page.
-  addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
+  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
+  // touch an uncommitted page. Otherwise, the read/write might
+  // succeed if we have enough swap space to back the physical page.
+  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                        flags, -1, 0);
 
   if (addr != MAP_FAILED) {
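
The switch to PROT_NONE above separates reserving address space from committing it: pages only become accessible once a later commit step changes their protection, so a stray touch of uncommitted memory faults immediately instead of quietly consuming swap. A minimal POSIX sketch of that reserve-then-commit pattern, independent of the HotSpot os:: layer:

// reserve_commit_sketch.cpp: illustrative only (POSIX).
#include <sys/mman.h>
#include <cstdio>
#include <cstring>

int main() {
  const size_t bytes = 1 << 20;  // reserve 1 MB of address space

  // Reserve: PROT_NONE, so touching the range before commit faults early.
  void* addr = ::mmap(nullptr, bytes, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED) { std::perror("mmap"); return 1; }

  // Commit: make the pages readable/writable just before first use.
  if (::mprotect(addr, bytes, PROT_READ | PROT_WRITE) != 0) {
    std::perror("mprotect"); return 1;
  }
  std::memset(addr, 0, bytes);   // safe only after the commit step

  ::munmap(addr, bytes);
  return 0;
}
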
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Thu May 16 11:47:51 2013 +0100
@@ -119,6 +119,7 @@
 Mutex* os::Linux::_createThread_lock = NULL;
 pthread_t os::Linux::_main_thread;
 int os::Linux::_page_size = -1;
+const int os::Linux::_vm_default_page_size = (8 * K);
 bool os::Linux::_is_floating_stack = false;
 bool os::Linux::_is_NPTL = false;
 bool os::Linux::_supports_fast_thread_cpu_time = false;
@@ -1662,10 +1663,6 @@
   return retval;
 }
 
-const char* os::get_current_directory(char *buf, int buflen) {
-  return getcwd(buf, buflen);
-}
-
 // check if addr is inside libjvm.so
 bool os::address_is_in_vm(address addr) {
   static address libjvm_base_addr;
@@ -2906,9 +2903,10 @@
     flags |= MAP_FIXED;
   }
 
-  // Map uncommitted pages PROT_READ and PROT_WRITE, change access
-  // to PROT_EXEC if executable when we commit the page.
-  addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
+  // Map reserved/uncommitted pages PROT_NONE so we fail early if we
+  // touch an uncommitted page. Otherwise, the read/write might
+  // succeed if we have enough swap space to back the physical page.
+  addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
                        flags, -1, 0);
 
   if (addr != MAP_FAILED) {
@@ -4249,6 +4247,15 @@
   Linux::clock_init();
   initial_time_count = os::elapsed_counter();
   pthread_mutex_init(&dl_mutex, NULL);
+
+  // If the page size of the VM is greater than 8K, determine the appropriate
+  // number of initial guard pages.  The user can change this with the
+  // command line arguments, if needed.
+  if (vm_page_size() > (int)Linux::vm_default_page_size()) {
+    StackYellowPages = 1;
+    StackRedPages = 1;
+    StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size();
+  }
 }
 
 // To install functions for atexit system call
@@ -4302,8 +4309,8 @@
   // Add in 2*BytesPerWord times page size to account for VM stack during
   // class initialization depending on 32 or 64 bit VM.
   os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed,
-            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
-                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::page_size());
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() +
+                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size());
 
   size_t threadStackSizeInBytes = ThreadStackSize * K;
   if (threadStackSizeInBytes != 0 &&
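
The StackYellowPages/StackRedPages/StackShadowPages adjustment above rescales guard zones that were historically sized in 8K "default" pages onto platforms whose actual page size is larger. A minimal sketch of the shadow-zone arithmetic; the 64K page size and the shadow-page count of 20 are hypothetical values chosen only for illustration:

// shadow_pages_sketch.cpp: illustrative only; concrete values are hypothetical.
#include <cassert>
#include <cstdio>

// Round x up to the next multiple of align.
static long round_to(long x, long align) { return (x + align - 1) / align * align; }

int main() {
  const long K = 1024;
  const long vm_default_page_size = 8 * K;   // mirrors the new constant in os_linux.cpp
  const long vm_page_size         = 64 * K;  // hypothetical large-page platform
  long stack_shadow_pages         = 20;      // hypothetical default, counted in 8K pages

  if (vm_page_size > vm_default_page_size) {
    stack_shadow_pages =
        round_to(stack_shadow_pages * vm_default_page_size, vm_page_size) / vm_page_size;
  }
  // 20 * 8K = 160K, rounded up to 192K, i.e. 3 pages of 64K.
  assert(stack_shadow_pages == 3);
  std::printf("shadow pages on a 64K-page platform: %ld\n", stack_shadow_pages);
  return 0;
}
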
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Thu May 16 11:47:51 2013 +0100
@@ -70,6 +70,7 @@
   static pthread_t _main_thread;
   static Mutex* _createThread_lock;
   static int _page_size;
+  static const int _vm_default_page_size;
 
   static julong available_memory();
   static julong physical_memory() { return _physical_memory; }
@@ -116,6 +117,8 @@
   static int page_size(void)                                        { return _page_size; }
   static void set_page_size(int val)                                { _page_size = val; }
 
+  static int vm_default_page_size(void)                             { return _vm_default_page_size; }
+
   static address   ucontext_get_pc(ucontext_t* uc);
   static intptr_t* ucontext_get_sp(ucontext_t* uc);
   static intptr_t* ucontext_get_fp(ucontext_t* uc);
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Thu May 16 11:47:51 2013 +0100
@@ -251,3 +251,11 @@
   return true;
 #endif
 }
+
+const char* os::get_current_directory(char *buf, size_t buflen) {
+  return getcwd(buf, buflen);
+}
+
+FILE* os::open(int fd, const char* mode) {
+  return ::fdopen(fd, mode);
+}
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu May 16 11:47:51 2013 +0100
@@ -824,7 +824,7 @@
       // allocate new buffer and initialize
       info = (Dl_serinfo*)malloc(_info.dls_size);
       if (info == NULL) {
-        vm_exit_out_of_memory(_info.dls_size,
+        vm_exit_out_of_memory(_info.dls_size, OOM_MALLOC_ERROR,
                               "init_system_properties_values info");
       }
       info->dls_size = _info.dls_size;
@@ -866,7 +866,7 @@
       common_path = malloc(bufsize);
       if (common_path == NULL) {
         free(info);
-        vm_exit_out_of_memory(bufsize,
+        vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
                               "init_system_properties_values common_path");
       }
       sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);
@@ -879,7 +879,7 @@
       if (library_path == NULL) {
         free(info);
         free(common_path);
-        vm_exit_out_of_memory(bufsize,
+        vm_exit_out_of_memory(bufsize, OOM_MALLOC_ERROR,
                               "init_system_properties_values library_path");
       }
       library_path[0] = '\0';
@@ -1623,7 +1623,8 @@
   // %%% this is used only in threadLocalStorage.cpp
   if (thr_setspecific((thread_key_t)index, value)) {
     if (errno == ENOMEM) {
-       vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
+       vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
+                             "thr_setspecific: out of swap space");
     } else {
       fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                     "(%s)", strerror(errno)));
@@ -1915,10 +1916,6 @@
   return retval;
 }
 
-const char* os::get_current_directory(char *buf, int buflen) {
-  return getcwd(buf, buflen);
-}
-
 // check if addr is inside libjvm.so
 bool os::address_is_in_vm(address addr) {
   static address libjvm_base_addr;
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Thu May 16 11:47:51 2013 +0100
@@ -1221,8 +1221,10 @@
 
 // Needs to be in os specific directory because windows requires another
 // header file <direct.h>
-const char* os::get_current_directory(char *buf, int buflen) {
-  return _getcwd(buf, buflen);
+const char* os::get_current_directory(char *buf, size_t buflen) {
+  int n = static_cast<int>(buflen);
+  if (buflen > INT_MAX)  n = INT_MAX;
+  return _getcwd(buf, n);
 }
 
 //-----------------------------------------------------------
@@ -4098,6 +4100,10 @@
   return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
 }
 
+FILE* os::open(int fd, const char* mode) {
+  return ::_fdopen(fd, mode);
+}
+
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   WIN32_FIND_DATA fd;
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Thu May 16 11:47:51 2013 +0100
@@ -178,7 +178,7 @@
     // JVM needs to know exact stack location, abort if it fails
     if (rslt != 0) {
       if (rslt == ENOMEM) {
-        vm_exit_out_of_memory(0, "pthread_getattr_np");
+        vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
       } else {
         fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
       }
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu May 16 11:47:51 2013 +0100
@@ -710,7 +710,7 @@
      // JVM needs to know exact stack location, abort if it fails
      if (rslt != 0) {
        if (rslt == ENOMEM) {
-         vm_exit_out_of_memory(0, "pthread_getattr_np");
+         vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
        } else {
          fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
        }
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Thu May 16 11:47:51 2013 +0100
@@ -313,7 +313,7 @@
   int res = pthread_getattr_np(pthread_self(), &attr);
   if (res != 0) {
     if (res == ENOMEM) {
-      vm_exit_out_of_memory(0, "pthread_getattr_np");
+      vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
     }
     else {
       fatal(err_msg("pthread_getattr_np failed with errno = %d", res));
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Thu May 16 11:47:51 2013 +0100
@@ -591,7 +591,7 @@
   // on the thread stack, which could get a mapping error when touched.
   address addr = (address) info->si_addr;
   if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) {
-    vm_exit_out_of_memory(0, "Out of swap space to map in thread stack.");
+    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack.");
   }
 
   VMError err(t, sig, pc, info, ucVoid);
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Thu May 16 11:47:51 2013 +0100
@@ -745,7 +745,7 @@
   // on the thread stack, which could get a mapping error when touched.
   address addr = (address) info->si_addr;
   if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) {
-    vm_exit_out_of_memory(0, "Out of swap space to map in thread stack.");
+    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack.");
   }
 
   VMError err(t, sig, pc, info, ucVoid);
--- a/hotspot/src/share/vm/adlc/main.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/adlc/main.cpp	Thu May 16 11:47:51 2013 +0100
@@ -213,6 +213,7 @@
   AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name));
   AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp");
   AD.addInclude(AD._CPP_file, "asm/macroAssembler.inline.hpp");
+  AD.addInclude(AD._CPP_file, "code/compiledIC.hpp");
   AD.addInclude(AD._CPP_file, "code/vmreg.hpp");
   AD.addInclude(AD._CPP_file, "gc_interface/collectedHeap.inline.hpp");
   AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp");
--- a/hotspot/src/share/vm/asm/assembler.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/asm/assembler.cpp	Thu May 16 11:47:51 2013 +0100
@@ -44,7 +44,7 @@
   CodeSection* cs = code->insts();
   cs->clear_mark();   // new assembler kills old mark
   if (cs->start() == NULL)  {
-    vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
+    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, err_msg("CodeCache: no room for %s",
                                      code->name()));
   }
   _code_section = cs;
--- a/hotspot/src/share/vm/ci/ciEnv.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp	Thu May 16 11:47:51 2013 +0100
@@ -483,7 +483,8 @@
     {
       // We have to lock the cpool to keep the oop from being resolved
       // while we are accessing it.
-        MonitorLockerEx ml(cpool->lock());
+      oop cplock = cpool->lock();
+      ObjectLocker ol(cplock, THREAD, cplock != NULL);
       constantTag tag = cpool->tag_at(index);
       if (tag.is_klass()) {
         // The klass has been inserted into the constant pool
@@ -1149,23 +1150,9 @@
   record_method_not_compilable("out of memory");
 }
 
-fileStream* ciEnv::_replay_data_stream = NULL;
-
-void ciEnv::dump_replay_data() {
+void ciEnv::dump_replay_data(outputStream* out) {
   VM_ENTRY_MARK;
   MutexLocker ml(Compile_lock);
-  if (_replay_data_stream == NULL) {
-    _replay_data_stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(ReplayDataFile);
-    if (_replay_data_stream == NULL) {
-      fatal(err_msg("Can't open %s for replay data", ReplayDataFile));
-    }
-  }
-  dump_replay_data(_replay_data_stream);
-}
-
-
-void ciEnv::dump_replay_data(outputStream* out) {
-  ASSERT_IN_VM;
   ResourceMark rm;
 #if INCLUDE_JVMTI
   out->print_cr("JvmtiExport can_access_local_variables %d",     _jvmti_can_access_local_variables);
@@ -1178,13 +1165,15 @@
   for (int i = 0; i < objects->length(); i++) {
     objects->at(i)->dump_replay_data(out);
   }
-  Method* method = task()->method();
-  int entry_bci = task()->osr_bci();
+  CompileTask* task = this->task();
+  Method* method = task->method();
+  int entry_bci = task->osr_bci();
+  int comp_level = task->comp_level();
   // Klass holder = method->method_holder();
-  out->print_cr("compile %s %s %s %d",
+  out->print_cr("compile %s %s %s %d %d",
                 method->klass_name()->as_quoted_ascii(),
                 method->name()->as_quoted_ascii(),
                 method->signature()->as_quoted_ascii(),
-                entry_bci);
+                entry_bci, comp_level);
   out->flush();
 }
--- a/hotspot/src/share/vm/ci/ciEnv.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp	Thu May 16 11:47:51 2013 +0100
@@ -46,8 +46,6 @@
   friend class CompileBroker;
   friend class Dependencies;  // for get_object, during logging
 
-  static fileStream* _replay_data_stream;
-
 private:
   Arena*           _arena;       // Alias for _ciEnv_arena except in init_shared_objects()
   Arena            _ciEnv_arena;
@@ -451,10 +449,6 @@
   // RedefineClasses support
   void metadata_do(void f(Metadata*)) { _factory->metadata_do(f); }
 
-  // Dump the compilation replay data for this ciEnv to
-  // ReplayDataFile, creating the file if needed.
-  void  dump_replay_data();
-
   // Dump the compilation replay data for the ciEnv to the stream.
   void dump_replay_data(outputStream* out);
 };
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Thu May 16 11:47:51 2013 +0100
@@ -196,7 +196,6 @@
   // Analysis and profiling.
   //
   // Usage note: liveness_at_bci and init_vars should be wrapped in ResourceMarks.
-  bool          uses_monitors() const            { return _uses_monitors; } // this one should go away, it has a misleading name
   bool          has_monitor_bytecodes() const    { return _uses_monitors; }
   bool          has_balanced_monitors();
 
--- a/hotspot/src/share/vm/ci/ciReplay.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciReplay.cpp	Thu May 16 11:47:51 2013 +0100
@@ -89,7 +89,7 @@
     loader = Handle(thread, SystemDictionary::java_system_loader());
     stream = fopen(filename, "rt");
     if (stream == NULL) {
-      fprintf(stderr, "Can't open replay file %s\n", filename);
+      fprintf(stderr, "ERROR: Can't open replay file %s\n", filename);
     }
     buffer_length = 32;
     buffer = NEW_RESOURCE_ARRAY(char, buffer_length);
@@ -327,7 +327,6 @@
         if (had_error()) {
           tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message);
           tty->print_cr("%s", buffer);
-          assert(false, "error");
           return;
         }
         pos = 0;
@@ -370,11 +369,47 @@
     }
   }
 
-  // compile <klass> <name> <signature> <entry_bci>
+  // validation of comp_level
+  bool is_valid_comp_level(int comp_level) {
+    const int msg_len = 256;
+    char* msg = NULL;
+    if (!is_compile(comp_level)) {
+      msg = NEW_RESOURCE_ARRAY(char, msg_len);
+      jio_snprintf(msg, msg_len, "%d isn't compilation level", comp_level);
+    } else if (!TieredCompilation && (comp_level != CompLevel_highest_tier)) {
+      msg = NEW_RESOURCE_ARRAY(char, msg_len);
+      switch (comp_level) {
+        case CompLevel_simple:
+          jio_snprintf(msg, msg_len, "compilation level %d requires Client VM or TieredCompilation", comp_level);
+          break;
+        case CompLevel_full_optimization:
+          jio_snprintf(msg, msg_len, "compilation level %d requires Server VM", comp_level);
+          break;
+        default:
+          jio_snprintf(msg, msg_len, "compilation level %d requires TieredCompilation", comp_level);
+      }
+    }
+    if (msg != NULL) {
+      report_error(msg);
+      return false;
+    }
+    return true;
+  }
+
+  // compile <klass> <name> <signature> <entry_bci> <comp_level>
   void process_compile(TRAPS) {
     // methodHandle method;
     Method* method = parse_method(CHECK);
     int entry_bci = parse_int("entry_bci");
+    const char* comp_level_label = "comp_level";
+    int comp_level = parse_int(comp_level_label);
+    // old version w/o comp_level
+    if (had_error() && (error_message() == comp_level_label)) {
+      comp_level = CompLevel_full_optimization;
+    }
+    if (!is_valid_comp_level(comp_level)) {
+      return;
+    }
     Klass* k = method->method_holder();
     ((InstanceKlass*)k)->initialize(THREAD);
     if (HAS_PENDING_EXCEPTION) {
@@ -389,12 +424,12 @@
       }
     }
     // Make sure the existence of a prior compile doesn't stop this one
-    nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, CompLevel_full_optimization, true) : method->code();
+    nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
     if (nm != NULL) {
       nm->make_not_entrant();
     }
     replay_state = this;
-    CompileBroker::compile_method(method, entry_bci, CompLevel_full_optimization,
+    CompileBroker::compile_method(method, entry_bci, comp_level,
                                   methodHandle(), 0, "replay", THREAD);
     replay_state = NULL;
     reset();
@@ -551,7 +586,7 @@
           if (parsed_two_word == i) continue;
 
         default:
-          ShouldNotReachHere();
+          fatal(err_msg_res("Unexpected tag: %d", cp->tag_at(i).value()));
           break;
       }
 
@@ -819,6 +854,11 @@
     ReplaySuppressInitializers = 1;
   }
 
+  if (FLAG_IS_DEFAULT(ReplayDataFile)) {
+    tty->print_cr("ERROR: no compiler replay data file specified (use -XX:ReplayDataFile=replay_pid12345.txt).");
+    return 1;
+  }
+
   // Load and parse the replay data
   CompileReplay rp(ReplayDataFile, THREAD);
   int exit_code = 0;
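
With this change the replay file's compile directive carries five fields (klass, name, signature, entry_bci and comp_level), while files written before the change stop after entry_bci and fall back to the top compilation level. A minimal parsing sketch of that backward-compatible format; the sample lines and the numeric fallback are invented for illustration rather than taken from a real replay file:

// replay_compile_line_sketch.cpp: illustrative only.
#include <cstdio>
#include <initializer_list>

int main() {
  // Hypothetical replay lines: new format with comp_level, old format without it.
  const char* new_line = "compile java/lang/String hashCode ()I -1 4";
  const char* old_line = "compile java/lang/String hashCode ()I -1";
  const int   fallback_level = 4;  // stand-in for CompLevel_full_optimization

  for (const char* line : { new_line, old_line }) {
    char klass[256], name[256], sig[256];
    int entry_bci = 0, comp_level = fallback_level;
    int fields = std::sscanf(line, "compile %255s %255s %255s %d %d",
                             klass, name, sig, &entry_bci, &comp_level);
    if (fields < 5) comp_level = fallback_level;  // old version w/o comp_level
    std::printf("%s::%s%s entry_bci=%d comp_level=%d\n",
                klass, name, sig, entry_bci, comp_level);
  }
  return 0;
}
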
--- a/hotspot/src/share/vm/classfile/bytecodeAssembler.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/bytecodeAssembler.cpp	Thu May 16 11:47:51 2013 +0100
@@ -75,8 +75,8 @@
     int idx = i + _orig->length();
     switch (entry._tag) {
       case BytecodeCPEntry::UTF8:
+        entry._u.utf8->increment_refcount();
         cp->symbol_at_put(idx, entry._u.utf8);
-        entry._u.utf8->increment_refcount();
         break;
       case BytecodeCPEntry::KLASS:
         cp->unresolved_klass_at_put(
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu May 16 11:47:51 2013 +0100
@@ -2027,7 +2027,6 @@
   u2 method_parameters_length = 0;
   u1* method_parameters_data = NULL;
   bool method_parameters_seen = false;
-  bool method_parameters_four_byte_flags;
   bool parsed_code_attribute = false;
   bool parsed_checked_exceptions_attribute = false;
   bool parsed_stackmap_attribute = false;
@@ -2241,26 +2240,14 @@
       }
       method_parameters_seen = true;
       method_parameters_length = cfs->get_u1_fast();
-      // Track the actual size (note: this is written for clarity; a
-      // decent compiler will CSE and constant-fold this into a single
-      // expression)
-      // Use the attribute length to figure out the size of flags
-      if (method_attribute_length == (method_parameters_length * 6u) + 1u) {
-        method_parameters_four_byte_flags = true;
-      } else if (method_attribute_length == (method_parameters_length * 4u) + 1u) {
-        method_parameters_four_byte_flags = false;
-      } else {
+      if (method_attribute_length != (method_parameters_length * 4u) + 1u) {
         classfile_parse_error(
           "Invalid MethodParameters method attribute length %u in class file",
           method_attribute_length, CHECK_(nullHandle));
       }
       method_parameters_data = cfs->get_u1_buffer();
       cfs->skip_u2_fast(method_parameters_length);
-      if (method_parameters_four_byte_flags) {
-        cfs->skip_u4_fast(method_parameters_length);
-      } else {
-        cfs->skip_u2_fast(method_parameters_length);
-      }
+      cfs->skip_u2_fast(method_parameters_length);
       // ignore this attribute if it cannot be reflected
       if (!SystemDictionary::Parameter_klass_loaded())
         method_parameters_length = 0;
@@ -2423,13 +2410,8 @@
     for (int i = 0; i < method_parameters_length; i++) {
       elem[i].name_cp_index = Bytes::get_Java_u2(method_parameters_data);
       method_parameters_data += 2;
-      if (method_parameters_four_byte_flags) {
-        elem[i].flags = Bytes::get_Java_u4(method_parameters_data);
-        method_parameters_data += 4;
-      } else {
-        elem[i].flags = Bytes::get_Java_u2(method_parameters_data);
-        method_parameters_data += 2;
-      }
+      elem[i].flags = Bytes::get_Java_u2(method_parameters_data);
+      method_parameters_data += 2;
     }
   }
 
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp	Thu May 16 11:47:51 2013 +0100
@@ -304,7 +304,19 @@
 
   inline void assert_property(bool b, const char* msg, TRAPS) {
 #ifdef ASSERT
-    if (!b) { fatal(msg); }
+    if (!b) {
+      ResourceMark rm(THREAD);
+      fatal(err_msg(msg, _class_name->as_C_string()));
+    }
+#endif
+  }
+
+  inline void assert_property(bool b, const char* msg, int index, TRAPS) {
+#ifdef ASSERT
+    if (!b) {
+      ResourceMark rm(THREAD);
+      fatal(err_msg(msg, index, _class_name->as_C_string()));
+    }
 #endif
   }
 
@@ -312,7 +324,7 @@
     if (_need_verify) {
       guarantee_property(property, msg, index, CHECK);
     } else {
-      assert_property(property, msg, CHECK);
+      assert_property(property, msg, index, CHECK);
     }
   }
 
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Thu May 16 11:47:51 2013 +0100
@@ -1345,9 +1345,10 @@
           tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_class_counter, buffer);
           // Preload all classes to get around uncommon traps
           // Iterate over all methods in class
+          int comp_level = CompilationPolicy::policy()->initial_compile_level();
           for (int n = 0; n < k->methods()->length(); n++) {
             methodHandle m (THREAD, k->methods()->at(n));
-            if (CompilationPolicy::can_be_compiled(m)) {
+            if (CompilationPolicy::can_be_compiled(m, comp_level)) {
 
               if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
                 // Give sweeper a chance to keep up with CTW
@@ -1356,7 +1357,7 @@
                 _codecache_sweep_counter = 0;
               }
               // Force compilation
-              CompileBroker::compile_method(m, InvocationEntryBci, CompilationPolicy::policy()->initial_compile_level(),
+              CompileBroker::compile_method(m, InvocationEntryBci, comp_level,
                                             methodHandle(), 0, "CTW", THREAD);
               if (HAS_PENDING_EXCEPTION) {
                 clear_pending_exception_if_not_oom(CHECK);
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Thu May 16 11:47:51 2013 +0100
@@ -53,6 +53,7 @@
 #include "classfile/metadataOnStackMark.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
+#include "memory/gcLocker.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
@@ -65,17 +66,19 @@
 
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
-ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
+ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
   _class_loader(h_class_loader()),
   _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially
   _metaspace(NULL), _unloading(false), _klasses(NULL),
   _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
-  _next(NULL), _dependencies(),
+  _next(NULL), _dependencies(dependencies),
   _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
     // empty
 }
 
 void ClassLoaderData::init_dependencies(TRAPS) {
+  assert(!Universe::is_fully_initialized(), "should only be called when initializing");
+  assert(is_the_null_class_loader_data(), "should only call this for the null class loader");
   _dependencies.init(CHECK);
 }
 
@@ -277,6 +280,9 @@
 void ClassLoaderData::unload() {
   _unloading = true;
 
+  // Tell serviceability tools these classes are unloading
+  classes_do(InstanceKlass::notify_unload_class);
+
   if (TraceClassLoaderData) {
     ResourceMark rm;
     tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
@@ -300,6 +306,9 @@
 
 
 ClassLoaderData::~ClassLoaderData() {
+  // Release C heap structures for all the classes.
+  classes_do(InstanceKlass::release_C_heap_structures);
+
   Metaspace *m = _metaspace;
   if (m != NULL) {
     _metaspace = NULL;
@@ -423,7 +432,7 @@
 // These anonymous class loaders are to contain classes used for JSR292
 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
   // Add a new class loader data to the graph.
-  return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL);
+  return ClassLoaderDataGraph::add(loader, true, CHECK_NULL);
 }
 
 const char* ClassLoaderData::loader_name() {
@@ -495,19 +504,22 @@
 ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
 ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
 
-
 // Add a new class loader data node to the list.  Assign the newly created
 // ClassLoaderData into the java/lang/ClassLoader object as a hidden field
-ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) {
-  // Not assigned a class loader data yet.
-  // Create one.
-  ClassLoaderData* *list_head = &_head;
-  ClassLoaderData* next = _head;
+ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
+  // We need to allocate all the oops for the ClassLoaderData before allocating the
+  // actual ClassLoaderData object.
+  ClassLoaderData::Dependencies dependencies(CHECK_NULL);
 
-  bool is_anonymous = (cld_addr == NULL);
-  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
+  No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the
+                                       // ClassLoaderData in the graph since the CLD
+                                       // contains unhandled oops
 
-  if (cld_addr != NULL) {
+  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies);
+
+
+  if (!is_anonymous) {
+    ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
     // First, Atomically set it
     ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
     if (old != NULL) {
@@ -519,6 +531,9 @@
 
   // We won the race, and therefore the task of adding the data to the list of
   // class loader data
+  ClassLoaderData** list_head = &_head;
+  ClassLoaderData* next = _head;
+
   do {
     cld->set_next(next);
     ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
@@ -531,10 +546,6 @@
                    cld->loader_name());
         tty->print_cr("]");
       }
-      // Create dependencies after the CLD is added to the list.  Otherwise,
-      // the GC GC will not find the CLD and the _class_loader field will
-      // not be updated.
-      cld->init_dependencies(CHECK_NULL);
       return cld;
     }
     next = exchanged;
@@ -665,6 +676,8 @@
     dead->unload();
     data = data->next();
     // Remove from loader list.
+    // This class loader data will no longer be found
+    // in the ClassLoaderDataGraph.
     if (prev != NULL) {
       prev->set_next(data);
     } else {
@@ -686,6 +699,7 @@
     next = purge_me->next();
     delete purge_me;
   }
+  Metaspace::purge();
 }
 
 // CDS support
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp	Thu May 16 11:47:51 2013 +0100
@@ -62,7 +62,7 @@
   // CMS support.
   static ClassLoaderData* _saved_head;
 
-  static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS);
+  static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
  public:
   static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
   static void purge();
@@ -100,6 +100,9 @@
                     Thread* THREAD);
    public:
     Dependencies() : _list_head(NULL) {}
+    Dependencies(TRAPS) : _list_head(NULL) {
+      init(CHECK);
+    }
     void add(Handle dependency, TRAPS);
     void init(TRAPS);
     void oops_do(OopClosure* f);
@@ -150,7 +153,7 @@
   void set_next(ClassLoaderData* next) { _next = next; }
   ClassLoaderData* next() const        { return _next; }
 
-  ClassLoaderData(Handle h_class_loader, bool is_anonymous);
+  ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
   ~ClassLoaderData();
 
   void set_metaspace(Metaspace* m) { _metaspace = m; }
@@ -190,7 +193,9 @@
   static void init_null_class_loader_data() {
     assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
     assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
-    _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false);
+
+    // We explicitly initialize the Dependencies object at a later phase in the initialization
+    _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false, Dependencies());
     ClassLoaderDataGraph::_head = _the_null_class_loader_data;
     assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
     if (DumpSharedSpaces) {
--- a/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/classLoaderData.inline.hpp	Thu May 16 11:47:51 2013 +0100
@@ -43,10 +43,9 @@
   assert(loader() != NULL,"Must be a class loader");
   // Gets the class loader data out of the java/lang/ClassLoader object, if non-null
   // it's already in the loader_data, so no need to add
-  ClassLoaderData** loader_data_addr = java_lang_ClassLoader::loader_data_addr(loader());
-  ClassLoaderData* loader_data_id = *loader_data_addr;
-  if (loader_data_id) {
-     return loader_data_id;
+  ClassLoaderData* loader_data = java_lang_ClassLoader::loader_data(loader());
+  if (loader_data) {
+     return loader_data;
   }
-  return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD);
+  return ClassLoaderDataGraph::add(loader, false, THREAD);
 }
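
ClassLoaderDataGraph::add above installs the new ClassLoaderData with compare-and-swap: first into the hidden loader_data field of the java/lang/ClassLoader oop, then onto the head of the global list, retrying the list push if another thread won the race. A minimal sketch of that lock-free head insertion using std::atomic; the Node type and names are invented for illustration:

// cas_list_push_sketch.cpp: illustrative only.
#include <atomic>
#include <cstdio>

struct Node {
  int   value;
  Node* next;
};

std::atomic<Node*> list_head{nullptr};

// Push n onto the list head, looping until our CAS wins; this mirrors the
// cmpxchg_ptr retry loop used when adding a ClassLoaderData to the graph.
void push(Node* n) {
  Node* expected = list_head.load();
  do {
    n->next = expected;
  } while (!list_head.compare_exchange_weak(expected, n));
}

int main() {
  Node a{1, nullptr};
  Node b{2, nullptr};
  push(&a);
  push(&b);
  for (Node* p = list_head.load(); p != nullptr; p = p->next) {
    std::printf("%d\n", p->value);
  }
  return 0;
}
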
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Thu May 16 11:47:51 2013 +0100
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
-#include "services/classLoadingService.hpp"
 #include "utilities/hashtable.inline.hpp"
 
 
@@ -156,19 +155,7 @@
           if (k_def_class_loader_data == loader_data) {
             // This is the defining entry, so the referred class is about
             // to be unloaded.
-            // Notify the debugger and clean up the class.
             class_was_unloaded = true;
-            // notify the debugger
-            if (JvmtiExport::should_post_class_unload()) {
-              JvmtiExport::post_class_unload(ik);
-            }
-
-            // notify ClassLoadingService of class unload
-            ClassLoadingService::notify_class_unloaded(ik);
-
-            // Clean up C heap
-            ik->release_C_heap_structures();
-            ik->constants()->release_C_heap_structures();
           }
           // Also remove this system dictionary entry.
           purge_entry = true;
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Thu May 16 11:47:51 2013 +0100
@@ -315,14 +315,18 @@
   return string;
 }
 
-jchar* java_lang_String::as_unicode_string(oop java_string, int& length) {
+jchar* java_lang_String::as_unicode_string(oop java_string, int& length, TRAPS) {
   typeArrayOop value  = java_lang_String::value(java_string);
   int          offset = java_lang_String::offset(java_string);
                length = java_lang_String::length(java_string);
 
-  jchar* result = NEW_RESOURCE_ARRAY(jchar, length);
-  for (int index = 0; index < length; index++) {
-    result[index] = value->char_at(index + offset);
+  jchar* result = NEW_RESOURCE_ARRAY_RETURN_NULL(jchar, length);
+  if (result != NULL) {
+    for (int index = 0; index < length; index++) {
+      result[index] = value->char_at(index + offset);
+    }
+  } else {
+    THROW_MSG_0(vmSymbols::java_lang_OutOfMemoryError(), "could not allocate Unicode string");
   }
   return result;
 }
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Thu May 16 11:47:51 2013 +0100
@@ -153,7 +153,7 @@
   static char*  as_utf8_string(oop java_string, char* buf, int buflen);
   static char*  as_utf8_string(oop java_string, int start, int len);
   static char*  as_platform_dependent_str(Handle java_string, TRAPS);
-  static jchar* as_unicode_string(oop java_string, int& length);
+  static jchar* as_unicode_string(oop java_string, int& length, TRAPS);
   // produce an ascii string with all other values quoted using \u####
   static char*  as_quoted_ascii(oop java_string);
 
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Thu May 16 11:47:51 2013 +0100
@@ -735,7 +735,7 @@
   ResourceMark rm(THREAD);
   int length;
   Handle h_string (THREAD, string);
-  jchar* chars = java_lang_String::as_unicode_string(string, length);
+  jchar* chars = java_lang_String::as_unicode_string(string, length, CHECK_NULL);
   oop result = intern(h_string, chars, length, CHECK_NULL);
   return result;
 }
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Thu May 16 11:47:51 2013 +0100
@@ -830,7 +830,7 @@
             Klass *kk;
             {
               MutexLocker mu(SystemDictionary_lock, THREAD);
-              kk = find_class(name, ik->class_loader_data());
+              kk = find_class(d_index, d_hash, name, ik->class_loader_data());
             }
             if (kk != NULL) {
               // No clean up is needed if the shared class has been entered
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu May 16 11:47:51 2013 +0100
@@ -517,13 +517,18 @@
   template(sun_management_ManagementFactory,           "sun/management/ManagementFactory")                        \
   template(sun_management_Sensor,                      "sun/management/Sensor")                                   \
   template(sun_management_Agent,                       "sun/management/Agent")                                    \
+  template(sun_management_DiagnosticCommandImpl,       "sun/management/DiagnosticCommandImpl")                    \
   template(sun_management_GarbageCollectorImpl,        "sun/management/GarbageCollectorImpl")                     \
+  template(sun_management_ManagementFactoryHelper,     "sun/management/ManagementFactoryHelper")                  \
+  template(getDiagnosticCommandMBean_name,             "getDiagnosticCommandMBean")                               \
+  template(getDiagnosticCommandMBean_signature,        "()Lcom/sun/management/DiagnosticCommandMBean;")           \
   template(getGcInfoBuilder_name,                      "getGcInfoBuilder")                                        \
   template(getGcInfoBuilder_signature,                 "()Lsun/management/GcInfoBuilder;")                        \
   template(com_sun_management_GcInfo,                  "com/sun/management/GcInfo")                               \
   template(com_sun_management_GcInfo_constructor_signature, "(Lsun/management/GcInfoBuilder;JJJ[Ljava/lang/management/MemoryUsage;[Ljava/lang/management/MemoryUsage;[Ljava/lang/Object;)V") \
   template(createGCNotification_name,                  "createGCNotification")                                    \
   template(createGCNotification_signature,             "(JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;Lcom/sun/management/GcInfo;)V") \
+  template(createDiagnosticFrameworkNotification_name, "createDiagnosticFrameworkNotification")                   \
   template(createMemoryPoolMBean_name,                 "createMemoryPoolMBean")                                   \
   template(createMemoryManagerMBean_name,              "createMemoryManagerMBean")                                \
   template(createGarbageCollectorMBean_name,           "createGarbageCollectorMBean")                             \
--- a/hotspot/src/share/vm/code/codeCache.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Thu May 16 11:47:51 2013 +0100
@@ -463,8 +463,10 @@
 }
 #endif //PRODUCT
 
-
-nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
+/**
+ * Remove and return nmethod from the saved code list in order to reanimate it.
+ */
+nmethod* CodeCache::reanimate_saved_code(Method* m) {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   nmethod* saved = _saved_nmethods;
   nmethod* prev = NULL;
@@ -479,7 +481,7 @@
       saved->set_speculatively_disconnected(false);
       saved->set_saved_nmethod_link(NULL);
       if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected\n");
+        saved->print_on(tty, " ### nmethod is reconnected");
       }
       if (LogCompilation && (xtty != NULL)) {
         ttyLocker ttyl;
@@ -496,6 +498,9 @@
   return NULL;
 }
 
+/**
+ * Remove nmethod from the saved code list in order to discard it permanently.
+ */
 void CodeCache::remove_saved_code(nmethod* nm) {
   // For conc swpr this will be called with CodeCache_lock taken by caller
   assert_locked_or_safepoint(CodeCache_lock);
@@ -529,7 +534,7 @@
   nm->set_saved_nmethod_link(_saved_nmethods);
   _saved_nmethods = nm;
   if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
+    nm->print_on(tty, " ### nmethod is speculatively disconnected");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;
--- a/hotspot/src/share/vm/code/codeCache.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Thu May 16 11:47:51 2013 +0100
@@ -57,7 +57,7 @@
   static int _number_of_nmethods_with_dependencies;
   static bool _needs_cache_clean;
   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
-  static nmethod* _saved_nmethods;          // linked via nm->saved_nmethod_look()
+  static nmethod* _saved_nmethods;          // Linked list of speculatively disconnected nmethods.
 
   static void verify_if_often() PRODUCT_RETURN;
 
@@ -168,7 +168,7 @@
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   static void clear_inline_caches();             // clear all inline caches
 
-  static nmethod* find_and_remove_saved_code(Method* m);
+  static nmethod* reanimate_saved_code(Method* m);
   static void remove_saved_code(nmethod* nm);
   static void speculatively_disconnect(nmethod* nm);
 
--- a/hotspot/src/share/vm/code/compiledIC.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/code/compiledIC.cpp	Thu May 16 11:47:51 2013 +0100
@@ -45,25 +45,6 @@
 // Every time a compiled IC is changed or its type is being accessed,
 // either the CompiledIC_lock must be set or we must be at a safe point.
 
-
-// Release the CompiledICHolder* associated with this call site is there is one.
-void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
-  // This call site might have become stale so inspect it carefully.
-  NativeCall* call = nativeCall_at(call_site->addr());
-  if (is_icholder_entry(call->destination())) {
-    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
-    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
-  }
-}
-
-
-bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
-  // This call site might have become stale so inspect it carefully.
-  NativeCall* call = nativeCall_at(call_site->addr());
-  return is_icholder_entry(call->destination());
-}
-
-
 //-----------------------------------------------------------------------------
 // Low-level access to an inline cache. Private, since they might not be
 // MT-safe to use.
@@ -488,33 +469,6 @@
   return (cb != NULL && cb->is_adapter_blob());
 }
 
-
-CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
-  : _ic_call(call)
-{
-  address ic_call = call->instruction_address();
-
-  assert(ic_call != NULL, "ic_call address must be set");
-  assert(nm != NULL, "must pass nmethod");
-  assert(nm->contains(ic_call),   "must be in nmethod");
-
-  // search for the ic_call at the given address
-  RelocIterator iter(nm, ic_call, ic_call+1);
-  bool ret = iter.next();
-  assert(ret == true, "relocInfo must exist at this address");
-  assert(iter.addr() == ic_call, "must find ic_call");
-  if (iter.type() == relocInfo::virtual_call_type) {
-    virtual_call_Relocation* r = iter.virtual_call_reloc();
-    _is_optimized = false;
-    _value = nativeMovConstReg_at(r->cached_value());
-  } else {
-    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
-    _is_optimized = true;
-    _value = NULL;
-}
-}
-
-
 // ----------------------------------------------------------------------------
 
 void CompiledStaticCall::set_to_clean() {
@@ -549,33 +503,6 @@
   return nm->stub_contains(destination());
 }
 
-
-void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
-  address stub=find_stub();
-  guarantee(stub != NULL, "stub not found");
-
-  if (TraceICs) {
-    ResourceMark rm;
-    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
-                  instruction_address(),
-                  callee->name_and_sig_as_C_string());
-  }
-
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
-  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-
-  assert(method_holder->data()    == 0           || method_holder->data()    == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
-  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");
-
-  // Update stub
-  method_holder->set_data((intptr_t)callee());
-  jump->set_jump_destination(entry);
-
-  // Update jump to call
-  set_destination_mt_safe(stub);
-}
-
-
 void CompiledStaticCall::set(const StaticCallInfo& info) {
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
@@ -618,19 +545,6 @@
   }
 }
 
-
-void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
-  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
-  // Reset stub
-  address stub = static_stub->addr();
-  assert(stub!=NULL, "stub not found");
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
-  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-  method_holder->set_data(0);
-  jump->set_jump_destination((address)-1);
-}
-
-
 address CompiledStaticCall::find_stub() {
   // Find reloc. information containing this call-site
   RelocIterator iter((nmethod*)NULL, instruction_address());
@@ -668,19 +582,16 @@
           || is_optimized() || is_megamorphic(), "sanity check");
 }
 
-
 void CompiledIC::print() {
   print_compiled_ic();
   tty->cr();
 }
 
-
 void CompiledIC::print_compiled_ic() {
   tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
              instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination(), is_optimized() ? NULL : cached_value());
 }
 
-
 void CompiledStaticCall::print() {
   tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
   if (is_clean()) {
@@ -693,21 +604,4 @@
   tty->cr();
 }
 
-void CompiledStaticCall::verify() {
-  // Verify call
-  NativeCall::verify();
-  if (os::is_MP()) {
-    verify_alignment();
-  }
-
-  // Verify stub
-  address stub = find_stub();
-  assert(stub != NULL, "no stub found for static call");
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
-  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
-
-  // Verify state
-  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
-}
-
-#endif
+#endif // !PRODUCT
--- a/hotspot/src/share/vm/code/compiledIC.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/code/compiledIC.hpp	Thu May 16 11:47:51 2013 +0100
@@ -304,6 +304,11 @@
   friend CompiledStaticCall* compiledStaticCall_at(address native_call);
   friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);
 
+  // Code
+  static void emit_to_interp_stub(CodeBuffer &cbuf);
+  static int to_interp_stub_size();
+  static int reloc_to_interp_stub();
+
   // State
   bool is_clean() const;
   bool is_call_to_compiled() const;
--- a/hotspot/src/share/vm/code/stubs.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/code/stubs.cpp	Thu May 16 11:47:51 2013 +0100
@@ -67,7 +67,7 @@
   intptr_t size = round_to(buffer_size, 2*BytesPerWord);
   BufferBlob* blob = BufferBlob::create(name, size);
   if( blob == NULL) {
-    vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name));
+    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, err_msg("CodeCache: no room for %s", name));
   }
   _stub_interface  = stub_interface;
   _buffer_size     = blob->content_size();
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Thu May 16 11:47:51 2013 +0100
@@ -60,7 +60,7 @@
     const int bytes = chunk_factor * real_size + pd_code_alignment();
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
-      vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks");
+      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
     }
     _chunk = blob->content_begin();
     _chunk_end = _chunk + bytes;
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu May 16 11:47:51 2013 +0100
@@ -65,7 +65,7 @@
 HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
   char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool);
 
-#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name)   \
+#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)             \
   {                                                                      \
     Symbol* klass_name = (method)->klass_name();                         \
     Symbol* name = (method)->name();                                     \
@@ -77,8 +77,7 @@
       signature->bytes(), signature->utf8_length());                     \
   }
 
-#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method,                \
-                                        comp_name, success)              \
+#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)      \
   {                                                                      \
     Symbol* klass_name = (method)->klass_name();                         \
     Symbol* name = (method)->name();                                     \
@@ -92,7 +91,7 @@
 
 #else /* USDT2 */
 
-#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name)   \
+#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)             \
   {                                                                      \
     Symbol* klass_name = (method)->klass_name();                         \
     Symbol* name = (method)->name();                                     \
@@ -104,8 +103,7 @@
       (char *) signature->bytes(), signature->utf8_length());            \
   }
 
-#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method,                \
-                                        comp_name, success)              \
+#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)      \
   {                                                                      \
     Symbol* klass_name = (method)->klass_name();                         \
     Symbol* name = (method)->name();                                     \
@@ -120,8 +118,8 @@
 
 #else //  ndef DTRACE_ENABLED
 
-#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler, method, comp_name)
-#define DTRACE_METHOD_COMPILE_END_PROBE(compiler, method, comp_name, success)
+#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
+#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)
 
 #endif // ndef DTRACE_ENABLED
 
@@ -1229,7 +1227,7 @@
     if (method->is_not_compilable(comp_level)) return NULL;
 
     if (UseCodeCacheFlushing) {
-      nmethod* saved = CodeCache::find_and_remove_saved_code(method());
+      nmethod* saved = CodeCache::reanimate_saved_code(method());
       if (saved != NULL) {
         method->set_code(method, saved);
         return saved;
@@ -1288,9 +1286,9 @@
     method->jmethod_id();
   }
 
-  // If the compiler is shut off due to code cache flushing or otherwise,
+  // If the compiler is shut off due to the code cache getting full,
   // fail out now so blocking compiles dont hang the java thread
-  if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
+  if (!should_compile_new_jobs()) {
     CompilationPolicy::policy()->delay_compilation(method());
     return NULL;
   }
@@ -1766,8 +1764,7 @@
     // Save information about this method in case of failure.
     set_last_compile(thread, method, is_osr, task_level);
 
-    DTRACE_METHOD_COMPILE_BEGIN_PROBE(compiler(task_level), method,
-                                      compiler_name(task_level));
+    DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
   }
 
   // Allocate a new set of JNI handles.
@@ -1842,13 +1839,14 @@
         }
       }
     }
+    // simulate crash during compilation
+    assert(task->compile_id() != CICrashAt, "just as planned");
   }
   pop_jni_handle_block();
 
   methodHandle method(thread, task->method());
 
-  DTRACE_METHOD_COMPILE_END_PROBE(compiler(task_level), method,
-                                  compiler_name(task_level), task->is_success());
+  DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success());
 
   collect_statistics(thread, time, task);
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 16 11:47:51 2013 +0100
@@ -193,7 +193,8 @@
      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
-  _debug_collection_type(Concurrent_collection_type)
+  _debug_collection_type(Concurrent_collection_type),
+  _did_compact(false)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
   HeapWord* end    = (HeapWord*) _virtual_space.high();
@@ -917,18 +918,15 @@
     return;
   }
 
-  // Compute some numbers about the state of the heap.
-  const size_t used_after_gc = used();
-  const size_t capacity_after_gc = capacity();
+  // The heap has been compacted but not reset yet.
+  // Any metric such as free() or used() will be incorrect.
 
   CardGeneration::compute_new_size();
 
   // Reset again after a possible resizing
-  cmsSpace()->reset_after_compaction();
-
-  assert(used() == used_after_gc && used_after_gc <= capacity(),
-         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
-         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
+  if (did_compact()) {
+    cmsSpace()->reset_after_compaction();
+  }
 }
 
 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
@@ -1578,6 +1576,8 @@
   return false;
 }
 
+void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
+
 // Clear _expansion_cause fields of constituent generations
 void CMSCollector::clear_expansion_cause() {
   _cmsGen->clear_expansion_cause();
@@ -1675,7 +1675,6 @@
   }
   acquire_control_and_collect(full, clear_all_soft_refs);
   _full_gcs_since_conc_gc++;
-
 }
 
 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
@@ -1857,6 +1856,7 @@
     }
   }
 
+  set_did_compact(should_compact);
   if (should_compact) {
     // If the collection is being acquired from the background
     // collector, there may be references on the discovered
@@ -2444,8 +2444,7 @@
         // initial marking in checkpointRootsInitialWork has been completed
         if (VerifyDuringGC &&
             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before initial mark: ");
-          Universe::verify();
+          Universe::verify("Verify before initial mark: ");
         }
         {
           bool res = markFromRoots(false);
@@ -2456,8 +2455,7 @@
       case FinalMarking:
         if (VerifyDuringGC &&
             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before re-mark: ");
-          Universe::verify();
+          Universe::verify("Verify before re-mark: ");
         }
         checkpointRootsFinal(false, clear_all_soft_refs,
                              init_mark_was_synchronous);
@@ -2468,8 +2466,7 @@
         // final marking in checkpointRootsFinal has been completed
         if (VerifyDuringGC &&
             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before sweep: ");
-          Universe::verify();
+          Universe::verify("Verify before sweep: ");
         }
         sweep(false);
         assert(_collectorState == Resizing, "Incorrect state");
@@ -2484,8 +2481,7 @@
         // The heap has been resized.
         if (VerifyDuringGC &&
             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before reset: ");
-          Universe::verify();
+          Universe::verify("Verify before reset: ");
         }
         reset(false);
         assert(_collectorState == Idling, "Collector state should "
@@ -2722,6 +2718,7 @@
     Chunk::clean_chunk_pool();
   }
 
+  set_did_compact(false);
   _between_prologue_and_epilogue = false;  // ready for next cycle
 }
 
@@ -2853,8 +2850,8 @@
   bool failed() { return _failed; }
 };
 
-bool CMSCollector::verify_after_remark() {
-  gclog_or_tty->print(" [Verifying CMS Marking... ");
+bool CMSCollector::verify_after_remark(bool silent) {
+  if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
   static bool init = false;
 
@@ -2915,7 +2912,7 @@
     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
             CMSRemarkVerifyVariant);
   }
-  gclog_or_tty->print(" done] ");
+  if (!silent) gclog_or_tty->print(" done] ");
   return true;
 }
 
@@ -3426,8 +3423,9 @@
 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
   assert_locked_or_safepoint(Heap_lock);
   assert_lock_strong(freelistLock());
-  // XXX Fix when compaction is implemented.
-  warning("Shrinking of CMS not yet implemented");
+  if (PrintGCDetails && Verbose) {
+    warning("Shrinking of CMS not yet implemented");
+  }
   return;
 }
 
@@ -6010,26 +6008,23 @@
                                         &cmsDrainMarkingStackClosure,
                                         NULL);
     }
-    verify_work_stacks_empty();
-  }
+  }
+
+  // This is the point where the entire marking should have completed.
+  verify_work_stacks_empty();
 
   if (should_unload_classes()) {
     {
       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
 
-      // Follow SystemDictionary roots and unload classes
+      // Unload classes and purge the SystemDictionary.
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
 
-      // Follow CodeCache roots and unload any methods marked for unloading
+      // Unload nmethods.
       CodeCache::do_unloading(&_is_alive_closure, purged_class);
 
-      cmsDrainMarkingStackClosure.do_void();
-      verify_work_stacks_empty();
-
-      // Update subklass/sibling/implementor links in KlassKlass descendants
+      // Prune dead klasses from subklass/sibling/implementor lists.
       Klass::clean_weak_klass_links(&_is_alive_closure);
-      // Nothing should have been pushed onto the working stacks.
-      verify_work_stacks_empty();
     }
 
     {
@@ -6043,11 +6038,10 @@
   // Need to check if we really scanned the StringTable.
   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
     TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
-    // Now clean up stale oops in StringTable
+    // Delete entries for dead interned strings.
     StringTable::unlink(&_is_alive_closure);
   }
 
-  verify_work_stacks_empty();
   // Restore any preserved marks as a result of mark stack or
   // work queue overflow
   restore_preserved_marks_if_any();  // done single-threaded for now
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu May 16 11:47:51 2013 +0100
@@ -604,6 +604,8 @@
   ConcurrentMarkSweepPolicy* _collector_policy;
   ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
 
+  void set_did_compact(bool v);
+
   // XXX Move these to CMSStats ??? FIX ME !!!
   elapsedTimer _inter_sweep_timer;   // time between sweeps
   elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
@@ -990,7 +992,7 @@
 
   // debugging
   void verify();
-  bool verify_after_remark();
+  bool verify_after_remark(bool silent = VerifySilently);
   void verify_ok_to_terminate() const PRODUCT_RETURN;
   void verify_work_stacks_empty() const PRODUCT_RETURN;
   void verify_overflow_empty() const PRODUCT_RETURN;
@@ -1081,6 +1083,10 @@
 
   CollectionTypes _debug_collection_type;
 
+  // True if a compacting collection was done.
+  bool _did_compact;
+  bool did_compact() { return _did_compact; }
+
   // Fraction of current occupancy at which to start a CMS collection which
   // will collect this generation (at least).
   double _initiating_occupancy;
@@ -1121,6 +1127,8 @@
   // Adaptive size policy
   CMSAdaptiveSizePolicy* size_policy();
 
+  void set_did_compact(bool v) { _did_compact = v; }
+
   bool refs_discovery_is_atomic() const { return false; }
   bool refs_discovery_is_mt()     const {
     // Note: CMS does MT-discovery during the parallel-remark
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu May 16 11:47:51 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,40 +26,12 @@
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1CollectorPolicy.hpp"
-#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
-#include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
-#include "memory/space.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/java.hpp"
-#include "utilities/copy.hpp"
-
-// Possible sizes for the card counts cache: odd primes that roughly double in size.
-// (See jvmtiTagMap.cpp).
-
-#define MAX_SIZE ((size_t) -1)
+#include "gc_implementation/g1/g1HotCardCache.hpp"
 
-size_t ConcurrentG1Refine::_cc_cache_sizes[] = {
-          16381,    32771,    76831,    150001,   307261,
-         614563,  1228891,  2457733,   4915219,  9830479,
-       19660831, 39321619, 78643219, 157286461,  MAX_SIZE
-  };
-
-ConcurrentG1Refine::ConcurrentG1Refine() :
-  _card_counts(NULL), _card_epochs(NULL),
-  _n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
-  _cache_size_index(0), _expand_card_counts(false),
-  _hot_cache(NULL),
-  _def_use_cache(false), _use_cache(false),
-  // We initialize the epochs of the array to 0. By initializing
-  // _n_periods to 1 and not 0 we automatically invalidate all the
-  // entries on the array. Otherwise we might accidentally think that
-  // we claimed a card that was in fact never set (see CR7033292).
-  _n_periods(1),
-  _threads(NULL), _n_threads(0)
+ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
+  _threads(NULL), _n_threads(0),
+  _hot_card_cache(g1h)
 {
-
   // Ergomonically select initial concurrent refinement parameters
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
     FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
@@ -75,13 +47,17 @@
     FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
   }
   set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
+
   _n_worker_threads = thread_num();
   // We need one extra thread to do the young gen rset size sampling.
   _n_threads = _n_worker_threads + 1;
+
   reset_threshold_step();
 
   _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);
+
   int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+
   ConcurrentG1RefineThread *next = NULL;
   for (int i = _n_threads - 1; i >= 0; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
@@ -100,74 +76,8 @@
   }
 }
 
-int ConcurrentG1Refine::thread_num() {
-  return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
-}
-
 void ConcurrentG1Refine::init() {
-  if (G1ConcRSLogCacheSize > 0) {
-    _g1h = G1CollectedHeap::heap();
-
-    _max_cards = _g1h->max_capacity() >> CardTableModRefBS::card_shift;
-    _max_n_card_counts = _max_cards * G1MaxHotCardCountSizePercent / 100;
-
-    size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
-    guarantee(_max_cards < max_card_num, "card_num representation");
-
-    // We need _n_card_counts to be less than _max_n_card_counts here
-    // so that the expansion call (below) actually allocates the
-    // _counts and _epochs arrays.
-    assert(_n_card_counts == 0, "pre-condition");
-    assert(_max_n_card_counts > 0, "pre-condition");
-
-    // Find the index into cache size array that is of a size that's
-    // large enough to hold desired_sz.
-    size_t desired_sz = _max_cards / InitialCacheFraction;
-    int desired_sz_index = 0;
-    while (_cc_cache_sizes[desired_sz_index] < desired_sz) {
-      desired_sz_index += 1;
-      assert(desired_sz_index <  MAX_CC_CACHE_INDEX, "invariant");
-    }
-    assert(desired_sz_index <  MAX_CC_CACHE_INDEX, "invariant");
-
-    // If the desired_sz value is between two sizes then
-    // _cc_cache_sizes[desired_sz_index-1] < desired_sz <= _cc_cache_sizes[desired_sz_index]
-    // we will start with the lower size in the optimistic expectation that
-    // we will not need to expand up. Note desired_sz_index could also be 0.
-    if (desired_sz_index > 0 &&
-        _cc_cache_sizes[desired_sz_index] > desired_sz) {
-      desired_sz_index -= 1;
-    }
-
-    if (!expand_card_count_cache(desired_sz_index)) {
-      // Allocation was unsuccessful - exit
-      vm_exit_during_initialization("Could not reserve enough space for card count cache");
-    }
-    assert(_n_card_counts > 0, "post-condition");
-    assert(_cache_size_index == desired_sz_index, "post-condition");
-
-    Copy::fill_to_bytes(&_card_counts[0],
-                        _n_card_counts * sizeof(CardCountCacheEntry));
-    Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
-
-    ModRefBarrierSet* bs = _g1h->mr_bs();
-    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
-    _ct_bs = (CardTableModRefBS*)bs;
-    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
-
-    _def_use_cache = true;
-    _use_cache = true;
-    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
-    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
-    _n_hot = 0;
-    _hot_cache_idx = 0;
-
-    // For refining the cards in the hot cache in parallel
-    int n_workers = (ParallelGCThreads > 0 ?
-                        _g1h->workers()->total_workers() : 1);
-    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
-    _hot_cache_par_claimed_idx = 0;
-  }
+  _hot_card_cache.initialize();
 }
 
 void ConcurrentG1Refine::stop() {
@@ -188,17 +98,6 @@
 }
 
 ConcurrentG1Refine::~ConcurrentG1Refine() {
-  if (G1ConcRSLogCacheSize > 0) {
-    // Please see the comment in allocate_card_count_cache
-    // for why we call os::malloc() and os::free() directly.
-    assert(_card_counts != NULL, "Logic");
-    os::free(_card_counts, mtGC);
-    assert(_card_epochs != NULL, "Logic");
-    os::free(_card_epochs, mtGC);
-
-    assert(_hot_cache != NULL, "Logic");
-    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
-  }
   if (_threads != NULL) {
     for (int i = 0; i < _n_threads; i++) {
       delete _threads[i];
@@ -215,317 +114,10 @@
   }
 }
 
-bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
-  HeapWord* start = _ct_bs->addr_for(card_ptr);
-  HeapRegion* r = _g1h->heap_region_containing(start);
-  if (r != NULL && r->is_young()) {
-    return true;
-  }
-  // This card is not associated with a heap region
-  // so can't be young.
-  return false;
-}
-
-jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
-  unsigned new_card_num = ptr_2_card_num(card_ptr);
-  unsigned bucket = hash(new_card_num);
-  assert(0 <= bucket && bucket < _n_card_counts, "Bounds");
-
-  CardCountCacheEntry* count_ptr = &_card_counts[bucket];
-  CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];
-
-  // We have to construct a new entry if we haven't updated the counts
-  // during the current period, or if the count was updated for a
-  // different card number.
-  unsigned int new_epoch = (unsigned int) _n_periods;
-  julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);
-
-  while (true) {
-    // Fetch the previous epoch value
-    julong prev_epoch_entry = epoch_ptr->_value;
-    julong cas_res;
-
-    if (extract_epoch(prev_epoch_entry) != new_epoch) {
-      // This entry has not yet been updated during this period.
-      // Note: we update the epoch value atomically to ensure
-      // that there is only one winner that updates the cached
-      // card_ptr value even though all the refine threads share
-      // the same epoch value.
-
-      cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
-                                         (volatile jlong*)&epoch_ptr->_value,
-                                         (jlong) prev_epoch_entry);
-
-      if (cas_res == prev_epoch_entry) {
-        // We have successfully won the race to update the
-        // epoch and card_num value. Make it look like the
-        // count and eviction count were previously cleared.
-        count_ptr->_count = 1;
-        count_ptr->_evict_count = 0;
-        *count = 0;
-        // We can defer the processing of card_ptr
-        *defer = true;
-        return card_ptr;
-      }
-      // We did not win the race to update the epoch field, so some other
-      // thread must have done it. The value that gets returned by CAS
-      // should be the new epoch value.
-      assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
-      // We could 'continue' here or just re-read the previous epoch value
-      prev_epoch_entry = epoch_ptr->_value;
-    }
-
-    // The epoch entry for card_ptr has been updated during this period.
-    unsigned old_card_num = extract_card_num(prev_epoch_entry);
-
-    // The card count that will be returned to caller
-    *count = count_ptr->_count;
-
-    // Are we updating the count for the same card?
-    if (new_card_num == old_card_num) {
-      // Same card - just update the count. We could have more than one
-      // thread racing to update count for the current card. It should be
-      // OK not to use a CAS as the only penalty should be some missed
-      // increments of the count which delays identifying the card as "hot".
-
-      if (*count < max_jubyte) count_ptr->_count++;
-      // We can defer the processing of card_ptr
-      *defer = true;
-      return card_ptr;
-    }
-
-    // Different card - evict old card info
-    if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
-    if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
-      // Trigger a resize the next time we clear
-      _expand_card_counts = true;
-    }
-
-    cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
-                                       (volatile jlong*)&epoch_ptr->_value,
-                                       (jlong) prev_epoch_entry);
-
-    if (cas_res == prev_epoch_entry) {
-      // We successfully updated the card num value in the epoch entry
-      count_ptr->_count = 0; // initialize counter for new card num
-      jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
-
-      // Even though the region containg the card at old_card_num was not
-      // in the young list when old_card_num was recorded in the epoch
-      // cache it could have been added to the free list and subsequently
-      // added to the young list in the intervening time. See CR 6817995.
-      // We do not deal with this case here - it will be handled in
-      // HeapRegion::oops_on_card_seq_iterate_careful after it has been
-      // determined that the region containing the card has been allocated
-      // to, and it's safe to check the young type of the region.
-
-      // We do not want to defer processing of card_ptr in this case
-      // (we need to refine old_card_ptr and card_ptr)
-      *defer = false;
-      return old_card_ptr;
-    }
-    // Someone else beat us - try again.
-  }
-}
-
-jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
-  int count;
-  jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
-  assert(cached_ptr != NULL, "bad cached card ptr");
-
-  // We've just inserted a card pointer into the card count cache
-  // and got back the card that we just inserted or (evicted) the
-  // previous contents of that count slot.
-
-  // The card we got back could be in a young region. When the
-  // returned card (if evicted) was originally inserted, we had
-  // determined that its containing region was not young. However
-  // it is possible for the region to be freed during a cleanup
-  // pause, then reallocated and tagged as young which will result
-  // in the returned card residing in a young region.
-  //
-  // We do not deal with this case here - the change from non-young
-  // to young could be observed at any time - it will be handled in
-  // HeapRegion::oops_on_card_seq_iterate_careful after it has been
-  // determined that the region containing the card has been allocated
-  // to.
-
-  // The card pointer we obtained from card count cache is not hot
-  // so do not store it in the cache; return it for immediate
-  // refining.
-  if (count < G1ConcRSHotCardLimit) {
-    return cached_ptr;
-  }
-
-  // Otherwise, the pointer we got from the _card_counts cache is hot.
-  jbyte* res = NULL;
-  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
-  if (_n_hot == _hot_cache_size) {
-    res = _hot_cache[_hot_cache_idx];
-    _n_hot--;
-  }
-  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
-  _hot_cache[_hot_cache_idx] = cached_ptr;
-  _hot_cache_idx++;
-  if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
-  _n_hot++;
-
-  // The card obtained from the hot card cache could be in a young
-  // region. See above on how this can happen.
-
-  return res;
-}
-
-void ConcurrentG1Refine::clean_up_cache(int worker_i,
-                                        G1RemSet* g1rs,
-                                        DirtyCardQueue* into_cset_dcq) {
-  assert(!use_cache(), "cache should be disabled");
-  int start_idx;
-
-  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
-    int end_idx = start_idx + _hot_cache_par_chunk_size;
-
-    if (start_idx ==
-        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
-      // The current worker has successfully claimed the chunk [start_idx..end_idx)
-      end_idx = MIN2(end_idx, _n_hot);
-      for (int i = start_idx; i < end_idx; i++) {
-        jbyte* entry = _hot_cache[i];
-        if (entry != NULL) {
-          if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
-            // 'entry' contains references that point into the current
-            // collection set. We need to record 'entry' in the DCQS
-            // that's used for that purpose.
-            //
-            // The only time we care about recording cards that contain
-            // references that point into the collection set is during
-            // RSet updating while within an evacuation pause.
-            // In this case worker_i should be the id of a GC worker thread
-            assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
-            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
-            into_cset_dcq->enqueue(entry);
-          }
-        }
-      }
-    }
-  }
-}
-
-// The arrays used to hold the card counts and the epochs must have
-// a 1:1 correspondence. Hence they are allocated and freed together
-// Returns true if the allocations of both the counts and epochs
-// were successful; false otherwise.
-bool ConcurrentG1Refine::allocate_card_count_cache(size_t n,
-                                                   CardCountCacheEntry** counts,
-                                                   CardEpochCacheEntry** epochs) {
-  // We call the allocation/free routines directly for the counts
-  // and epochs arrays. The NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY
-  // macros call AllocateHeap and FreeHeap respectively.
-  // AllocateHeap will call vm_exit_out_of_memory in the event
-  // of an allocation failure and abort the JVM. With the
-  // _counts/epochs arrays we only need to abort the JVM if the
-  // initial allocation of these arrays fails.
-  //
-  // Additionally AllocateHeap/FreeHeap do some tracing of
-  // allocate/free calls so calling one without calling the
-  // other can cause inconsistencies in the tracing. So we
-  // call neither.
-
-  assert(*counts == NULL, "out param");
-  assert(*epochs == NULL, "out param");
-
-  size_t counts_size = n * sizeof(CardCountCacheEntry);
-  size_t epochs_size = n * sizeof(CardEpochCacheEntry);
-
-  *counts = (CardCountCacheEntry*) os::malloc(counts_size, mtGC);
-  if (*counts == NULL) {
-    // allocation was unsuccessful
-    return false;
-  }
-
-  *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size, mtGC);
-  if (*epochs == NULL) {
-    // allocation was unsuccessful - free counts array
-    assert(*counts != NULL, "must be");
-    os::free(*counts, mtGC);
-    *counts = NULL;
-    return false;
-  }
-
-  // We successfully allocated both counts and epochs
-  return true;
-}
-
-// Returns true if the card counts/epochs cache was
-// successfully expanded; false otherwise.
-bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
-  // Can we expand the card count and epoch tables?
-  if (_n_card_counts < _max_n_card_counts) {
-    assert(cache_size_idx >= 0 && cache_size_idx  < MAX_CC_CACHE_INDEX, "oob");
-
-    size_t cache_size = _cc_cache_sizes[cache_size_idx];
-    // Make sure we don't go bigger than we will ever need
-    cache_size = MIN2(cache_size, _max_n_card_counts);
-
-    // Should we expand the card count and card epoch tables?
-    if (cache_size > _n_card_counts) {
-      // We have been asked to allocate new, larger, arrays for
-      // the card counts and the epochs. Attempt the allocation
-      // of both before we free the existing arrays in case
-      // the allocation is unsuccessful...
-      CardCountCacheEntry* counts = NULL;
-      CardEpochCacheEntry* epochs = NULL;
-
-      if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
-        // Allocation was successful.
-        // We can just free the old arrays; we're
-        // not interested in preserving the contents
-        if (_card_counts != NULL) os::free(_card_counts, mtGC);
-        if (_card_epochs != NULL) os::free(_card_epochs, mtGC);
-
-        // Cache the size of the arrays and the index that got us there.
-        _n_card_counts = cache_size;
-        _cache_size_index = cache_size_idx;
-
-        _card_counts = counts;
-        _card_epochs = epochs;
-
-        // We successfully allocated/expanded the caches.
-        return true;
-      }
-    }
-  }
-
-  // We did not successfully expand the caches.
-  return false;
-}
-
-void ConcurrentG1Refine::clear_and_record_card_counts() {
-  if (G1ConcRSLogCacheSize == 0) {
-    return;
-  }
-
-  double start = os::elapsedTime();
-
-  if (_expand_card_counts) {
-    int new_idx = _cache_size_index + 1;
-
-    if (expand_card_count_cache(new_idx)) {
-      // Allocation was successful and  _n_card_counts has
-      // been updated to the new size. We only need to clear
-      // the epochs so we don't read a bogus epoch value
-      // when inserting a card into the hot card cache.
-      Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
-    }
-    _expand_card_counts = false;
-  }
-
-  int this_epoch = (int) _n_periods;
-  assert((this_epoch+1) <= max_jint, "to many periods");
-  // Update epoch
-  _n_periods++;
-  double cc_clear_time_ms = (os::elapsedTime() - start) * 1000;
-  _g1h->g1_policy()->phase_times()->record_cc_clear_time_ms(cc_clear_time_ms);
+int ConcurrentG1Refine::thread_num() {
+  int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
+                                                : ParallelGCThreads;
+  return MAX2<int>(n_threads, 1);
 }
 
 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Thu May 16 11:47:51 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,13 +25,15 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
 
+#include "gc_implementation/g1/g1HotCardCache.hpp"
 #include "memory/allocation.hpp"
-#include "memory/cardTableModRefBS.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 // Forward decl
 class ConcurrentG1RefineThread;
+class G1CollectedHeap;
+class G1HotCardCache;
 class G1RemSet;
 
 class ConcurrentG1Refine: public CHeapObj<mtGC> {
@@ -61,141 +63,14 @@
 
   int _thread_threshold_step;
 
+  // We delay the refinement of 'hot' cards using the hot card cache.
+  G1HotCardCache _hot_card_cache;
+
   // Reset the threshold step value based of the current zone boundaries.
   void reset_threshold_step();
 
-  // The cache for card refinement.
-  bool   _use_cache;
-  bool   _def_use_cache;
-
-  size_t _n_periods;    // Used as clearing epoch
-
-  // An evicting cache of the number of times each card
-  // is accessed. Reduces, but does not eliminate, the amount
-  // of duplicated processing of dirty cards.
-
-  enum SomePrivateConstants {
-    epoch_bits           = 32,
-    card_num_shift       = epoch_bits,
-    epoch_mask           = AllBits,
-    card_num_mask        = AllBits,
-
-    // The initial cache size is approximately this fraction
-    // of a maximal cache (i.e. the size needed for all cards
-    // in the heap)
-    InitialCacheFraction = 512
-  };
-
-  const static julong card_num_mask_in_place =
-                        (julong) card_num_mask << card_num_shift;
-
-  typedef struct {
-    julong _value;      // |  card_num   |  epoch   |
-  } CardEpochCacheEntry;
-
-  julong make_epoch_entry(unsigned int card_num, unsigned int epoch) {
-    assert(0 <= card_num && card_num < _max_cards, "Bounds");
-    assert(0 <= epoch && epoch <= _n_periods, "must be");
-
-    return ((julong) card_num << card_num_shift) | epoch;
-  }
-
-  unsigned int extract_epoch(julong v) {
-    return (v & epoch_mask);
-  }
-
-  unsigned int extract_card_num(julong v) {
-    return (v & card_num_mask_in_place) >> card_num_shift;
-  }
-
-  typedef struct {
-    unsigned char _count;
-    unsigned char _evict_count;
-  } CardCountCacheEntry;
-
-  CardCountCacheEntry* _card_counts;
-  CardEpochCacheEntry* _card_epochs;
-
-  // The current number of buckets in the card count cache
-  size_t _n_card_counts;
-
-  // The number of cards for the entire reserved heap
-  size_t _max_cards;
-
-  // The max number of buckets for the card counts and epochs caches.
-  // This is the maximum that the counts and epochs will grow to.
-  // It is specified as a fraction or percentage of _max_cards using
-  // G1MaxHotCardCountSizePercent.
-  size_t _max_n_card_counts;
-
-  // Possible sizes of the cache: odd primes that roughly double in size.
-  // (See jvmtiTagMap.cpp).
-  enum {
-    MAX_CC_CACHE_INDEX = 15    // maximum index into the cache size array.
-  };
-
-  static size_t _cc_cache_sizes[MAX_CC_CACHE_INDEX];
-
-  // The index in _cc_cache_sizes corresponding to the size of
-  // _card_counts.
-  int _cache_size_index;
-
-  bool _expand_card_counts;
-
-  const jbyte* _ct_bot;
-
-  jbyte**      _hot_cache;
-  int          _hot_cache_size;
-  int          _n_hot;
-  int          _hot_cache_idx;
-
-  int          _hot_cache_par_chunk_size;
-  volatile int _hot_cache_par_claimed_idx;
-
-  // Needed to workaround 6817995
-  CardTableModRefBS* _ct_bs;
-  G1CollectedHeap*   _g1h;
-
-  // Helper routine for expand_card_count_cache().
-  // The arrays used to hold the card counts and the epochs must have
-  // a 1:1 correspondence. Hence they are allocated and freed together.
-  // Returns true if the allocations of both the counts and epochs
-  // were successful; false otherwise.
-  bool allocate_card_count_cache(size_t n,
-                                 CardCountCacheEntry** counts,
-                                 CardEpochCacheEntry** epochs);
-
-  // Expands the arrays that hold the card counts and epochs
-  // to the cache size at index. Returns true if the expansion/
-  // allocation was successful; false otherwise.
-  bool expand_card_count_cache(int index);
-
-  // hash a given key (index of card_ptr) with the specified size
-  static unsigned int hash(size_t key, size_t size) {
-    return (unsigned int) (key % size);
-  }
-
-  // hash a given key (index of card_ptr)
-  unsigned int hash(size_t key) {
-    return hash(key, _n_card_counts);
-  }
-
-  unsigned int ptr_2_card_num(jbyte* card_ptr) {
-    return (unsigned int) (card_ptr - _ct_bot);
-  }
-
-  jbyte* card_num_2_ptr(unsigned int card_num) {
-    return (jbyte*) (_ct_bot + card_num);
-  }
-
-  // Returns the count of this card after incrementing it.
-  jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer);
-
-  // Returns true if this card is in a young region
-  bool is_young_card(jbyte* card_ptr);
-
  public:
-  ConcurrentG1Refine();
+  ConcurrentG1Refine(G1CollectedHeap* g1h);
   ~ConcurrentG1Refine();
 
   void init(); // Accomplish some initialization that has to wait.
@@ -206,34 +81,6 @@
   // Iterate over the conc refine threads
   void threads_do(ThreadClosure *tc);
 
-  // If this is the first entry for the slot, writes into the cache and
-  // returns NULL.  If it causes an eviction, returns the evicted pointer.
-  // Otherwise, its a cache hit, and returns NULL.
-  jbyte* cache_insert(jbyte* card_ptr, bool* defer);
-
-  // Process the cached entries.
-  void clean_up_cache(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
-
-  // Set up for parallel processing of the cards in the hot cache
-  void clear_hot_cache_claimed_index() {
-    _hot_cache_par_claimed_idx = 0;
-  }
-
-  // Discard entries in the hot cache.
-  void clear_hot_cache() {
-    _hot_cache_idx = 0; _n_hot = 0;
-  }
-
-  bool hot_cache_is_empty() { return _n_hot == 0; }
-
-  bool use_cache() { return _use_cache; }
-  void set_use_cache(bool b) {
-    if (b) _use_cache = _def_use_cache;
-    else   _use_cache = false;
-  }
-
-  void clear_and_record_card_counts();
-
   static int thread_num();
 
   void print_worker_threads_on(outputStream* st) const;
@@ -250,6 +97,8 @@
   int worker_thread_num() const { return _n_worker_threads; }
 
   int thread_threshold_step() const { return _thread_threshold_step; }
+
+  G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu May 16 11:47:51 2013 +0100
@@ -1273,10 +1273,9 @@
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* silent */ false,
-                     /* option */ VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking,
+                     " VerifyDuringGC:(before)");
   }
 
   G1CollectorPolicy* g1p = g1h->g1_policy();
@@ -1300,10 +1299,9 @@
     // Verify the heap w.r.t. the previous marking bitmap.
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
-      gclog_or_tty->print(" VerifyDuringGC:(overflow)");
       Universe::heap()->prepare_for_verify();
-      Universe::verify(/* silent */ false,
-                       /* option */ VerifyOption_G1UsePrevMarking);
+      Universe::verify(VerifyOption_G1UsePrevMarking,
+                       " VerifyDuringGC:(overflow)");
     }
 
     // Clear the marking state because we will be restarting
@@ -1323,10 +1321,9 @@
 
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
-      gclog_or_tty->print(" VerifyDuringGC:(after)");
       Universe::heap()->prepare_for_verify();
-      Universe::verify(/* silent */ false,
-                       /* option */ VerifyOption_G1UseNextMarking);
+      Universe::verify(VerifyOption_G1UseNextMarking,
+                       " VerifyDuringGC:(after)");
     }
     assert(!restart_for_overflow(), "sanity");
     // Completely reset the marking state since marking completed
@@ -1972,10 +1969,9 @@
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* silent */ false,
-                     /* option */ VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking,
+                     " VerifyDuringGC:(before)");
   }
 
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
@@ -2127,10 +2123,9 @@
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    gclog_or_tty->print(" VerifyDuringGC:(after)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* silent */ false,
-                     /* option */ VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking,
+                     " VerifyDuringGC:(after)");
   }
 
   g1h->verify_region_sets_optional();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Thu May 16 11:47:51 2013 +0100
@@ -77,7 +77,7 @@
     assert(delta > 0, "just checking");
     if (!_vs.expand_by(delta)) {
       // Do better than this for Merlin
-      vm_exit_out_of_memory(delta, "offset table expansion");
+      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
     }
     assert(_vs.high() == high + delta, "invalid expansion");
     // Initialization of the contents is left to the
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu May 16 11:47:51 2013 +0100
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CardCounts.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/copy.hpp"
+
+void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
+  if (has_count_table()) {
+    check_card_num(from_card_num,
+                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
+    assert(from_card_num < to_card_num,
+           err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
+                   from_card_num, to_card_num));
+    assert(to_card_num <= _committed_max_card_num,
+           err_msg("to card num out of range: "
+                   "to: "SIZE_FORMAT ", "
+                   "max: "SIZE_FORMAT,
+                   to_card_num, _committed_max_card_num));
+
+    to_card_num = MIN2(_committed_max_card_num, to_card_num);
+
+    Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
+  }
+}
+
+G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
+  _g1h(g1h), _card_counts(NULL),
+  _reserved_max_card_num(0), _committed_max_card_num(0),
+  _committed_size(0) {}
+
+void G1CardCounts::initialize() {
+  assert(_g1h->max_capacity() > 0, "initialization order");
+  assert(_g1h->capacity() == 0, "initialization order");
+
+  if (G1ConcRSHotCardLimit > 0) {
+    // The max value we can store in the counts table is
+    // max_jubyte. Guarantee the value of the hot
+    // threshold limit is no more than this.
+    guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
+
+    ModRefBarrierSet* bs = _g1h->mr_bs();
+    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
+    _ct_bs = (CardTableModRefBS*)bs;
+    _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
+
+    // Allocate/Reserve the counts table
+    size_t reserved_bytes = _g1h->max_capacity();
+    _reserved_max_card_num = reserved_bytes >> CardTableModRefBS::card_shift;
+
+    size_t reserved_size = _reserved_max_card_num * sizeof(jbyte);
+    ReservedSpace rs(ReservedSpace::allocation_align_size_up(reserved_size));
+    if (!rs.is_reserved()) {
+      warning("Could not reserve enough space for the card counts table");
+      guarantee(!has_reserved_count_table(), "should be NULL");
+      return;
+    }
+
+    MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+
+    _card_counts_storage.initialize(rs, 0);
+    _card_counts = (jubyte*) _card_counts_storage.low();
+  }
+}
+
+void G1CardCounts::resize(size_t heap_capacity) {
+  // Expand the card counts table to handle a heap with the given capacity.
+
+  if (!has_reserved_count_table()) {
+    // Don't expand if we failed to reserve the card counts table.
+    return;
+  }
+
+  assert(_committed_size ==
+         ReservedSpace::allocation_align_size_up(_committed_size),
+         err_msg("Unaligned? committed_size: " SIZE_FORMAT, _committed_size));
+
+  // Verify that the committed space for the card counts
+  // matches our committed max card num.
+  size_t prev_committed_size = _committed_size;
+  size_t prev_committed_card_num = prev_committed_size / sizeof(jbyte);
+  assert(prev_committed_card_num == _committed_max_card_num,
+         err_msg("Card mismatch: "
+                 "prev: " SIZE_FORMAT ", "
+                 "committed: "SIZE_FORMAT,
+                 prev_committed_card_num, _committed_max_card_num));
+
+  size_t new_size = (heap_capacity >> CardTableModRefBS::card_shift) * sizeof(jbyte);
+  size_t new_committed_size = ReservedSpace::allocation_align_size_up(new_size);
+  size_t new_committed_card_num =
+                MIN2(_reserved_max_card_num, new_committed_size / sizeof(jbyte));
+
+  if (_committed_max_card_num < new_committed_card_num) {
+    // we need to expand the backing store for the card counts
+    size_t expand_size = new_committed_size - prev_committed_size;
+
+    if (!_card_counts_storage.expand_by(expand_size)) {
+      warning("Card counts table backing store commit failure");
+      return;
+    }
+    assert(_card_counts_storage.committed_size() == new_committed_size,
+           "expansion commit failure");
+
+    _committed_size = new_committed_size;
+    _committed_max_card_num = new_committed_card_num;
+
+    clear_range(prev_committed_card_num, _committed_max_card_num);
+  }
+}
+
+uint G1CardCounts::add_card_count(jbyte* card_ptr) {
+  // Returns the number of times the card has been refined.
+  // If we failed to reserve/commit the counts table, return 0.
+  // If card_ptr is beyond the committed end of the counts table,
+  // return 0.
+  // Otherwise return the actual count.
+  // Unless G1ConcRSHotCardLimit has been set appropriately,
+  // returning 0 will result in the card being considered
+  // cold and refined immediately.
+  uint count = 0;
+  if (has_count_table()) {
+    size_t card_num = ptr_2_card_num(card_ptr);
+    if (card_num < _committed_max_card_num) {
+      count = (uint) _card_counts[card_num];
+      if (count < G1ConcRSHotCardLimit) {
+        _card_counts[card_num] += 1;
+      }
+      assert(_card_counts[card_num] <= G1ConcRSHotCardLimit,
+             err_msg("Refinement count overflow? "
+                     "new count: "UINT32_FORMAT,
+                     (uint) _card_counts[card_num]));
+    }
+  }
+  return count;
+}
+
+bool G1CardCounts::is_hot(uint count) {
+  return (count >= G1ConcRSHotCardLimit);
+}
+
+void G1CardCounts::clear_region(HeapRegion* hr) {
+  assert(!hr->isHumongous(), "Should have been cleared");
+  if (has_count_table()) {
+    HeapWord* bottom = hr->bottom();
+
+    // We use the last address in hr as hr could be the
+    // last region in the heap, in which case trying to find
+    // the card for hr->end() would be an OOB access to the
+    // card table.
+    HeapWord* last = hr->end() - 1;
+    assert(_g1h->g1_committed().contains(last),
+           err_msg("last not in committed: "
+                   "last: " PTR_FORMAT ", "
+                   "committed: [" PTR_FORMAT ", " PTR_FORMAT ")",
+                   last,
+                   _g1h->g1_committed().start(),
+                   _g1h->g1_committed().end()));
+
+    const jbyte* from_card_ptr = _ct_bs->byte_for_const(bottom);
+    const jbyte* last_card_ptr = _ct_bs->byte_for_const(last);
+
+#ifdef ASSERT
+    HeapWord* start_addr = _ct_bs->addr_for(from_card_ptr);
+    assert(start_addr == hr->bottom(), "alignment");
+    HeapWord* last_addr = _ct_bs->addr_for(last_card_ptr);
+    assert((last_addr + CardTableModRefBS::card_size_in_words) == hr->end(), "alignment");
+#endif // ASSERT
+
+    // Clear the counts for the (exclusive) card range.
+    size_t from_card_num = ptr_2_card_num(from_card_ptr);
+    size_t to_card_num = ptr_2_card_num(last_card_ptr) + 1;
+    clear_range(from_card_num, to_card_num);
+  }
+}
+
+void G1CardCounts::clear_all() {
+  assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
+  clear_range((size_t)0, _committed_max_card_num);
+}
+
+G1CardCounts::~G1CardCounts() {
+  if (has_reserved_count_table()) {
+    _card_counts_storage.release();
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Thu May 16 11:47:51 2013 +0100
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/virtualspace.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class CardTableModRefBS;
+class G1CollectedHeap;
+class HeapRegion;
+
+// Table to track the number of times a card has been refined. Once
+// a card has been refined a certain number of times, it is
+// considered 'hot' and its refinement is delayed by inserting the
+// card into the hot card cache. The card will then be refined when
+// it is evicted from the hot card cache, or when the hot card cache
+// is 'drained' during the next evacuation pause.
+
+class G1CardCounts: public CHeapObj<mtGC> {
+  G1CollectedHeap* _g1h;
+
+  // The table of counts
+  jubyte* _card_counts;
+
+  // Max capacity of the reserved space for the counts table
+  size_t _reserved_max_card_num;
+
+  // Max capacity of the committed space for the counts table
+  size_t _committed_max_card_num;
+
+  // Size of committed space for the counts table
+  size_t _committed_size;
+
+  // CardTable bottom.
+  const jbyte* _ct_bot;
+
+  // Barrier set
+  CardTableModRefBS* _ct_bs;
+
+  // The virtual memory backing the counts table
+  VirtualSpace _card_counts_storage;
+
+  // Returns true if the card counts table has been reserved.
+  bool has_reserved_count_table() { return _card_counts != NULL; }
+
+  // Returns true if the card counts table has been reserved and committed.
+  bool has_count_table() {
+    return has_reserved_count_table() && _committed_max_card_num > 0;
+  }
+
+  void check_card_num(size_t card_num, const char* msg) {
+    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
+  }
+
+  size_t ptr_2_card_num(const jbyte* card_ptr) {
+    assert(card_ptr >= _ct_bot,
+           err_msg("Inavalied card pointer: "
+                   "card_ptr: " PTR_FORMAT ", "
+                   "_ct_bot: " PTR_FORMAT,
+                   card_ptr, _ct_bot));
+    size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
+    check_card_num(card_num,
+                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
+    return card_num;
+  }
+
+  jbyte* card_num_2_ptr(size_t card_num) {
+    check_card_num(card_num,
+                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
+    return (jbyte*) (_ct_bot + card_num);
+  }
+
+  // Clear the counts table for the given (exclusive) index range.
+  void clear_range(size_t from_card_num, size_t to_card_num);
+
+ public:
+  G1CardCounts(G1CollectedHeap* g1h);
+  ~G1CardCounts();
+
+  void initialize();
+
+  // Resize the committed space for the card counts table in
+  // response to a resize of the committed space for the heap.
+  void resize(size_t heap_capacity);
+
+  // Increments the refinement count for the given card.
+  // Returns the pre-increment count value.
+  uint add_card_count(jbyte* card_ptr);
+
+  // Returns true if the given count is high enough to be considered
+  // 'hot'; false otherwise.
+  bool is_hot(uint count);
+
+  // Clears the card counts for the cards spanned by the region
+  void clear_region(HeapRegion* hr);
+
+  // Clear the entire card counts table during GC.
+  // (Called at a safepoint.)
+  void clear_all();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CARDCOUNTS_HPP
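
A sizing note, for illustration: the counts table keeps one byte per card, and assuming the usual 512-byte CardTableModRefBS card size that works out to roughly heap_capacity / 512 bytes of reserved space (about 2 MiB for a 1 GiB heap). A small standalone sketch of that arithmetic and of the ptr_2_card_num mapping, with kCardSize as an assumed constant rather than the real CardTableModRefBS::card_size:

    #include <cassert>
    #include <cstddef>
    #include <iostream>

    // Sketch only: kCardSize stands in for CardTableModRefBS::card_size.
    static const size_t kCardSize = 512;

    // One count byte per card, so the counts table scales linearly with the heap.
    size_t counts_table_bytes(size_t heap_capacity) {
      return heap_capacity / kCardSize;
    }

    // A card pointer maps to a card number by subtracting the card table
    // bottom (cf. ptr_2_card_num above); the inverse is plain addition.
    size_t ptr_2_card_num(const char* card_ptr, const char* ct_bot) {
      assert(card_ptr >= ct_bot);
      return (size_t)(card_ptr - ct_bot);
    }

    int main() {
      std::cout << counts_table_bytes(1024UL * 1024 * 1024) << " bytes\n"; // 2097152
      char fake_card_table[8] = {0};
      std::cout << ptr_2_card_num(&fake_card_table[5], fake_card_table) << "\n"; // 5
      return 0;
    }
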
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 16 11:47:51 2013 +0100
@@ -96,7 +96,7 @@
     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
   {}
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
-    bool oops_into_cset = _g1rs->concurrentRefineOneCard(card_ptr, worker_i, false);
+    bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
     // This path is executed by the concurrent refine or mutator threads,
     // concurrently, and so we do not care if card_ptr contains references
     // that point into the collection set.
@@ -1271,9 +1271,8 @@
   if (guard && total_collections() >= VerifyGCStartAt) {
     double verify_start = os::elapsedTime();
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(msg);
     prepare_for_verify();
-    Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
   }
 
@@ -1304,7 +1303,7 @@
 
   print_heap_before_gc();
 
-  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
 
   HRSPhaseSetter x(HRSPhaseFullGC);
   verify_region_sets_optional();
@@ -1425,6 +1424,7 @@
 
       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
       ClassLoaderDataGraph::purge();
+    MetaspaceAux::verify_metrics();
 
       // Note: since we've just done a full GC, concurrent
       // marking is no longer active. Therefore we need not
@@ -1452,9 +1452,10 @@
         _hr_printer.end_gc(true /* full */, (size_t) total_collections());
       }
 
-      if (_cg1r->use_cache()) {
-        _cg1r->clear_and_record_card_counts();
-        _cg1r->clear_hot_cache();
+      G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+      if (hot_card_cache->use_cache()) {
+        hot_card_cache->reset_card_counts();
+        hot_card_cache->reset_hot_cache();
       }
 
       // Rebuild remembered sets of all regions.
@@ -1767,6 +1768,8 @@
   Universe::heap()->barrier_set()->resize_covered_region(_g1_committed);
   // Tell the BOT about the update.
   _bot_shared->resize(_g1_committed.word_size());
+  // Tell the hot card cache about the update
+  _cg1r->hot_card_cache()->resize_card_counts(capacity());
 }
 
 bool G1CollectedHeap::expand(size_t expand_bytes) {
@@ -1831,7 +1834,7 @@
     if (G1ExitOnExpansionFailure &&
         _g1_storage.uncommitted_size() >= aligned_expand_bytes) {
       // We had head room...
-      vm_exit_out_of_memory(aligned_expand_bytes, "G1 heap expansion");
+      vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
     }
   }
   return successful;
@@ -1843,33 +1846,32 @@
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
-  uint num_regions_deleted = 0;
-  MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
+  uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
+
+  uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove);
   HeapWord* old_end = (HeapWord*) _g1_storage.high();
-  assert(mr.end() == old_end, "post-condition");
+  size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
   ergo_verbose3(ErgoHeapSizing,
                 "shrink the heap",
                 ergo_format_byte("requested shrinking amount")
                 ergo_format_byte("aligned shrinking amount")
                 ergo_format_byte("attempted shrinking amount"),
-                shrink_bytes, aligned_shrink_bytes, mr.byte_size());
-  if (mr.byte_size() > 0) {
+                shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
+  if (num_regions_removed > 0) {
+    _g1_storage.shrink_by(shrunk_bytes);
+    HeapWord* new_end = (HeapWord*) _g1_storage.high();
+
     if (_hr_printer.is_active()) {
-      HeapWord* curr = mr.end();
-      while (curr > mr.start()) {
+      HeapWord* curr = old_end;
+      while (curr > new_end) {
         HeapWord* curr_end = curr;
         curr -= HeapRegion::GrainWords;
         _hr_printer.uncommit(curr, curr_end);
       }
-      assert(curr == mr.start(), "post-condition");
     }
 
-    _g1_storage.shrink_by(mr.byte_size());
-    HeapWord* new_end = (HeapWord*) _g1_storage.high();
-    assert(mr.start() == new_end, "post-condition");
-
-    _expansion_regions += num_regions_deleted;
+    _expansion_regions += num_regions_removed;
     update_committed_space(old_end, new_end);
     HeapRegionRemSet::shrink_heap(n_regions());
     g1_policy()->record_new_heap_size(n_regions());
@@ -1955,13 +1957,6 @@
   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
   assert(n_rem_sets > 0, "Invariant.");
 
-  HeapRegionRemSetIterator** iter_arr =
-    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
-  for (int i = 0; i < n_queues; i++) {
-    iter_arr[i] = new HeapRegionRemSetIterator();
-  }
-  _rem_set_iterator = iter_arr;
-
   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
 
@@ -2007,7 +2002,7 @@
   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
 
-  _cg1r = new ConcurrentG1Refine();
+  _cg1r = new ConcurrentG1Refine(this);
 
   // Reserve the maximum.
 
@@ -2068,6 +2063,9 @@
                   (HeapWord*) _g1_reserved.end(),
                   _expansion_regions);
 
+  // Do later initialization work for concurrent refinement.
+  _cg1r->init();
+
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
@@ -2085,20 +2083,20 @@
 
   _g1h = this;
 
-   _in_cset_fast_test_length = max_regions();
-   _in_cset_fast_test_base =
+  _in_cset_fast_test_length = max_regions();
+  _in_cset_fast_test_base =
                    NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC);
 
-   // We're biasing _in_cset_fast_test to avoid subtracting the
-   // beginning of the heap every time we want to index; basically
-   // it's the same with what we do with the card table.
-   _in_cset_fast_test = _in_cset_fast_test_base -
+  // We're biasing _in_cset_fast_test to avoid subtracting the
+  // beginning of the heap every time we want to index; basically
+  // it's the same with what we do with the card table.
+  _in_cset_fast_test = _in_cset_fast_test_base -
                ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
 
-   // Clear the _cset_fast_test bitmap in anticipation of adding
-   // regions to the incremental collection set for the first
-   // evacuation pause.
-   clear_cset_fast_test();
+  // Clear the _cset_fast_test bitmap in anticipation of adding
+  // regions to the incremental collection set for the first
+  // evacuation pause.
+  clear_cset_fast_test();
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
@@ -2160,9 +2158,6 @@
   // counts and that mechanism.
   SpecializationStats::clear();
 
-  // Do later initialization work for concurrent refinement.
-  _cg1r->init();
-
   // Here we allocate the dummy full region that is required by the
   // G1AllocRegion class. If we don't pass an address in the reserved
   // space here, lots of asserts fire.
@@ -2321,7 +2316,8 @@
                                                  bool concurrent,
                                                  int worker_i) {
   // Clean cards in the hot card cache
-  concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
+  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+  hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
 
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   int n_completed_buffers = 0;
@@ -3614,7 +3610,7 @@
   uint array_length = g1_policy()->young_cset_region_length();
   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
   if (_surviving_young_words == NULL) {
-    vm_exit_out_of_memory(sizeof(size_t) * array_length,
+    vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
                           "Not enough space for young surv words summary.");
   }
   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
@@ -4397,7 +4393,7 @@
                       PADDING_ELEM_NUM;
   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
   if (_surviving_young_words_base == NULL)
-    vm_exit_out_of_memory(array_length * sizeof(size_t),
+    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                           "Not enough space for young surv histo.");
   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
   memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
@@ -5079,10 +5075,9 @@
 }
 
 void
-G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
-                                       OopClosure* non_root_closure) {
+G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
-  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
+  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
 }
 
 // Weak Reference Processing support
@@ -5612,8 +5607,11 @@
   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
 
   g1_rem_set()->prepare_for_oops_into_collection_set_do();
-  concurrent_g1_refine()->set_use_cache(false);
-  concurrent_g1_refine()->clear_hot_cache_claimed_index();
+
+  // Disable the hot card cache.
+  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+  hot_card_cache->reset_hot_cache_claimed_index();
+  hot_card_cache->set_use_cache(false);
 
   uint n_workers;
   if (G1CollectedHeap::use_parallel_gc_threads()) {
@@ -5695,8 +5693,11 @@
   release_gc_alloc_regions(n_workers);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
-  concurrent_g1_refine()->clear_hot_cache();
-  concurrent_g1_refine()->set_use_cache(true);
+  // Reset and re-enable the hot card cache.
+  // Note the counts for the cards in the regions in the
+  // collection set are reset when the collection set is freed.
+  hot_card_cache->reset_hot_cache();
+  hot_card_cache->set_use_cache(true);
 
   finalize_for_evac_failure();
 
@@ -5758,6 +5759,12 @@
   assert(!hr->is_empty(), "the region should not be empty");
   assert(free_list != NULL, "pre-condition");
 
+  // Clear the card counts for this region.
+  // Note: we only need to do this if the region is not young
+  // (since we don't refine cards in young regions).
+  if (!hr->is_young()) {
+    _cg1r->hot_card_cache()->reset_card_counts(hr);
+  }
   *pre_used += hr->used();
   hr->hr_clear(par, true /* clear_space */);
   free_list->add_as_head(hr);
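
One way to read the reworked shrink path above: instead of trusting a MemRegion handed back by the region sequence, it asks for shrink_bytes / HeapRegion::GrainBytes whole regions and recomputes the uncommitted byte count from the number of regions actually removed. A worked sketch of that arithmetic, assuming a hypothetical 1 MiB region size and a sequence that can only give back two regions:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    // Sketch only: kGrainBytes stands in for HeapRegion::GrainBytes.
    static const size_t kGrainBytes = 1024 * 1024;

    int main() {
      size_t shrink_bytes = 2 * 1024 * 1024 + 512 * 1024;            // request: 2.5 MiB
      size_t num_regions_to_remove = shrink_bytes / kGrainBytes;     // -> 2 whole regions
      size_t num_regions_removed =
          std::min<size_t>(num_regions_to_remove, 2);                // what shrink_by() managed
      size_t shrunk_bytes = num_regions_removed * kGrainBytes;       // 2 MiB actually uncommitted
      std::cout << "requested " << shrink_bytes
                << " bytes, shrunk " << shrunk_bytes << " bytes\n";
      return 0;
    }
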
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu May 16 11:47:51 2013 +0100
@@ -786,9 +786,6 @@
   // concurrently after the collection.
   DirtyCardQueueSet _dirty_card_queue_set;
 
-  // The Heap Region Rem Set Iterator.
-  HeapRegionRemSetIterator** _rem_set_iterator;
-
   // The closure used to refine a single card.
   RefineCardTableEntryClosure* _refine_cte_cl;
 
@@ -827,8 +824,7 @@
   // Apply "blk" to all the weak roots of the system.  These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
-  void g1_process_weak_roots(OopClosure* root_closure,
-                             OopClosure* non_root_closure);
+  void g1_process_weak_roots(OopClosure* root_closure);
 
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
@@ -1114,15 +1110,6 @@
   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
   ModRefBarrierSet* mr_bs() const { return _mr_bs; }
 
-  // The rem set iterator.
-  HeapRegionRemSetIterator* rem_set_iterator(int i) {
-    return _rem_set_iterator[i];
-  }
-
-  HeapRegionRemSetIterator* rem_set_iterator() {
-    return _rem_set_iterator[0];
-  }
-
   unsigned get_gc_time_stamp() {
     return _gc_time_stamp;
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu May 16 11:47:51 2013 +0100
@@ -309,7 +309,8 @@
 
 void G1CollectorPolicy::initialize_flags() {
   set_min_alignment(HeapRegion::GrainBytes);
-  set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
+  size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
+  set_max_alignment(MAX2(card_table_alignment, min_alignment()));
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Thu May 16 11:47:51 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,11 +155,6 @@
 
 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _max_gc_threads(max_gc_threads),
-  _min_clear_cc_time_ms(-1.0),
-  _max_clear_cc_time_ms(-1.0),
-  _cur_clear_cc_time_ms(0.0),
-  _cum_clear_cc_time_ms(0.0),
-  _num_cc_clears(0L),
   _last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
   _last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
   _last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),
@@ -212,11 +207,11 @@
     _last_gc_worker_times_ms.set(i, worker_time);
 
     double worker_known_time = _last_ext_root_scan_times_ms.get(i) +
-      _last_satb_filtering_times_ms.get(i) +
-      _last_update_rs_times_ms.get(i) +
-      _last_scan_rs_times_ms.get(i) +
-      _last_obj_copy_times_ms.get(i) +
-      _last_termination_times_ms.get(i);
+                               _last_satb_filtering_times_ms.get(i) +
+                               _last_update_rs_times_ms.get(i) +
+                               _last_scan_rs_times_ms.get(i) +
+                               _last_obj_copy_times_ms.get(i) +
+                               _last_termination_times_ms.get(i);
 
     double worker_other_time = worker_time - worker_known_time;
     _last_gc_worker_other_times_ms.set(i, worker_other_time);
@@ -285,15 +280,6 @@
   }
   print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
   print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
-  if (Verbose && G1Log::finest()) {
-    print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
-    print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
-    print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
-    print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
-    if (_num_cc_clears > 0) {
-      print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
-    }
-  }
   double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
   print_stats(1, "Other", misc_time_ms);
   if (_cur_verify_before_time_ms > 0.0) {
@@ -311,19 +297,3 @@
     print_stats(2, "Verify After", _cur_verify_after_time_ms);
   }
 }
-
-void G1GCPhaseTimes::record_cc_clear_time_ms(double ms) {
-  if (!(Verbose && G1Log::finest())) {
-    return;
-  }
-
-  if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms) {
-    _min_clear_cc_time_ms = ms;
-  }
-  if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms) {
-    _max_clear_cc_time_ms = ms;
-  }
-  _cur_clear_cc_time_ms = ms;
-  _cum_clear_cc_time_ms += ms;
-  _num_cc_clears++;
-}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Thu May 16 11:47:51 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,13 +133,6 @@
   double _cur_ref_proc_time_ms;
   double _cur_ref_enq_time_ms;
 
-  // Card Table Count Cache stats
-  double _min_clear_cc_time_ms;         // min
-  double _max_clear_cc_time_ms;         // max
-  double _cur_clear_cc_time_ms;         // clearing time during current pause
-  double _cum_clear_cc_time_ms;         // cummulative clearing time
-  jlong  _num_cc_clears;                // number of times the card count cache has been cleared
-
   double _cur_collection_start_sec;
   double _root_region_scan_wait_time_ms;
 
@@ -227,8 +220,6 @@
     _root_region_scan_wait_time_ms = time_ms;
   }
 
-  void record_cc_clear_time_ms(double ms);
-
   void record_young_free_cset_time_ms(double time_ms) {
     _recorded_young_free_cset_time_ms = time_ms;
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Thu May 16 11:47:51 2013 +0100
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/dirtyCardQueue.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1HotCardCache.hpp"
+#include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "runtime/atomic.hpp"
+
+G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
+  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
+
+void G1HotCardCache::initialize() {
+  if (default_use_cache()) {
+    _use_cache = true;
+
+    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
+    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
+
+    _n_hot = 0;
+    _hot_cache_idx = 0;
+
+    // For refining the cards in the hot cache in parallel
+    int n_workers = (ParallelGCThreads > 0 ?
+                        _g1h->workers()->total_workers() : 1);
+    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
+    _hot_cache_par_claimed_idx = 0;
+
+    _card_counts.initialize();
+  }
+}
+
+G1HotCardCache::~G1HotCardCache() {
+  if (default_use_cache()) {
+    assert(_hot_cache != NULL, "Logic");
+    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
+  }
+}
+
+jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
+  uint count = _card_counts.add_card_count(card_ptr);
+  if (!_card_counts.is_hot(count)) {
+    // The card is not hot so do not store it in the cache;
+    // return it for immediate refining.
+    return card_ptr;
+  }
+
+  // Otherwise, the card is hot.
+  jbyte* res = NULL;
+  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
+  if (_n_hot == _hot_cache_size) {
+    res = _hot_cache[_hot_cache_idx];
+    _n_hot--;
+  }
+
+  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
+  _hot_cache[_hot_cache_idx] = card_ptr;
+  _hot_cache_idx++;
+
+  if (_hot_cache_idx == _hot_cache_size) {
+    // Wrap around
+    _hot_cache_idx = 0;
+  }
+  _n_hot++;
+
+  return res;
+}
+
+void G1HotCardCache::drain(int worker_i,
+                           G1RemSet* g1rs,
+                           DirtyCardQueue* into_cset_dcq) {
+  if (!default_use_cache()) {
+    assert(_hot_cache == NULL, "Logic");
+    return;
+  }
+
+  assert(_hot_cache != NULL, "Logic");
+  assert(!use_cache(), "cache should be disabled");
+  int start_idx;
+
+  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
+    int end_idx = start_idx + _hot_cache_par_chunk_size;
+
+    if (start_idx ==
+        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
+      // The current worker has successfully claimed the chunk [start_idx..end_idx)
+      end_idx = MIN2(end_idx, _n_hot);
+      for (int i = start_idx; i < end_idx; i++) {
+        jbyte* card_ptr = _hot_cache[i];
+        if (card_ptr != NULL) {
+          if (g1rs->refine_card(card_ptr, worker_i, true)) {
+            // The part of the heap spanned by the card contains references
+            // that point into the current collection set.
+            // We need to record the card pointer in the DirtyCardQueueSet
+            // that we use for such cards.
+            //
+            // The only time we care about recording cards that contain
+            // references that point into the collection set is during
+            // RSet updating while within an evacuation pause.
+            // In this case worker_i should be the id of a GC worker thread
+            assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
+            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
+                   err_msg("incorrect worker id: "INT32_FORMAT, worker_i));
+
+            into_cset_dcq->enqueue(card_ptr);
+          }
+        }
+      }
+    }
+  }
+  // The existing entries in the hot card cache, which were just refined
+  // above, are discarded prior to re-enabling the cache near the end of the GC.
+}
+
+void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
+  _card_counts.resize(heap_capacity);
+}
+
+void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
+  _card_counts.clear_region(hr);
+}
+
+void G1HotCardCache::reset_card_counts() {
+  _card_counts.clear_all();
+}
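
The insert() path above is an evicting ring buffer: when the cache is full, the slot about to be overwritten is handed back to the caller for immediate refinement, otherwise NULL is returned. A minimal single-threaded sketch of that policy (Card and EvictingRingCache are illustrative stand-ins; the real code additionally holds HotCardCache_lock):

    #include <cstddef>
    #include <vector>

    // Sketch only: Card stands in for jbyte*, and all locking is omitted.
    typedef const void* Card;

    class EvictingRingCache {
      std::vector<Card> _cache;
      size_t _idx;   // next slot to write
      size_t _n;     // number of live entries
     public:
      explicit EvictingRingCache(size_t size)
        : _cache(size, (Card)NULL), _idx(0), _n(0) {}

      // Returns NULL if the card was absorbed, otherwise the evicted card;
      // the caller refines whatever comes back.
      Card insert(Card c) {
        Card evicted = NULL;
        if (_n == _cache.size()) {
          evicted = _cache[_idx];   // full: hand back the entry we overwrite
          _n--;
        }
        _cache[_idx] = c;
        _idx = (_idx + 1) % _cache.size();
        _n++;
        return evicted;
      }
    };
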
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Thu May 16 11:47:51 2013 +0100
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
+
+#include "gc_implementation/g1/g1_globals.hpp"
+#include "gc_implementation/g1/g1CardCounts.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+class DirtyCardQueue;
+class G1CollectedHeap;
+class G1RemSet;
+class HeapRegion;
+
+// An evicting cache of cards that have been logged by the G1 post
+// write barrier. Placing a card in the cache delays the refinement
+// of the card until the card is evicted, or the cache is drained
+// during the next evacuation pause.
+//
+// The first thing the G1 post write barrier does is to check whether
+// the card containing the updated pointer is already dirty and, if
+// so, skip the remaining code in the barrier.
+//
+// Delaying the refinement of a card will make the card fail the
+// first is_dirty check in the write barrier, skipping the remainder
+// of the write barrier.
+//
+// This can significantly reduce the overhead of the write barrier
+// code, increasing throughput.
+
+class G1HotCardCache: public CHeapObj<mtGC> {
+  G1CollectedHeap*   _g1h;
+
+  // The card cache table
+  jbyte**      _hot_cache;
+
+  int          _hot_cache_size;
+  int          _n_hot;
+  int          _hot_cache_idx;
+
+  int          _hot_cache_par_chunk_size;
+  volatile int _hot_cache_par_claimed_idx;
+
+  bool         _use_cache;
+
+  G1CardCounts _card_counts;
+
+  bool default_use_cache() const {
+    return (G1ConcRSLogCacheSize > 0);
+  }
+
+ public:
+  G1HotCardCache(G1CollectedHeap* g1h);
+  ~G1HotCardCache();
+
+  void initialize();
+
+  bool use_cache() { return _use_cache; }
+
+  void set_use_cache(bool b) {
+    _use_cache = (b ? default_use_cache() : false);
+  }
+
+  // Returns the card to be refined or NULL.
+  //
+  // Increments the count for the given card. If the card is not 'hot',
+  // it is returned for immediate refining. Otherwise the card is
+  // added to the hot card cache.
+  // If there is enough room in the hot card cache for the card we're
+  // adding, NULL is returned and no further action is needed.
+  // If we evict a card from the cache to make room for the new card,
+  // the evicted card is then returned for refinement.
+  jbyte* insert(jbyte* card_ptr);
+
+  // Refine the cards that have been delayed as a result of
+  // being in the cache.
+  void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
+
+  // Set up for parallel processing of the cards in the hot cache
+  void reset_hot_cache_claimed_index() {
+    _hot_cache_par_claimed_idx = 0;
+  }
+
+  // Resets the hot card cache and discards the entries.
+  void reset_hot_cache() {
+    assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
+    assert(Thread::current()->is_VM_thread(), "Current thread should be the VMthread");
+    _hot_cache_idx = 0; _n_hot = 0;
+  }
+
+  bool hot_cache_is_empty() { return _n_hot == 0; }
+
+  // Resizes the card counts table to match the given capacity
+  void resize_card_counts(size_t heap_capacity);
+
+  // Zeros the values in the card counts table for the entire committed heap
+  void reset_card_counts();
+
+  // Zeros the values in the card counts table for the given region
+  void reset_card_counts(HeapRegion* hr);
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1HOTCARDCACHE_HPP
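
The drain() declared above parallelises over the cached cards by letting each worker claim a fixed-size chunk of indices with a compare-and-swap on the shared claimed index. A standalone sketch of that claiming loop using std::atomic (kChunk, n_hot and process_card() are stand-ins for _hot_cache_par_chunk_size, _n_hot and G1RemSet::refine_card(); the real code uses Atomic::cmpxchg):

    #include <algorithm>
    #include <atomic>
    #include <cstdio>

    static const int kChunk = 4;   // stand-in for _hot_cache_par_chunk_size

    void process_card(int idx) { std::printf("refine card slot %d\n", idx); }

    void drain_worker(std::atomic<int>& claimed_idx, int n_hot) {
      int start;
      while ((start = claimed_idx.load()) < n_hot) {
        int end = start + kChunk;
        // Claim [start, end) only if no other worker advanced the index first.
        if (claimed_idx.compare_exchange_strong(start, end)) {
          end = std::min(end, n_hot);
          for (int i = start; i < end; i++) {
            process_card(i);
          }
        }
      }
    }

    int main() {
      std::atomic<int> claimed_idx(0);
      drain_worker(claimed_idx, 10);  // single-threaded demo; GC runs many workers
      return 0;
    }
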
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu May 16 11:47:51 2013 +0100
@@ -144,33 +144,28 @@
                                     &GenMarkSweep::follow_stack_closure,
                                     NULL);
 
-  // Follow system dictionary roots and unload classes
+
+  // This is the point where the entire marking should have completed.
+  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
+
+  // Unload classes and purge the SystemDictionary.
   bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
-  assert(GenMarkSweep::_marking_stack.is_empty(),
-         "stack should be empty by now");
 
-  // Follow code cache roots (has to be done after system dictionary,
-  // assumes all live klasses are marked)
+  // Unload nmethods.
   CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);
-  GenMarkSweep::follow_stack();
 
-  // Update subklass/sibling/implementor links of live klasses
+  // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
-  assert(GenMarkSweep::_marking_stack.is_empty(),
-         "stack should be empty by now");
 
-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(&GenMarkSweep::is_alive);
+
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
 
-  assert(GenMarkSweep::_marking_stack.is_empty(),
-         "stack should be empty by now");
-
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
     Universe::heap()->prepare_for_verify();
     // Note: we can verify only the heap here. When an object is
     // marked, the previous value of the mark word (including
@@ -182,11 +177,13 @@
     // fail. At the end of the GC, the orginal mark word values
     // (including hash values) are restored to the appropriate
     // objects.
-    Universe::heap()->verify(/* silent      */ false,
-                             /* option      */ VerifyOption_G1UseMarkWord);
-
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    gclog_or_tty->print_cr("]");
+    if (!VerifySilently) {
+      gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
+    }
+    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
+    if (!VerifySilently) {
+      gclog_or_tty->print_cr("]");
+    }
   }
 }
 
@@ -308,17 +305,16 @@
   sh->process_strong_roots(true,  // activate StrongRootsScope
                            false, // not scavenging.
                            SharedHeap::SO_AllClasses,
-                           &GenMarkSweep::adjust_root_pointer_closure,
+                           &GenMarkSweep::adjust_pointer_closure,
                            NULL,  // do not touch code cache here
                            &GenMarkSweep::adjust_klass_closure);
 
   assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
-  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
+  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
-                             &GenMarkSweep::adjust_pointer_closure);
+  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
 
   GenMarkSweep::adjust_marks();
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu May 16 11:47:51 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1HotCardCache.hpp"
 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
@@ -169,14 +170,13 @@
     //   _try_claimed || r->claim_iter()
     // is true: either we're supposed to work on claimed-but-not-complete
     // regions, or we successfully claimed the region.
-    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
-    hrrs->init_iterator(iter);
+    HeapRegionRemSetIterator iter(hrrs);
     size_t card_index;
 
     // We claim cards in block so as to recude the contention. The block size is determined by
     // the G1RSetScanBlockSize parameter.
     size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
-    for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
+    for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
       if (current_card >= jump_to_card + _block_size) {
         jump_to_card = hrrs->iter_claimed_next(_block_size);
       }
@@ -248,7 +248,7 @@
     assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
     assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
 
-    if (_g1rs->concurrentRefineOneCard(card_ptr, worker_i, true)) {
+    if (_g1rs->refine_card(card_ptr, worker_i, true)) {
       // 'card_ptr' contains references that point into the collection
       // set. We need to record the card in the DCQS
       // (G1CollectedHeap::into_cset_dirty_card_queue_set())
@@ -289,9 +289,6 @@
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
 #endif
-  if (worker_i == 0) {
-    _cg1r->clear_and_record_card_counts();
-  }
 
   // We cache the value of 'oc' closure into the appropriate slot in the
   // _cset_rs_update_cl for this worker
@@ -397,7 +394,7 @@
     //   RSet updating,
     // * the post-write barrier shouldn't be logging updates to young
     //   regions (but there is a situation where this can happen - see
-    //   the comment in G1RemSet::concurrentRefineOneCard below -
+    //   the comment in G1RemSet::refine_card() below -
     //   that should not be applicable here), and
     // * during actual RSet updating, the filtering of cards in young
     //   regions in HeapRegion::oops_on_card_seq_iterate_careful is
@@ -503,8 +500,6 @@
                                        claim_val);
 }
 
-
-
 G1TriggerClosure::G1TriggerClosure() :
   _triggered(false) { }
 
@@ -525,13 +520,91 @@
   _record_refs_into_cset(record_refs_into_cset),
   _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
 
-bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
-                                                   bool check_for_refs_into_cset) {
+// Returns true if the given card contains references that point
+// into the collection set, if we're checking for such references;
+// false otherwise.
+
+bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
+                           bool check_for_refs_into_cset) {
+
+  // If the card is no longer dirty, nothing to do.
+  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
+    // No need to return that this card contains refs that point
+    // into the collection set.
+    return false;
+  }
+
   // Construct the region representing the card.
   HeapWord* start = _ct_bs->addr_for(card_ptr);
   // And find the region containing it.
   HeapRegion* r = _g1->heap_region_containing(start);
-  assert(r != NULL, "unexpected null");
+  if (r == NULL) {
+    // Again no need to return that this card contains refs that
+    // point into the collection set.
+    return false;  // Not in the G1 heap (might be in perm, for example.)
+  }
+
+  // Why do we have to check here whether a card is on a young region,
+  // given that we dirty young regions and, as a result, the
+  // post-barrier is supposed to filter them out and never to enqueue
+  // them? When we allocate a new region as the "allocation region" we
+  // actually dirty its cards after we release the lock, since card
+  // dirtying while holding the lock was a performance bottleneck. So,
+  // as a result, it is possible for other threads to actually
+  // allocate objects in the region (after they acquire the lock)
+  // before all the cards on the region are dirtied. This is unlikely,
+  // and it doesn't happen often, but it can happen. So, the extra
+  // check below filters out those cards.
+  if (r->is_young()) {
+    return false;
+  }
+
+  // While we are processing RSet buffers during the collection, we
+  // actually don't want to scan any cards on the collection set,
+  // since we don't want to update remembered sets with entries that
+  // point into the collection set, given that live objects from the
+  // collection set are about to move and such entries will be stale
+  // very soon. This change also deals with a reliability issue which
+  // involves scanning a card in the collection set and coming across
+  // an array that was being chunked and looking malformed. Note,
+  // however, that if evacuation fails, we have to scan any objects
+  // that were not moved and create any missing entries.
+  if (r->in_collection_set()) {
+    return false;
+  }
+
+  // The result from the hot card cache insert call is either:
+  //   * pointer to the current card
+  //     (implying that the current card is not 'hot'),
+  //   * null
+  //     (meaning we had inserted the card ptr into the "hot" card cache,
+  //     which had some headroom),
+  //   * a pointer to a "hot" card that was evicted from the "hot" cache.
+  //
+
+  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+  if (hot_card_cache->use_cache()) {
+    assert(!check_for_refs_into_cset, "sanity");
+    assert(!SafepointSynchronize::is_at_safepoint(), "sanity");
+
+    card_ptr = hot_card_cache->insert(card_ptr);
+    if (card_ptr == NULL) {
+      // There was no eviction. Nothing to do.
+      return false;
+    }
+
+    start = _ct_bs->addr_for(card_ptr);
+    r = _g1->heap_region_containing(start);
+    if (r == NULL) {
+      // Not in the G1 heap
+      return false;
+    }
+
+    // Checking whether the region we got back from the cache
+    // is young here is inappropriate. The region could have been
+    // freed, reallocated and tagged as young while in the cache.
+    // Hence we could see its young type change at any time.
+  }
 
   // Don't use addr_for(card_ptr + 1) which can ask for
   // a card beyond the heap.  This is not safe without a perm
@@ -611,140 +684,17 @@
     _conc_refine_cards++;
   }
 
-  return trigger_cl.triggered();
-}
-
-bool G1RemSet::concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                              bool check_for_refs_into_cset) {
-  // If the card is no longer dirty, nothing to do.
-  if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
-    // No need to return that this card contains refs that point
-    // into the collection set.
-    return false;
-  }
-
-  // Construct the region representing the card.
-  HeapWord* start = _ct_bs->addr_for(card_ptr);
-  // And find the region containing it.
-  HeapRegion* r = _g1->heap_region_containing(start);
-  if (r == NULL) {
-    // Again no need to return that this card contains refs that
-    // point into the collection set.
-    return false;  // Not in the G1 heap (might be in perm, for example.)
-  }
-  // Why do we have to check here whether a card is on a young region,
-  // given that we dirty young regions and, as a result, the
-  // post-barrier is supposed to filter them out and never to enqueue
-  // them? When we allocate a new region as the "allocation region" we
-  // actually dirty its cards after we release the lock, since card
-  // dirtying while holding the lock was a performance bottleneck. So,
-  // as a result, it is possible for other threads to actually
-  // allocate objects in the region (after the acquire the lock)
-  // before all the cards on the region are dirtied. This is unlikely,
-  // and it doesn't happen often, but it can happen. So, the extra
-  // check below filters out those cards.
-  if (r->is_young()) {
-    return false;
-  }
-  // While we are processing RSet buffers during the collection, we
-  // actually don't want to scan any cards on the collection set,
-  // since we don't want to update remebered sets with entries that
-  // point into the collection set, given that live objects from the
-  // collection set are about to move and such entries will be stale
-  // very soon. This change also deals with a reliability issue which
-  // involves scanning a card in the collection set and coming across
-  // an array that was being chunked and looking malformed. Note,
-  // however, that if evacuation fails, we have to scan any objects
-  // that were not moved and create any missing entries.
-  if (r->in_collection_set()) {
-    return false;
-  }
+  // This gets set to true if the card being refined has
+  // references that point into the collection set.
+  bool has_refs_into_cset = trigger_cl.triggered();
 
-  // Should we defer processing the card?
-  //
-  // Previously the result from the insert_cache call would be
-  // either card_ptr (implying that card_ptr was currently "cold"),
-  // null (meaning we had inserted the card ptr into the "hot"
-  // cache, which had some headroom), or a "hot" card ptr
-  // extracted from the "hot" cache.
-  //
-  // Now that the _card_counts cache in the ConcurrentG1Refine
-  // instance is an evicting hash table, the result we get back
-  // could be from evicting the card ptr in an already occupied
-  // bucket (in which case we have replaced the card ptr in the
-  // bucket with card_ptr and "defer" is set to false). To avoid
-  // having a data structure (updates to which would need a lock)
-  // to hold these unprocessed dirty cards, we need to immediately
-  // process card_ptr. The actions needed to be taken on return
-  // from cache_insert are summarized in the following table:
-  //
-  // res      defer   action
-  // --------------------------------------------------------------
-  // null     false   card evicted from _card_counts & replaced with
-  //                  card_ptr; evicted ptr added to hot cache.
-  //                  No need to process res; immediately process card_ptr
-  //
-  // null     true    card not evicted from _card_counts; card_ptr added
-  //                  to hot cache.
-  //                  Nothing to do.
-  //
-  // non-null false   card evicted from _card_counts & replaced with
-  //                  card_ptr; evicted ptr is currently "cold" or
-  //                  caused an eviction from the hot cache.
-  //                  Immediately process res; process card_ptr.
-  //
-  // non-null true    card not evicted from _card_counts; card_ptr is
-  //                  currently cold, or caused an eviction from hot
-  //                  cache.
-  //                  Immediately process res; no need to process card_ptr.
-
-
-  jbyte* res = card_ptr;
-  bool defer = false;
+  // We should only be detecting that the card contains references
+  // that point into the collection set if the current thread is
+  // a GC worker thread.
+  assert(!has_refs_into_cset || SafepointSynchronize::is_at_safepoint(),
+           "invalid result at non safepoint");
 
-  // This gets set to true if the card being refined has references
-  // that point into the collection set.
-  bool oops_into_cset = false;
-
-  if (_cg1r->use_cache()) {
-    jbyte* res = _cg1r->cache_insert(card_ptr, &defer);
-    if (res != NULL && (res != card_ptr || defer)) {
-      start = _ct_bs->addr_for(res);
-      r = _g1->heap_region_containing(start);
-      if (r != NULL) {
-        // Checking whether the region we got back from the cache
-        // is young here is inappropriate. The region could have been
-        // freed, reallocated and tagged as young while in the cache.
-        // Hence we could see its young type change at any time.
-        //
-        // Process card pointer we get back from the hot card cache. This
-        // will check whether the region containing the card is young
-        // _after_ checking that the region has been allocated from.
-        oops_into_cset = concurrentRefineOneCard_impl(res, worker_i,
-                                                      false /* check_for_refs_into_cset */);
-        // The above call to concurrentRefineOneCard_impl is only
-        // performed if the hot card cache is enabled. This cache is
-        // disabled during an evacuation pause - which is the only
-        // time when we need know if the card contains references
-        // that point into the collection set. Also when the hot card
-        // cache is enabled, this code is executed by the concurrent
-        // refine threads - rather than the GC worker threads - and
-        // concurrentRefineOneCard_impl will return false.
-        assert(!oops_into_cset, "should not see true here");
-      }
-    }
-  }
-
-  if (!defer) {
-    oops_into_cset =
-      concurrentRefineOneCard_impl(card_ptr, worker_i, check_for_refs_into_cset);
-    // We should only be detecting that the card contains references
-    // that point into the collection set if the current thread is
-    // a GC worker thread.
-    assert(!oops_into_cset || SafepointSynchronize::is_at_safepoint(),
-           "invalid result at non safepoint");
-  }
-  return oops_into_cset;
+  return has_refs_into_cset;
 }
 
 class HRRSStatsIter: public HeapRegionClosure {
@@ -847,13 +797,16 @@
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       dcqs.concatenate_logs();
     }
-    bool cg1r_use_cache = _cg1r->use_cache();
-    _cg1r->set_use_cache(false);
+
+    G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
+    bool use_hot_card_cache = hot_card_cache->use_cache();
+    hot_card_cache->set_use_cache(false);
+
     DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
     updateRS(&into_cset_dcq, 0);
     _g1->into_cset_dirty_card_queue_set().clear();
-    _cg1r->set_use_cache(cg1r_use_cache);
 
+    hot_card_cache->set_use_cache(use_hot_card_cache);
     assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
   }
 }
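
A condensed view of the control flow that refine_card() above now owns: the cheap filters run first (clean card, card outside the G1 heap, young region, collection set), and only then is the card offered to the hot card cache, which either swallows it, returns it unchanged, or returns an older evicted card to refine in its place. The sketch below is illustrative only; the predicates and HotCache are stand-ins for the real checks:

    #include <cstddef>

    typedef const void* Card;

    struct HotCache {
      bool enabled;
      Card insert(Card c) { return c; }    // stand-in: never actually caches
    };

    // Stand-ins for the real predicates and for the card scanning itself.
    bool is_clean(Card) { return false; }
    bool in_heap(Card)  { return true;  }
    bool is_young(Card) { return false; }
    bool in_cset(Card)  { return false; }
    bool do_refine(Card) { return false; }  // true iff refs point into the cset

    bool refine_card(Card card, HotCache& cache) {
      if (is_clean(card) || !in_heap(card)) return false;  // nothing to do
      if (is_young(card) || in_cset(card))  return false;  // filtered out
      if (cache.enabled) {
        card = cache.insert(card);          // NULL, the same card, or an evicted card
        if (card == NULL) return false;     // absorbed by the cache
        if (!in_heap(card)) return false;   // re-check whatever came back
      }
      return do_refine(card);
    }

    int main() {
      HotCache cache = { true };
      int dummy = 0;
      return refine_card(&dummy, cache) ? 1 : 0;
    }
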
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu May 16 11:47:51 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,27 +53,19 @@
     NumSeqTasks          = 1
   };
 
-  CardTableModRefBS*             _ct_bs;
-  SubTasksDone*                  _seq_task;
-  G1CollectorPolicy* _g1p;
+  CardTableModRefBS*     _ct_bs;
+  SubTasksDone*          _seq_task;
+  G1CollectorPolicy*     _g1p;
 
-  ConcurrentG1Refine* _cg1r;
+  ConcurrentG1Refine*    _cg1r;
 
-  size_t*             _cards_scanned;
-  size_t              _total_cards_scanned;
+  size_t*                _cards_scanned;
+  size_t                 _total_cards_scanned;
 
   // Used for caching the closure that is responsible for scanning
   // references into the collection set.
   OopsInHeapRegionClosure** _cset_rs_update_cl;
 
-  // The routine that performs the actual work of refining a dirty
-  // card.
-  // If check_for_refs_into_refs is true then a true result is returned
-  // if the card contains oops that have references into the current
-  // collection set.
-  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
-                                    bool check_for_refs_into_cset);
-
 public:
   // This is called to reset dual hash tables after the gc pause
   // is finished and the initial hash table is no longer being
@@ -90,8 +82,7 @@
   // function can be helpful in partitioning the work to be done. It
   // should be the same as the "i" passed to the calling thread's
   // work(i) function. In the sequential case this param will be ingored.
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
-                                   int worker_i);
+  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, int worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
   // call.  Must call each of these once before and after (in sequential
@@ -124,14 +115,13 @@
   void scrub_par(BitMap* region_bm, BitMap* card_bm,
                  uint worker_num, int claim_val);
 
-  // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
-  // join and leave around parts that must be atomic wrt GC.  (NULL means
-  // being done at a safepoint.)
+  // Refine the card corresponding to "card_ptr".
   // If check_for_refs_into_cset is true, a true result is returned
   // if the given card contains oops that have references into the
   // current collection set.
-  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
-                                       bool check_for_refs_into_cset);
+  virtual bool refine_card(jbyte* card_ptr,
+                           int worker_i,
+                           bool check_for_refs_into_cset);
 
   // Print any relevant summary info.
   virtual void print_summary_info();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu May 16 11:47:51 2013 +0100
@@ -163,16 +163,12 @@
           "Select green, yellow and red zones adaptively to meet the "      \
           "the pause requirements.")                                        \
                                                                             \
-  develop(intx, G1ConcRSLogCacheSize, 10,                                   \
+  product(uintx, G1ConcRSLogCacheSize, 10,                                  \
           "Log base 2 of the length of conc RS hot-card cache.")            \
                                                                             \
-  develop(intx, G1ConcRSHotCardLimit, 4,                                    \
+  product(uintx, G1ConcRSHotCardLimit, 4,                                   \
           "The threshold that defines (>=) a hot card.")                    \
                                                                             \
-  develop(intx, G1MaxHotCardCountSizePercent, 25,                           \
-          "The maximum size of the hot card count cache as a "              \
-          "percentage of the number of cards for the maximum heap.")        \
-                                                                            \
   develop(bool, G1PrintOopAppls, false,                                     \
           "When true, print applications of closures to external locs.")    \
                                                                             \
@@ -247,10 +243,6 @@
           "If non-0 is the number of parallel rem set update threads, "     \
           "otherwise the value is determined ergonomically.")               \
                                                                             \
-  develop(intx, G1CardCountCacheExpandThreshold, 16,                        \
-          "Expand the card count cache if the number of collisions for "    \
-          "a particular entry exceeds this value.")                         \
-                                                                            \
   develop(bool, G1VerifyCTCleanup, false,                                   \
           "Verify card table cleanup.")                                     \
                                                                             \
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu May 16 11:47:51 2013 +0100
@@ -285,7 +285,7 @@
   _fine_grain_regions = new PerRegionTablePtr[_max_fine_entries];
 
   if (_fine_grain_regions == NULL) {
-    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries,
+    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                           "Failed to allocate _fine_grain_entries.");
   }
 
@@ -877,14 +877,9 @@
   return _iter_state == Complete;
 }
 
-void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
-  iter->initialize(this);
-}
-
 #ifndef PRODUCT
 void HeapRegionRemSet::print() const {
-  HeapRegionRemSetIterator iter;
-  init_iterator(&iter);
+  HeapRegionRemSetIterator iter(this);
   size_t card_index;
   while (iter.has_next(card_index)) {
     HeapWord* card_start =
@@ -928,35 +923,23 @@
 
 //-------------------- Iteration --------------------
 
-HeapRegionRemSetIterator::
-HeapRegionRemSetIterator() :
-  _hrrs(NULL),
+HeapRegionRemSetIterator:: HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
+  _hrrs(hrrs),
   _g1h(G1CollectedHeap::heap()),
-  _bosa(NULL),
-  _sparse_iter() { }
-
-void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
-  _hrrs = hrrs;
-  _coarse_map = &_hrrs->_other_regions._coarse_map;
-  _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
-  _bosa = _hrrs->bosa();
-
-  _is = Sparse;
+  _coarse_map(&hrrs->_other_regions._coarse_map),
+  _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
+  _bosa(hrrs->bosa()),
+  _is(Sparse),
   // Set these values so that we increment to the first region.
-  _coarse_cur_region_index = -1;
-  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
-
-  _cur_region_cur_card = 0;
-
-  _fine_array_index = -1;
-  _fine_cur_prt = NULL;
-
-  _n_yielded_coarse = 0;
-  _n_yielded_fine = 0;
-  _n_yielded_sparse = 0;
-
-  _sparse_iter.init(&hrrs->_other_regions._sparse_table);
-}
+  _coarse_cur_region_index(-1),
+  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
+  _cur_region_cur_card(0),
+  _fine_array_index(-1),
+  _fine_cur_prt(NULL),
+  _n_yielded_coarse(0),
+  _n_yielded_fine(0),
+  _n_yielded_sparse(0),
+  _sparse_iter(&hrrs->_other_regions._sparse_table) {}
 
 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
   if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
@@ -1209,8 +1192,7 @@
   hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
 
   // Now, does iteration yield these three?
-  HeapRegionRemSetIterator iter;
-  hrrs->init_iterator(&iter);
+  HeapRegionRemSetIterator iter(hrrs);
   size_t sum = 0;
   size_t card_index;
   while (iter.has_next(card_index)) {
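[Annotation] Both call sites in heapRegionRemSet.cpp now construct the iterator fully initialized instead of default-constructing it and calling init_iterator(). The pattern, as it appears in print() and in the test code above (hrrs stands in for whatever HeapRegionRemSet* is being walked):

    HeapRegionRemSetIterator iter(hrrs);   // bound to its remembered set at construction
    size_t card_index;
    while (iter.has_next(card_index)) {
      // ... process card_index ...
    }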
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu May 16 11:47:51 2013 +0100
@@ -281,9 +281,6 @@
     return (_iter_state == Unclaimed) && (_iter_claimed == 0);
   }
 
-  // Initialize the given iterator to iterate over this rem set.
-  void init_iterator(HeapRegionRemSetIterator* iter) const;
-
   // The actual # of bytes this hr_remset takes up.
   size_t mem_size() {
     return _other_regions.mem_size()
@@ -345,9 +342,9 @@
 #endif
 };
 
-class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
+class HeapRegionRemSetIterator : public StackObj {
 
-  // The region over which we're iterating.
+  // The region RSet over which we're iterating.
   const HeapRegionRemSet* _hrrs;
 
   // Local caching of HRRS fields.
@@ -362,8 +359,10 @@
   size_t _n_yielded_coarse;
   size_t _n_yielded_sparse;
 
-  // If true we're iterating over the coarse table; if false the fine
-  // table.
+  // Indicates which granularity of table we're currently iterating over.
+  // We start iterating over the sparse table, progress to the fine grain
+  // table, and then finish with the coarse table.
+  // See HeapRegionRemSetIterator::has_next().
   enum IterState {
     Sparse,
     Fine,
@@ -403,9 +402,7 @@
 public:
   // We require an iterator to be initialized before use, so the
   // constructor does little.
-  HeapRegionRemSetIterator();
-
-  void initialize(const HeapRegionRemSet* hrrs);
+  HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs);
 
   // If there remains one or more cards to be yielded, returns true and
   // sets "card_index" to one of those cards (which is then considered
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu May 16 11:47:51 2013 +0100
@@ -124,11 +124,11 @@
       }
       assert(_regions[index] == NULL, "invariant");
       _regions[index] = new_hr;
-      increment_length(&_allocated_length);
+      increment_allocated_length();
     }
     // Have to increment the length first, otherwise we will get an
     // assert failure at(index) below.
-    increment_length(&_length);
+    increment_length();
     HeapRegion* hr = at(index);
     list->add_as_tail(hr);
 
@@ -201,45 +201,29 @@
   }
 }
 
-MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
-                                   uint* num_regions_deleted) {
+uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) {
   // Reset this in case it's currently pointing into the regions that
   // we just removed.
   _next_search_index = 0;
 
-  assert(shrink_bytes % os::vm_page_size() == 0, "unaligned");
-  assert(shrink_bytes % HeapRegion::GrainBytes == 0, "unaligned");
   assert(length() > 0, "the region sequence should not be empty");
   assert(length() <= _allocated_length, "invariant");
   assert(_allocated_length > 0, "we should have at least one region committed");
+  assert(num_regions_to_remove < length(), "We should never remove all regions");
 
-  // around the loop, i will be the next region to be removed
-  uint i = length() - 1;
-  assert(i > 0, "we should never remove all regions");
-  // [last_start, end) is the MemRegion that covers the regions we will remove.
-  HeapWord* end = at(i)->end();
-  HeapWord* last_start = end;
-  *num_regions_deleted = 0;
-  while (shrink_bytes > 0) {
-    HeapRegion* cur = at(i);
-    // We should leave the humongous regions where they are.
-    if (cur->isHumongous()) break;
-    // We should stop shrinking if we come across a non-empty region.
-    if (!cur->is_empty()) break;
+  uint i = 0;
+  for (; i < num_regions_to_remove; i++) {
+    HeapRegion* cur = at(length() - 1);
 
-    i -= 1;
-    *num_regions_deleted += 1;
-    shrink_bytes -= cur->capacity();
-    last_start = cur->bottom();
-    decrement_length(&_length);
-    // We will reclaim the HeapRegion. _allocated_length should be
-    // covering this index. So, even though we removed the region from
-    // the active set by decreasing _length, we still have it
-    // available in the future if we need to re-use it.
-    assert(i > 0, "we should never remove all regions");
-    assert(length() > 0, "we should never remove all regions");
+    if (!cur->is_empty()) {
+      // We have to give up if the region cannot be removed
+      break;
   }
-  return MemRegion(last_start, end);
+    assert(!cur->isHumongous(), "Humongous regions should not be empty");
+
+    decrement_length();
+  }
+  return i;
 }
 
 #ifndef PRODUCT
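[Annotation] shrink_by() now takes a region count and returns how many regions were actually uncommitted, leaving the byte bookkeeping to the caller. A hedged sketch of the new contract (variable names are illustrative, not taken from the g1CollectedHeap caller):

    // Convert the requested shrink amount to whole regions, shrink, and
    // convert the achieved count back to bytes.
    uint regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes);
    uint regions_removed   = _hrs.shrink_by(regions_to_remove);
    size_t shrunk_bytes    = (size_t)regions_removed * HeapRegion::GrainBytes;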
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu May 16 11:47:51 2013 +0100
@@ -92,14 +92,19 @@
   // address is valid.
   inline uintx addr_to_index_biased(HeapWord* addr) const;
 
-  void increment_length(uint* length) {
-    assert(*length < _max_length, "pre-condition");
-    *length += 1;
+  void increment_allocated_length() {
+    assert(_allocated_length < _max_length, "pre-condition");
+    _allocated_length++;
   }
 
-  void decrement_length(uint* length) {
-    assert(*length > 0, "pre-condition");
-    *length -= 1;
+  void increment_length() {
+    assert(_length < _max_length, "pre-condition");
+    _length++;
+  }
+
+  void decrement_length() {
+    assert(_length > 0, "pre-condition");
+    _length--;
   }
 
  public:
@@ -153,11 +158,9 @@
   void iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const;
 
   // Tag as uncommitted as many regions that are completely free as
-  // possible, up to shrink_bytes, from the suffix of the committed
-  // sequence. Return a MemRegion that corresponds to the address
-  // range of the uncommitted regions. Assume shrink_bytes is page and
-  // heap region aligned.
-  MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted);
+  // possible, up to num_regions_to_remove, from the suffix of the committed
+  // sequence. Return the actual number of removed regions.
+  uint shrink_by(uint num_regions_to_remove);
 
   // Do some sanity checking.
   void verify_optional() PRODUCT_RETURN;
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu May 16 11:47:51 2013 +0100
@@ -35,10 +35,6 @@
 
 #define UNROLL_CARD_LOOPS  1
 
-void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
-    sprt_iter->init(this);
-}
-
 void SparsePRTEntry::init(RegionIdx_t region_ind) {
   _region_ind = region_ind;
   _next_index = NullEntry;
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu May 16 11:47:51 2013 +0100
@@ -192,18 +192,11 @@
   size_t compute_card_ind(CardIdx_t ci);
 
 public:
-  RSHashTableIter() :
-    _tbl_ind(RSHashTable::NullEntry),
+  RSHashTableIter(RSHashTable* rsht) :
+    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
     _bl_ind(RSHashTable::NullEntry),
     _card_ind((SparsePRTEntry::cards_num() - 1)),
-    _rsht(NULL) {}
-
-  void init(RSHashTable* rsht) {
-    _rsht = rsht;
-    _tbl_ind = -1; // So that first increment gets to 0.
-    _bl_ind = RSHashTable::NullEntry;
-    _card_ind = (SparsePRTEntry::cards_num() - 1);
-  }
+    _rsht(rsht) {}
 
   bool has_next(size_t& card_index);
 };
@@ -284,8 +277,6 @@
   static void cleanup_all();
   RSHashTable* cur() const { return _cur; }
 
-  void init_iterator(SparsePRTIter* sprt_iter);
-
   static void add_to_expanded_list(SparsePRT* sprt);
   static SparsePRT* get_from_expanded_list();
 
@@ -321,9 +312,9 @@
 
 class SparsePRTIter: public RSHashTableIter {
 public:
-  void init(const SparsePRT* sprt) {
-    RSHashTableIter::init(sprt->cur());
-  }
+  SparsePRTIter(const SparsePRT* sprt) :
+    RSHashTableIter(sprt->cur()) {}
+
   bool has_next(size_t& card_index) {
     return RSHashTableIter::has_next(card_index);
   }
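[Annotation] With init_iterator() gone from SparsePRT and init() gone from RSHashTableIter, a sparse-table iterator is now bound to its table at construction time, mirroring the HeapRegionRemSetIterator change above. Usage sketch (sprt stands in for any SparsePRT*):

    SparsePRTIter iter(sprt);       // wraps RSHashTableIter(sprt->cur())
    size_t card_index;
    while (iter.has_next(card_index)) {
      // ... process card_index ...
    }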
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Thu May 16 11:47:51 2013 +0100
@@ -567,7 +567,7 @@
         MemRegion(new_start_aligned, new_end_for_commit);
       if (!os::commit_memory((char*)new_committed.start(),
                              new_committed.byte_size())) {
-        vm_exit_out_of_memory(new_committed.byte_size(),
+        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
                               "card table expansion");
       }
     }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Thu May 16 11:47:51 2013 +0100
@@ -43,7 +43,7 @@
   _time_stamp_index(0)
 {
   if (!os::create_thread(this, os::pgc_thread))
-    vm_exit_out_of_memory(0, "Cannot create GC thread. Out of system resources.");
+    vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GC thread. Out of system resources.");
 
   if (PrintGCTaskTimeStamps) {
     _time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Thu May 16 11:47:51 2013 +0100
@@ -99,7 +99,7 @@
     // Expand
     size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
     if (!_virtual_space.expand_by(expand_by)) {
-      vm_exit_out_of_memory(expand_by, "object start array expansion");
+      vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
     }
     // Clear *only* the newly allocated region
     memset(_blocks_region.end(), clean_block, expand_by);
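[Annotation] A recurring change in this merge: vm_exit_out_of_memory() now takes an error classification, so the fatal report distinguishes malloc-backed from mmap-backed exhaustion. The pattern, condensed from the hunks above (surrounding code elided):

    // mmap/commit-style failures are tagged OOM_MMAP_ERROR ...
    if (!os::commit_memory((char*)new_committed.start(), new_committed.byte_size())) {
      vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
                            "card table expansion");
    }

    // ... while malloc-style failures are tagged OOM_MALLOC_ERROR.
    if (!os::create_thread(this, os::pgc_thread)) {
      vm_exit_out_of_memory(0, OOM_MALLOC_ERROR,
                            "Cannot create GC thread. Out of system resources.");
    }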
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu May 16 11:47:51 2013 +0100
@@ -138,8 +138,7 @@
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyBeforeGC:");
-    Universe::verify();
+    Universe::verify(" VerifyBeforeGC:");
   }
 
   // Verify object start arrays
@@ -177,7 +176,7 @@
     size_t prev_used = heap->used();
 
     // Capture metadata size before collection for sizing.
-    size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+    size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
 
     // For PrintGCDetails
     size_t old_gen_prev_used = old_gen->used_in_bytes();
@@ -238,6 +237,7 @@
 
     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
     ClassLoaderDataGraph::purge();
+    MetaspaceAux::verify_metrics();
 
     BiasedLocking::restore_marks();
     Threads::gc_epilogue();
@@ -340,8 +340,7 @@
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyAfterGC:");
-    Universe::verify();
+    Universe::verify(" VerifyAfterGC:");
   }
 
   // Re-verify object start arrays
@@ -518,23 +517,23 @@
       is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
   }
 
-  // Follow system dictionary roots and unload classes
+  // This is the point where the entire marking should have completed.
+  assert(_marking_stack.is_empty(), "Marking should have completed");
+
+  // Unload classes and purge the SystemDictionary.
   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
 
-  // Follow code cache roots
+  // Unload nmethods.
   CodeCache::do_unloading(is_alive_closure(), purged_class);
-  follow_stack(); // Flush marking stack
 
-  // Update subklass/sibling/implementor links of live klasses
-  Klass::clean_weak_klass_links(&is_alive);
-  assert(_marking_stack.is_empty(), "just drained");
+  // Prune dead klasses from subklass/sibling/implementor lists.
+  Klass::clean_weak_klass_links(is_alive_closure());
 
-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(is_alive_closure());
+
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
-
-  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }
 
 
@@ -583,28 +582,27 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // General strong roots.
-  Universe::oops_do(adjust_root_pointer_closure());
-  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
-  CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
-  Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
-  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
-  FlatProfiler::oops_do(adjust_root_pointer_closure());
-  Management::oops_do(adjust_root_pointer_closure());
-  JvmtiExport::oops_do(adjust_root_pointer_closure());
+  Universe::oops_do(adjust_pointer_closure());
+  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
+  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
+  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
+  ObjectSynchronizer::oops_do(adjust_pointer_closure());
+  FlatProfiler::oops_do(adjust_pointer_closure());
+  Management::oops_do(adjust_pointer_closure());
+  JvmtiExport::oops_do(adjust_pointer_closure());
   // SO_AllClasses
-  SystemDictionary::oops_do(adjust_root_pointer_closure());
-  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
-  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
+  SystemDictionary::oops_do(adjust_pointer_closure());
+  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
+  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
 
   CodeCache::oops_do(adjust_pointer_closure());
-  StringTable::oops_do(adjust_root_pointer_closure());
-  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
-  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());
+  StringTable::oops_do(adjust_pointer_closure());
+  ref_processor()->weak_oops_do(adjust_pointer_closure());
+  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 
   adjust_marks();
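[Annotation] Two recurring simplifications in psMarkSweep.cpp: the VerifyBeforeGC/VerifyAfterGC label is passed to Universe::verify() instead of being printed separately, and the dedicated "root" pointer closure is gone, so strong and weak roots are adjusted with the same closure. Sketch of the verify convention as it now reads at each call site:

    if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
      HandleMark hm;                        // discard handles created during verification
      Universe::verify(" VerifyBeforeGC:"); // verify() prints the label itself
    }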
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Thu May 16 11:47:51 2013 +0100
@@ -44,7 +44,6 @@
   static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; }
   static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
   static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
-  static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_root_pointer_closure; }
   static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; }
   static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu May 16 11:47:51 2013 +0100
@@ -787,12 +787,11 @@
 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 
-PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
-PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
+PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
 
-void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
-void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
+void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
 
 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
 
@@ -805,7 +804,7 @@
   klass->oops_do(_mark_and_push_closure);
 }
 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure);
+  klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
 }
 
 void PSParallelCompact::post_initialize() {
@@ -892,7 +891,7 @@
     _heap_used      = heap->used();
     _young_gen_used = heap->young_gen()->used_in_bytes();
     _old_gen_used   = heap->old_gen()->used_in_bytes();
-    _metadata_used  = MetaspaceAux::used_in_bytes();
+    _metadata_used  = MetaspaceAux::allocated_used_bytes();
   };
 
   size_t heap_used() const      { return _heap_used; }
@@ -967,8 +966,7 @@
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyBeforeGC:");
-    Universe::verify();
+    Universe::verify(" VerifyBeforeGC:");
   }
 
   // Verify object start arrays
@@ -1027,6 +1025,7 @@
 
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   ClassLoaderDataGraph::purge();
+  MetaspaceAux::verify_metrics();
 
   Threads::gc_epilogue();
   CodeCache::gc_epilogue();
@@ -2168,8 +2167,7 @@
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyAfterGC:");
-    Universe::verify();
+    Universe::verify(" VerifyAfterGC:");
   }
 
   // Re-verify object start arrays
@@ -2356,22 +2354,24 @@
   }
 
   TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
+
+  // This is the point where the entire marking should have completed.
+  assert(cm->marking_stacks_empty(), "Marking should have completed");
+
   // Follow system dictionary roots and unload classes.
   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
 
-  // Follow code cache roots.
+  // Unload nmethods.
   CodeCache::do_unloading(is_alive_closure(), purged_class);
-  cm->follow_marking_stacks(); // Flush marking stack.
-
-  // Update subklass/sibling/implementor links of live klasses
+
+  // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(is_alive_closure());
 
-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(is_alive_closure());
+
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
-
-  assert(cm->marking_stacks_empty(), "marking stacks should be empty");
 }
 
 void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
@@ -2398,7 +2398,7 @@
 
 void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
                                             ClassLoaderData* cld) {
-  cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(),
+  cld->oops_do(PSParallelCompact::adjust_pointer_closure(),
                PSParallelCompact::adjust_klass_closure(),
                true);
 }
@@ -2419,32 +2419,31 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // General strong roots.
-  Universe::oops_do(adjust_root_pointer_closure());
-  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
-  CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
-  Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
-  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
-  FlatProfiler::oops_do(adjust_root_pointer_closure());
-  Management::oops_do(adjust_root_pointer_closure());
-  JvmtiExport::oops_do(adjust_root_pointer_closure());
+  Universe::oops_do(adjust_pointer_closure());
+  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
+  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
+  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
+  ObjectSynchronizer::oops_do(adjust_pointer_closure());
+  FlatProfiler::oops_do(adjust_pointer_closure());
+  Management::oops_do(adjust_pointer_closure());
+  JvmtiExport::oops_do(adjust_pointer_closure());
   // SO_AllClasses
-  SystemDictionary::oops_do(adjust_root_pointer_closure());
-  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
+  SystemDictionary::oops_do(adjust_pointer_closure());
+  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
+  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
 
   CodeCache::oops_do(adjust_pointer_closure());
-  StringTable::oops_do(adjust_root_pointer_closure());
-  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
+  StringTable::oops_do(adjust_pointer_closure());
+  ref_processor()->weak_oops_do(adjust_pointer_closure());
   // Roots were visited so references into the young gen in roots
   // may have been scanned.  Process them also.
   // Should the reference processor have a span that excludes
   // young gen objects?
-  PSScavenge::reference_processor()->weak_oops_do(
-                                              adjust_root_pointer_closure());
+  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 }
 
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu May 16 11:47:51 2013 +0100
@@ -799,16 +799,6 @@
     virtual void do_oop(narrowOop* p);
   };
 
-  // Current unused
-  class FollowRootClosure: public OopsInGenClosure {
-   private:
-    ParCompactionManager* _compaction_manager;
-   public:
-    FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
- };
-
   class FollowStackClosure: public VoidClosure {
    private:
     ParCompactionManager* _compaction_manager;
@@ -818,10 +808,7 @@
   };
 
   class AdjustPointerClosure: public OopClosure {
-   private:
-    bool _is_root;
    public:
-    AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
     // do not walk from thread stacks to the code cache on this phase
@@ -838,7 +825,6 @@
   friend class AdjustPointerClosure;
   friend class AdjustKlassClosure;
   friend class FollowKlassClosure;
-  friend class FollowRootClosure;
   friend class InstanceClassLoaderKlass;
   friend class RefProcTaskProxy;
 
@@ -853,7 +839,6 @@
   static IsAliveClosure       _is_alive_closure;
   static SpaceInfo            _space_info[last_space_id];
   static bool                 _print_phases;
-  static AdjustPointerClosure _adjust_root_pointer_closure;
   static AdjustPointerClosure _adjust_pointer_closure;
   static AdjustKlassClosure   _adjust_klass_closure;
 
@@ -889,9 +874,6 @@
   static void marking_phase(ParCompactionManager* cm,
                             bool maximum_heap_compaction);
 
-  template <class T> static inline void adjust_pointer(T* p, bool is_root);
-  static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
-
   template <class T>
   static inline void follow_root(ParCompactionManager* cm, T* p);
 
@@ -1046,7 +1028,6 @@
 
   // Closure accessors
   static OopClosure* adjust_pointer_closure()      { return (OopClosure*)&_adjust_pointer_closure; }
-  static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
   static KlassClosure* adjust_klass_closure()      { return (KlassClosure*)&_adjust_klass_closure; }
   static BoolObjectClosure* is_alive_closure()     { return (BoolObjectClosure*)&_is_alive_closure; }
 
@@ -1067,6 +1048,7 @@
   // Check mark and maybe push on marking stack
   template <class T> static inline void mark_and_push(ParCompactionManager* cm,
                                                       T* p);
+  template <class T> static inline void adjust_pointer(T* p);
 
   static void follow_klass(ParCompactionManager* cm, Klass* klass);
   static void adjust_klass(ParCompactionManager* cm, Klass* klass);
@@ -1151,9 +1133,6 @@
   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
   static ParallelCompactData& summary_data() { return _summary_data; }
 
-  static inline void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
-  static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
-
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
 
@@ -1230,7 +1209,7 @@
 }
 
 template <class T>
-inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
+inline void PSParallelCompact::adjust_pointer(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu May 16 11:47:51 2013 +0100
@@ -314,8 +314,7 @@
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyBeforeGC:");
-    Universe::verify();
+    Universe::verify(" VerifyBeforeGC:");
   }
 
   {
@@ -638,8 +637,7 @@
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyAfterGC:");
-    Universe::verify();
+    Universe::verify(" VerifyAfterGC:");
   }
 
   heap->print_heap_after_gc();
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Thu May 16 11:47:51 2013 +0100
@@ -81,7 +81,7 @@
 }
 
 void MarkSweep::adjust_class_loader(ClassLoaderData* cld) {
-  cld->oops_do(&MarkSweep::adjust_root_pointer_closure, &MarkSweep::adjust_klass_closure, true);
+  cld->oops_do(&MarkSweep::adjust_pointer_closure, &MarkSweep::adjust_klass_closure, true);
 }
 
 
@@ -121,11 +121,10 @@
   }
 }
 
-MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true);
-MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false);
+MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
 
-void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
-void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
+void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
 
 void MarkSweep::adjust_marks() {
   assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Thu May 16 11:47:51 2013 +0100
@@ -80,10 +80,7 @@
   };
 
   class AdjustPointerClosure: public OopsInGenClosure {
-   private:
-    bool _is_root;
    public:
-    AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
   };
@@ -146,7 +143,6 @@
   static MarkAndPushClosure   mark_and_push_closure;
   static FollowKlassClosure   follow_klass_closure;
   static FollowStackClosure   follow_stack_closure;
-  static AdjustPointerClosure adjust_root_pointer_closure;
   static AdjustPointerClosure adjust_pointer_closure;
   static AdjustKlassClosure   adjust_klass_closure;
 
@@ -179,12 +175,7 @@
   static void adjust_marks();   // Adjust the pointers in the preserved marks table
   static void restore_marks();  // Restore the marks that we saved in preserve_mark
 
-  template <class T> static inline void adjust_pointer(T* p, bool isroot);
-
-  static void adjust_root_pointer(oop* p)  { adjust_pointer(p, true); }
-  static void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
-  static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
-
+  template <class T> static inline void adjust_pointer(T* p);
 };
 
 class PreservedMark VALUE_OBJ_CLASS_SPEC {
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Thu May 16 11:47:51 2013 +0100
@@ -76,7 +76,7 @@
   _objarray_stack.push(task);
 }
 
-template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
+template <class T> inline void MarkSweep::adjust_pointer(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
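[Annotation] Across markSweep and psParallelCompact the is_root flag is dropped from AdjustPointerClosure and from the adjust_pointer template, leaving a single adjustment path. A minimal model of the resulting shape (illustrative, not the full HotSpot class):

    class AdjustPointerClosure : public OopsInGenClosure {
     public:
      virtual void do_oop(oop* p)       { MarkSweep::adjust_pointer(p); }
      virtual void do_oop(narrowOop* p) { MarkSweep::adjust_pointer(p); }
    };
    // Callers that previously used adjust_root_pointer(p) or adjust_pointer(p, true)
    // now simply call adjust_pointer(p).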
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu May 16 11:47:51 2013 +0100
@@ -225,7 +225,10 @@
         gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
       }
       heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
-      _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+      // After a GC try to allocate without expanding.  Could fail
+      // and expansion will be tried below.
+      _result =
+        _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
     }
     if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
       // If still failing, allow the Metaspace to expand.
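[Annotation] The added comment makes the metaspace allocation ladder explicit: collect, retry without expanding, and only then allow expansion. A hedged outline of that ladder (condensed from the hunk; the expand-and-allocate step is outside the excerpt above):

    heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
    // After a GC, try to allocate without expanding; this can fail.
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
      // If still failing, allow the Metaspace to expand
      // (that fallback is in the code that follows this excerpt).
    }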
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu May 16 11:47:51 2013 +0100
@@ -32,6 +32,7 @@
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/methodCounters.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
@@ -304,11 +305,12 @@
 
 
 #define METHOD istate->method()
-#define INVOCATION_COUNT METHOD->invocation_counter()
-#define BACKEDGE_COUNT METHOD->backedge_counter()
-
-
-#define INCR_INVOCATION_COUNT INVOCATION_COUNT->increment()
+#define GET_METHOD_COUNTERS(res)    \
+  res = METHOD->method_counters();  \
+  if (res == NULL) {                \
+    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
+  }
+
 #define OSR_REQUEST(res, branch_pc) \
             CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
 /*
@@ -325,10 +327,12 @@
 
 #define DO_BACKEDGE_CHECKS(skip, branch_pc)                                                         \
     if ((skip) <= 0) {                                                                              \
+      MethodCounters* mcs;                                                                          \
+      GET_METHOD_COUNTERS(mcs);                                                                     \
       if (UseLoopCounter) {                                                                         \
         bool do_OSR = UseOnStackReplacement;                                                        \
-        BACKEDGE_COUNT->increment();                                                                \
-        if (do_OSR) do_OSR = BACKEDGE_COUNT->reached_InvocationLimit();                             \
+        mcs->backedge_counter()->increment();                                                       \
+        if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit();                    \
         if (do_OSR) {                                                                               \
           nmethod*  osr_nmethod;                                                                    \
           OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
@@ -341,7 +345,7 @@
           }                                                                                         \
         }                                                                                           \
       }  /* UseCompiler ... */                                                                      \
-      INCR_INVOCATION_COUNT;                                                                        \
+      mcs->invocation_counter()->increment();                                                       \
       SAFEPOINT;                                                                                    \
     }
 
@@ -618,11 +622,13 @@
       // count invocations
       assert(initialized, "Interpreter not initialized");
       if (_compiling) {
+        MethodCounters* mcs;
+        GET_METHOD_COUNTERS(mcs);
         if (ProfileInterpreter) {
-          METHOD->increment_interpreter_invocation_count();
+          METHOD->increment_interpreter_invocation_count(THREAD);
         }
-        INCR_INVOCATION_COUNT;
-        if (INVOCATION_COUNT->reached_InvocationLimit()) {
+        mcs->invocation_counter()->increment();
+        if (mcs->invocation_counter()->reached_InvocationLimit()) {
             CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
 
             // We no longer retry on a counter overflow
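[Annotation] Invocation and backedge counters move into a lazily created MethodCounters object, so the C++ interpreter must fetch (and possibly build) that object before bumping anything. The expansion of GET_METHOD_COUNTERS at a use site looks roughly like this (sketch; error paths run through CALL_VM/handle_exception as in the hunk):

    MethodCounters* mcs = istate->method()->method_counters();
    if (mcs == NULL) {
      // Counters not yet allocated for this method: build them in the VM.
      CALL_VM(mcs = InterpreterRuntime::build_method_counters(THREAD, istate->method()),
              handle_exception);
    }
    mcs->invocation_counter()->increment();
    if (mcs->invocation_counter()->reached_InvocationLimit()) {
      // hand off to the compilation policy, as before
    }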
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu May 16 11:47:51 2013 +0100
@@ -1052,7 +1052,7 @@
     return;
   }
   if (set_handler_blob() == NULL) {
-    vm_exit_out_of_memory(blob_size, "native signature handlers");
+    vm_exit_out_of_memory(blob_size, OOM_MALLOC_ERROR, "native signature handlers");
   }
 
   BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer",
--- a/hotspot/src/share/vm/memory/allocation.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Thu May 16 11:47:51 2013 +0100
@@ -259,7 +259,7 @@
     }
     if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
     if (p == NULL)
-      vm_exit_out_of_memory(bytes, "ChunkPool::allocate");
+      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
 
     return p;
   }
@@ -371,7 +371,7 @@
    default: {
      void *p =  os::malloc(bytes, mtChunk, CALLER_PC);
      if (p == NULL)
-       vm_exit_out_of_memory(bytes, "Chunk::new");
+       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
      return p;
    }
   }
@@ -531,7 +531,7 @@
 }
 
 void Arena::signal_out_of_memory(size_t sz, const char* whence) const {
-  vm_exit_out_of_memory(sz, whence);
+  vm_exit_out_of_memory(sz, OOM_MALLOC_ERROR, whence);
 }
 
 // Grow a new Chunk
--- a/hotspot/src/share/vm/memory/allocation.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Thu May 16 11:47:51 2013 +0100
@@ -539,6 +539,9 @@
 #define NEW_RESOURCE_ARRAY(type, size)\
   (type*) resource_allocate_bytes((size) * sizeof(type))
 
+#define NEW_RESOURCE_ARRAY_RETURN_NULL(type, size)\
+  (type*) resource_allocate_bytes((size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
+
 #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
   (type*) resource_allocate_bytes(thread, (size) * sizeof(type))
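[Annotation] The new NEW_RESOURCE_ARRAY_RETURN_NULL macro gives resource-area callers a way to observe allocation failure instead of exiting the VM. A hedged usage sketch (the caller and its recovery policy are hypothetical):

    // Try to allocate a scratch buffer in the resource area; degrade gracefully
    // rather than letting the allocation call vm_exit_out_of_memory().
    jbyte* buf = NEW_RESOURCE_ARRAY_RETURN_NULL(jbyte, buffer_size);
    if (buf == NULL) {
      return false;   // caller falls back to a slower, smaller-footprint path
    }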
 
--- a/hotspot/src/share/vm/memory/allocation.inline.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/allocation.inline.hpp	Thu May 16 11:47:51 2013 +0100
@@ -58,7 +58,9 @@
   #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p);
   #endif
-  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) vm_exit_out_of_memory(size, "AllocateHeap");
+  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
+  }
   return p;
 }
 
@@ -68,7 +70,9 @@
   #ifdef ASSERT
   if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p);
   #endif
-  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) vm_exit_out_of_memory(size, "ReallocateHeap");
+  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
+  }
   return p;
 }
 
@@ -128,14 +132,14 @@
   int alignment = os::vm_allocation_granularity();
   _size = align_size_up(_size, alignment);
 
-  _addr = os::reserve_memory(_size, NULL, alignment);
+  _addr = os::reserve_memory(_size, NULL, alignment, F);
   if (_addr == NULL) {
-    vm_exit_out_of_memory(_size, "Allocator (reserve)");
+    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
   }
 
   bool success = os::commit_memory(_addr, _size, false /* executable */);
   if (!success) {
-    vm_exit_out_of_memory(_size, "Allocator (commit)");
+    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)");
   }
 
   return (E*)_addr;
--- a/hotspot/src/share/vm/memory/blockOffsetTable.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/blockOffsetTable.cpp	Thu May 16 11:47:51 2013 +0100
@@ -80,7 +80,7 @@
     assert(delta > 0, "just checking");
     if (!_vs.expand_by(delta)) {
       // Do better than this for Merlin
-      vm_exit_out_of_memory(delta, "offset table expansion");
+      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
     }
     assert(_vs.high() == high + delta, "invalid expansion");
   } else {
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Thu May 16 11:47:51 2013 +0100
@@ -116,7 +116,7 @@
   _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
   if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
     // Do better than this for Merlin
-    vm_exit_out_of_memory(_page_size, "card table last card");
+    vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card");
   }
 
   *guard_card = last_card;
@@ -292,7 +292,7 @@
       if (!os::commit_memory((char*)new_committed.start(),
                              new_committed.byte_size(), _page_size)) {
         // Do better than this for Merlin
-        vm_exit_out_of_memory(new_committed.byte_size(),
+        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
                 "card table expansion");
       }
     // Use new_end_aligned (as opposed to new_end_for_commit) because
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Thu May 16 11:47:51 2013 +0100
@@ -48,6 +48,17 @@
 // CollectorPolicy methods.
 
 void CollectorPolicy::initialize_flags() {
+  assert(max_alignment() >= min_alignment(),
+      err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
+          max_alignment(), min_alignment()));
+  assert(max_alignment() % min_alignment() == 0,
+      err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
+          max_alignment(), min_alignment()));
+
+  if (MaxHeapSize < InitialHeapSize) {
+    vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
+  }
+
   if (MetaspaceSize > MaxMetaspaceSize) {
     MaxMetaspaceSize = MetaspaceSize;
   }
@@ -71,21 +82,9 @@
 }
 
 void CollectorPolicy::initialize_size_info() {
-  // User inputs from -mx and ms are aligned
-  set_initial_heap_byte_size(InitialHeapSize);
-  if (initial_heap_byte_size() == 0) {
-    set_initial_heap_byte_size(NewSize + OldSize);
-  }
-  set_initial_heap_byte_size(align_size_up(_initial_heap_byte_size,
-                                           min_alignment()));
-
-  set_min_heap_byte_size(Arguments::min_heap_size());
-  if (min_heap_byte_size() == 0) {
-    set_min_heap_byte_size(NewSize + OldSize);
-  }
-  set_min_heap_byte_size(align_size_up(_min_heap_byte_size,
-                                       min_alignment()));
-
+  // User inputs from -mx and ms must be aligned
+  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
+  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
   set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
 
   // Check heap parameter properties
@@ -201,9 +200,6 @@
   // All sizes must be multiples of the generation granularity.
   set_min_alignment((uintx) Generation::GenGrain);
   set_max_alignment(compute_max_alignment());
-  assert(max_alignment() >= min_alignment() &&
-         max_alignment() % min_alignment() == 0,
-         "invalid alignment constraints");
 
   CollectorPolicy::initialize_flags();
 
@@ -233,9 +229,6 @@
   GenCollectorPolicy::initialize_flags();
 
   OldSize = align_size_down(OldSize, min_alignment());
-  if (NewSize + OldSize > MaxHeapSize) {
-    MaxHeapSize = NewSize + OldSize;
-  }
 
   if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
     // NewRatio will be used later to set the young generation size so we use
@@ -250,6 +243,27 @@
   }
   MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
 
+  // adjust max heap size if necessary
+  if (NewSize + OldSize > MaxHeapSize) {
+    if (FLAG_IS_CMDLINE(MaxHeapSize)) {
+      // somebody set a maximum heap size with the intention that we should not
+      // exceed it. Adjust New/OldSize as necessary.
+      uintx calculated_size = NewSize + OldSize;
+      double shrink_factor = (double) MaxHeapSize / calculated_size;
+      // align
+      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
+      // OldSize is already aligned because above we aligned MaxHeapSize to
+      // max_alignment(), and we just made sure that NewSize is aligned to
+      // min_alignment(). In initialize_flags() we verified that max_alignment()
+      // is a multiple of min_alignment().
+      OldSize = MaxHeapSize - NewSize;
+    } else {
+      MaxHeapSize = NewSize + OldSize;
+    }
+  }
+  // need to do this again
+  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+
   always_do_update_barrier = UseConcMarkSweepGC;
 
   // Check validity of heap flags
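[Annotation] The new block only scales NewSize/OldSize down when the user explicitly set the maximum heap size on the command line; otherwise the maximum heap is grown as before. A worked example of the arithmetic in that branch (illustrative numbers, min_alignment assumed to be 1M):

    // Suppose the command line gives MaxHeapSize = 64M, NewSize = 48M, OldSize = 32M.
    uintx calculated_size = NewSize + OldSize;                        // 80M > MaxHeapSize
    double shrink_factor  = (double) MaxHeapSize / calculated_size;   // 0.8
    NewSize = align_size_down((uintx)(NewSize * shrink_factor),       // 38.4M -> 38M
                              min_alignment());
    OldSize = MaxHeapSize - NewSize;                                  // 26M, still aligned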
--- a/hotspot/src/share/vm/memory/filemap.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Thu May 16 11:47:51 2013 +0100
@@ -238,8 +238,8 @@
 
 void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
   align_file_position();
-  size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord;
-  size_t capacity = space->capacity_words(Metaspace::NonClassType) * BytesPerWord;
+  size_t used = space->used_bytes_slow(Metaspace::NonClassType);
+  size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
   struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
   write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
 }
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Thu May 16 11:47:51 2013 +0100
@@ -377,7 +377,7 @@
 
   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 
-  const size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
 
   print_heap_before_gc();
 
@@ -447,8 +447,7 @@
             prepare_for_verify();
             prepared_for_verification = true;
           }
-          gclog_or_tty->print(" VerifyBeforeGC:");
-          Universe::verify();
+          Universe::verify(" VerifyBeforeGC:");
         }
         COMPILER2_PRESENT(DerivedPointerTable::clear());
 
@@ -519,8 +518,7 @@
         if (VerifyAfterGC && i >= VerifyGCLevel &&
             total_collections() >= VerifyGCStartAt) {
           HandleMark hm;  // Discard invalid handles created during verification
-          gclog_or_tty->print(" VerifyAfterGC:");
-          Universe::verify();
+          Universe::verify(" VerifyAfterGC:");
         }
 
         if (PrintGCDetails) {
@@ -556,6 +554,7 @@
     if (complete) {
       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
       ClassLoaderDataGraph::purge();
+      MetaspaceAux::verify_metrics();
       // Resize the metaspace capacity after full collections
       MetaspaceGC::compute_new_size();
       update_full_collections_completed();
@@ -633,9 +632,8 @@
 }
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
-                                              CodeBlobClosure* code_roots,
-                                              OopClosure* non_root_closure) {
-  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
+                                              CodeBlobClosure* code_roots) {
+  SharedHeap::process_weak_roots(root_closure, code_roots);
   // "Local" "weak" refs
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->ref_processor()->weak_oops_do(root_closure);
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Thu May 16 11:47:51 2013 +0100
@@ -432,8 +432,7 @@
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
   void gen_process_weak_roots(OopClosure* root_closure,
-                              CodeBlobClosure* code_roots,
-                              OopClosure* non_root_closure);
+                              CodeBlobClosure* code_roots);
 
   // Set the saved marks of generations, if that makes sense.
   // In particular, if any generation might iterate over the oops
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp	Thu May 16 11:47:51 2013 +0100
@@ -223,23 +223,23 @@
       &is_alive, &keep_alive, &follow_stack_closure, NULL);
   }
 
-  // Follow system dictionary roots and unload classes
+  // This is the point where the entire marking should have completed.
+  assert(_marking_stack.is_empty(), "Marking should have completed");
+
+  // Unload classes and purge the SystemDictionary.
   bool purged_class = SystemDictionary::do_unloading(&is_alive);
 
-  // Follow code cache roots
+  // Unload nmethods.
   CodeCache::do_unloading(&is_alive, purged_class);
-  follow_stack(); // Flush marking stack
 
-  // Update subklass/sibling/implementor links of live klasses
+  // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(&is_alive);
-  assert(_marking_stack.is_empty(), "just drained");
 
-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(&is_alive);
+
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
-
-  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }
 
 
@@ -282,11 +282,10 @@
   // Need new claim bits for the pointer adjustment tracing.
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  // Because the two closures below are created statically, cannot
+  // Because the closure below is created statically, we cannot
   // use OopsInGenClosure constructor which takes a generation,
   // as the Universe has not been created when the static constructors
   // are run.
-  adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level));
   adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
 
   gch->gen_process_strong_roots(level,
@@ -294,18 +293,17 @@
                                 true,  // activate StrongRootsScope
                                 false, // not scavenging
                                 SharedHeap::SO_AllClasses,
-                                &adjust_root_pointer_closure,
+                                &adjust_pointer_closure,
                                 false, // do not walk code
-                                &adjust_root_pointer_closure,
+                                &adjust_pointer_closure,
                                 &adjust_klass_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
                                                    /*do_marking=*/ false);
-  gch->gen_process_weak_roots(&adjust_root_pointer_closure,
-                              &adjust_code_pointer_closure,
-                              &adjust_pointer_closure);
+  gch->gen_process_weak_roots(&adjust_pointer_closure,
+                              &adjust_code_pointer_closure);
 
   adjust_marks();
   GenAdjustPointersClosure blk;
--- a/hotspot/src/share/vm/memory/metachunk.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/metachunk.cpp	Thu May 16 11:47:51 2013 +0100
@@ -28,6 +28,7 @@
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
+class VirtualSpaceNode;
 //
 // Future modification
 //
@@ -45,27 +46,30 @@
 
 // Metachunk methods
 
-Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
-  // Set bottom, top, and end.  Allow space for the Metachunk itself
-  Metachunk* chunk = (Metachunk*) ptr;
-
-  MetaWord* chunk_bottom = ptr + _overhead;
-  chunk->set_bottom(ptr);
-  chunk->set_top(chunk_bottom);
-  MetaWord* chunk_end = ptr + word_size;
-  assert(chunk_end > chunk_bottom, "Chunk must be too small");
-  chunk->set_end(chunk_end);
-  chunk->set_next(NULL);
-  chunk->set_prev(NULL);
-  chunk->set_word_size(word_size);
+Metachunk::Metachunk(size_t word_size,
+                     VirtualSpaceNode* container) :
+    _word_size(word_size),
+    _bottom(NULL),
+    _end(NULL),
+    _top(NULL),
+    _next(NULL),
+    _prev(NULL),
+    _container(container)
+{
+  _bottom = (MetaWord*)this;
+  _top = (MetaWord*)this + _overhead;
+  _end = (MetaWord*)this + word_size;
 #ifdef ASSERT
-  size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord));
-  Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize);
+  set_is_free(false);
+  size_t data_word_size = pointer_delta(end(),
+                                        top(),
+                                        sizeof(MetaWord));
+  Copy::fill_to_words((HeapWord*) top(),
+                      data_word_size,
+                      metadata_chunk_initialize);
 #endif
-  return chunk;
 }
 
-
 MetaWord* Metachunk::allocate(size_t word_size) {
   MetaWord* result = NULL;
   // If available, bump the pointer to allocate.
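[Annotation] Metachunk construction now goes through a real constructor that lays the chunk header out at the start of its own memory: _bottom points at the object itself, _top starts _overhead words in, and _end is word_size words from the start. A hedged sketch of how such a chunk is carved out of a VirtualSpaceNode (the placement-new call is an assumption here, not shown in the excerpt):

    // 'ptr' is the first word of a word_size-word span inside the node.
    Metachunk* chunk = ::new (ptr) Metachunk(word_size, this /* container */);
    // Afterwards: chunk->bottom() == ptr,
    //             chunk->top()    == ptr + overhead   (first allocatable word),
    //             chunk->end()    == ptr + word_size.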
--- a/hotspot/src/share/vm/memory/metachunk.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/metachunk.hpp	Thu May 16 11:47:51 2013 +0100
@@ -41,10 +41,13 @@
 //            |              |              |        |
 //            +--------------+ <- bottom ---+     ---+
 
+class VirtualSpaceNode;
+
 class Metachunk VALUE_OBJ_CLASS_SPEC {
   // link to support lists of chunks
   Metachunk* _next;
   Metachunk* _prev;
+  VirtualSpaceNode* _container;
 
   MetaWord* _bottom;
   MetaWord* _end;
@@ -61,29 +64,20 @@
   // the space.
   static size_t _overhead;
 
-  void set_bottom(MetaWord* v) { _bottom = v; }
-  void set_end(MetaWord* v) { _end = v; }
-  void set_top(MetaWord* v) { _top = v; }
-  void set_word_size(size_t v) { _word_size = v; }
  public:
-#ifdef ASSERT
-  Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false),
-    _next(NULL), _prev(NULL) {}
-#else
-  Metachunk() : _bottom(NULL), _end(NULL), _top(NULL),
-    _next(NULL), _prev(NULL) {}
-#endif
+  Metachunk(size_t word_size , VirtualSpaceNode* container);
 
   // Used to add a Metachunk to a list of Metachunks
   void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
   void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
+  void set_container(VirtualSpaceNode* v) { _container = v; }
 
   MetaWord* allocate(size_t word_size);
-  static Metachunk* initialize(MetaWord* ptr, size_t word_size);
 
   // Accessors
   Metachunk* next() const { return _next; }
   Metachunk* prev() const { return _prev; }
+  VirtualSpaceNode* container() const { return _container; }
   MetaWord* bottom() const { return _bottom; }
   MetaWord* end() const { return _end; }
   MetaWord* top() const { return _top; }
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Thu May 16 11:47:51 2013 +0100
@@ -47,7 +47,6 @@
 // the free chunk lists
 const bool metaspace_slow_verify = false;
 
-
 // Parameters for stress mode testing
 const uint metadata_deallocate_a_lot_block = 10;
 const uint metadata_deallocate_a_lock_chunk = 3;
@@ -112,6 +111,7 @@
 class ChunkManager VALUE_OBJ_CLASS_SPEC {
 
   // Free list of chunks of different sizes.
+  //   SpecializedChunk
   //   SmallChunk
   //   MediumChunk
   //   HumongousChunk
@@ -165,6 +165,10 @@
   // for special, small, medium, and humongous chunks.
   static ChunkIndex list_index(size_t size);
 
+  // Remove the chunk from its freelist.  It is
+  // expected to be on one of the _free_chunks[] lists.
+  void remove_chunk(Metachunk* chunk);
+
   // Add the simple linked list of chunks to the freelist of chunks
   // of type index.
   void return_chunks(ChunkIndex index, Metachunk* chunks);
@@ -215,7 +219,6 @@
   void print_on(outputStream* st);
 };
 
-
 // Used to manage the free list of Metablocks (a block corresponds
 // to the allocation of a quantum of metadata).
 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
@@ -255,6 +258,8 @@
   ReservedSpace _rs;
   VirtualSpace _virtual_space;
   MetaWord* _top;
+  // count of chunks contained in this VirtualSpace
+  uintx _container_count;
 
   // Convenience functions for logical bottom and end
   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
@@ -264,10 +269,19 @@
   char* low()  const { return virtual_space()->low(); }
   char* high() const { return virtual_space()->high(); }
 
+  // The first Metachunk will be allocated at the bottom of the
+  // VirtualSpace
+  Metachunk* first_chunk() { return (Metachunk*) bottom(); }
+
+  void inc_container_count();
+#ifdef ASSERT
+  uint container_count_slow();
+#endif
+
  public:
 
   VirtualSpaceNode(size_t byte_size);
-  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {}
+  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
   ~VirtualSpaceNode();
 
   // address of next available space in _virtual_space;
@@ -282,15 +296,22 @@
   MemRegion* reserved() { return &_reserved; }
   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 
-  // Returns true if "word_size" is available in the virtual space
+  // Returns true if "word_size" is available in the VirtualSpace
   bool is_available(size_t word_size) { return _top + word_size <= end(); }
 
   MetaWord* top() const { return _top; }
   void inc_top(size_t word_size) { _top += word_size; }
 
+  uintx container_count() { return _container_count; }
+  void dec_container_count();
+#ifdef ASSERT
+  void verify_container_count();
+#endif
+
   // used and capacity in this single entry in the list
   size_t used_words_in_vs() const;
   size_t capacity_words_in_vs() const;
+  size_t free_words_in_vs() const;
 
   bool initialize();
 
@@ -306,6 +327,10 @@
   bool expand_by(size_t words, bool pre_touch = false);
   bool shrink_by(size_t words);
 
+  // In preparation for deleting this node, remove all the chunks
+  // in the node from any freelist.
+  void purge(ChunkManager* chunk_manager);
+
 #ifdef ASSERT
   // Debug support
   static void verify_virtual_space_total();
@@ -317,7 +342,7 @@
 };
 
   // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) {
+VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
   // align up to vm allocation granularity
   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 
@@ -341,6 +366,39 @@
   MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 }
 
+void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
+  Metachunk* chunk = first_chunk();
+  Metachunk* invalid_chunk = (Metachunk*) top();
+  while (chunk < invalid_chunk) {
+    assert(chunk->is_free(), "Should be marked free");
+    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+    chunk_manager->remove_chunk(chunk);
+    assert(chunk->next() == NULL &&
+           chunk->prev() == NULL,
+           "Was not removed from its list");
+    chunk = (Metachunk*) next;
+  }
+}
+
+#ifdef ASSERT
+uint VirtualSpaceNode::container_count_slow() {
+  uint count = 0;
+  Metachunk* chunk = first_chunk();
+  Metachunk* invalid_chunk = (Metachunk*) top();
+  while (chunk < invalid_chunk) {
+    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+    // Don't count the chunks on the free lists.  Those are
+    // still part of the VirtualSpaceNode but not currently
+    // counted.
+    if (!chunk->is_free()) {
+      count++;
+    }
+    chunk = (Metachunk*) next;
+  }
+  return count;
+}
+#endif
+
 // List of VirtualSpaces for metadata allocation.
 // It has a  _next link for singly linked list and a MemRegion
 // for total space in the VirtualSpace.
@@ -390,6 +448,8 @@
   VirtualSpaceList(size_t word_size);
   VirtualSpaceList(ReservedSpace rs);
 
+  size_t free_bytes();
+
   Metachunk* get_new_chunk(size_t word_size,
                            size_t grow_chunks_by_words,
                            size_t medium_chunk_bunch);
@@ -410,14 +470,14 @@
   void initialize(size_t word_size);
 
   size_t virtual_space_total() { return _virtual_space_total; }
-  void inc_virtual_space_total(size_t v) {
-    Atomic::add_ptr(v, &_virtual_space_total);
-  }
-
-  size_t virtual_space_count() { return _virtual_space_count; }
-  void inc_virtual_space_count() {
-    Atomic::inc_ptr(&_virtual_space_count);
-  }
+
+  void inc_virtual_space_total(size_t v);
+  void dec_virtual_space_total(size_t v);
+  void inc_virtual_space_count();
+  void dec_virtual_space_count();
+
+  // Unlink empty VirtualSpaceNodes and free them.
+  void purge();
 
   // Used and capacity in the entire list of virtual spaces.
   // These are global values shared by all Metaspaces
@@ -520,7 +580,11 @@
   bool has_small_chunk_limit() { return !vs_list()->is_class(); }
 
   // Sum of all space in allocated chunks
-  size_t _allocation_total;
+  size_t _allocated_blocks_words;
+
+  // Sum of all allocated chunks
+  size_t _allocated_chunks_words;
+  size_t _allocated_chunks_count;
 
   // Free lists of blocks are per SpaceManager since they
   // are assumed to be in chunks in use by the SpaceManager
@@ -576,12 +640,27 @@
   size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
   size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
 
-  size_t allocation_total() const { return _allocation_total; }
-  void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); }
+  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
+  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
+  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
+  size_t allocated_chunks_count() const { return _allocated_chunks_count; }
+
   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 
   static Mutex* expand_lock() { return _expand_lock; }
 
+  // Increment the per-Metaspace and global running sums for Metachunks
+  // by the given size.  This is used when a Metachunk is added to
+  // the in-use list.
+  void inc_size_metrics(size_t words);
+  // Increment the per-Metaspace and global running sums for Metablocks
+  // by the given size.  This is used when a Metablock is allocated.
+  void inc_used_metrics(size_t words);
+  // Delete this SpaceManager's portion of the running sums.  That is, the
+  // global running sums for Metachunks and Metablocks are decremented by
+  // the amounts contributed by all the Metachunks in use by this SpaceManager.
+  void dec_total_from_size_metrics();
+
   // Set the sizes for the initial chunks.
   void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                                size_t* chunk_word_size,
@@ -627,7 +706,7 @@
   void verify_chunk_size(Metachunk* chunk);
   NOT_PRODUCT(void mangle_freed_chunks();)
 #ifdef ASSERT
-  void verify_allocation_total();
+  void verify_allocated_blocks_words();
 #endif
 };
 
@@ -641,6 +720,28 @@
             SpaceManager::_expand_lock_name,
             Mutex::_allow_vm_block_flag);
 
+void VirtualSpaceNode::inc_container_count() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _container_count++;
+  assert(_container_count == container_count_slow(),
+         err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+                 "container_count_slow() " SIZE_FORMAT,
+                 _container_count, container_count_slow()));
+}
+
+void VirtualSpaceNode::dec_container_count() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _container_count--;
+}
+
+#ifdef ASSERT
+void VirtualSpaceNode::verify_container_count() {
+  assert(_container_count == container_count_slow(),
+    err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+            "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
+}
+#endif
+
 // BlockFreelist methods
 
 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
@@ -701,6 +802,10 @@
 
 VirtualSpaceNode::~VirtualSpaceNode() {
   _rs.release();
+#ifdef ASSERT
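+  // Fill the deleted node with a recognizable pattern so stale references
+  // to it are easier to spot in debug builds.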
+  size_t word_size = sizeof(*this) / BytesPerWord;
+  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
+#endif
 }
 
 size_t VirtualSpaceNode::used_words_in_vs() const {
@@ -712,6 +817,9 @@
   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 }
 
+size_t VirtualSpaceNode::free_words_in_vs() const {
+  return pointer_delta(end(), top(), sizeof(MetaWord));
+}
 
 // Allocates the chunk from the virtual space only.
 // This interface is also used internally for debugging.  Not all
@@ -733,8 +841,8 @@
   // Take the space  (bump top on the current virtual space).
   inc_top(chunk_word_size);
 
-  // Point the chunk at the space
-  Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size);
+  // Initialize the chunk
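+  // Placement new constructs the Metachunk header in place at chunk_limit;
+  // the header and its payload together occupy chunk_word_size words.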
+  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
   return result;
 }
 
@@ -762,9 +870,11 @@
 
 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
-  Metachunk* result = NULL;
-
-  return take_from_committed(chunk_word_size);
+  Metachunk* result = take_from_committed(chunk_word_size);
+  if (result != NULL) {
+    inc_container_count();
+  }
+  return result;
 }
 
 Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
@@ -843,6 +953,83 @@
   }
 }
 
+void VirtualSpaceList::inc_virtual_space_total(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _virtual_space_total = _virtual_space_total + v;
+}
+void VirtualSpaceList::dec_virtual_space_total(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _virtual_space_total = _virtual_space_total - v;
+}
+
+void VirtualSpaceList::inc_virtual_space_count() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _virtual_space_count++;
+}
+void VirtualSpaceList::dec_virtual_space_count() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _virtual_space_count--;
+}
+
+void ChunkManager::remove_chunk(Metachunk* chunk) {
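+  // Humongous chunks are kept in a dictionary keyed by size; all other
+  // chunk sizes are kept on fixed freelists indexed by ChunkIndex.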
+  size_t word_size = chunk->word_size();
+  ChunkIndex index = list_index(word_size);
+  if (index != HumongousIndex) {
+    free_chunks(index)->remove_chunk(chunk);
+  } else {
+    humongous_dictionary()->remove_chunk(chunk);
+  }
+
+  // Chunk is being removed from the chunks free list.
+  dec_free_chunks_total(chunk->capacity_word_size());
+}
+
+// Walk the list of VirtualSpaceNodes and delete
+// nodes with a 0 container_count.  Remove Metachunks in
+// the node from their respective freelists.
+void VirtualSpaceList::purge() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  // Don't use a VirtualSpaceListIterator because this
+  // list is being changed and a straightforward use of an iterator is not safe.
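+  // purged_vsl remembers the last node deleted so the ASSERT block below
+  // can verify that it is no longer reachable from the list.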
+  VirtualSpaceNode* purged_vsl = NULL;
+  VirtualSpaceNode* prev_vsl = virtual_space_list();
+  VirtualSpaceNode* next_vsl = prev_vsl;
+  while (next_vsl != NULL) {
+    VirtualSpaceNode* vsl = next_vsl;
+    next_vsl = vsl->next();
+    // Don't free the current virtual space since it will likely
+    // be needed soon.
+    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
+      // Unlink it from the list
+      if (prev_vsl == vsl) {
+        // This is the case of the current node being the first node.
+        assert(vsl == virtual_space_list(), "Expected to be the first node");
+        set_virtual_space_list(vsl->next());
+      } else {
+        prev_vsl->set_next(vsl->next());
+      }
+
+      vsl->purge(chunk_manager());
+      dec_virtual_space_total(vsl->reserved()->word_size());
+      dec_virtual_space_count();
+      purged_vsl = vsl;
+      delete vsl;
+    } else {
+      prev_vsl = vsl;
+    }
+  }
+#ifdef ASSERT
+  if (purged_vsl != NULL) {
+    // List should be stable enough to use an iterator here.
+    VirtualSpaceListIterator iter(virtual_space_list());
+    while (iter.repeat()) {
+      VirtualSpaceNode* vsl = iter.get_next();
+      assert(vsl != purged_vsl, "Purge of vsl failed");
+    }
+  }
+#endif
+}
+
 size_t VirtualSpaceList::used_words_sum() {
   size_t allocated_by_vs = 0;
   VirtualSpaceListIterator iter(virtual_space_list());
@@ -907,6 +1094,10 @@
   link_vs(class_entry, rs.size()/BytesPerWord);
 }
 
+size_t VirtualSpaceList::free_bytes() {
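+  // Free space remaining in the virtual space node at the head of the list.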
+  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
+}
+
 // Allocate another meta virtual space and add it to the list.
 bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
@@ -955,8 +1146,10 @@
   // Get a chunk from the chunk freelist
   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
 
-  // Allocate a chunk out of the current virtual space.
-  if (next == NULL) {
+  if (next != NULL) {
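+    // The chunk was taken from the freelist, so count it against its
+    // containing VirtualSpaceNode again.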
+    next->container()->inc_container_count();
+  } else {
+    // Allocate a chunk out of the current virtual space.
     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
   }
 
@@ -1045,9 +1238,9 @@
 //
 // After the GC the compute_new_size() for MetaspaceGC is called to
 // resize the capacity of the metaspaces.  The current implementation
-// is based on the flags MinMetaspaceFreeRatio and MaxHeapFreeRatio used
+// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
 // to resize the Java heap by some GC's.  New flags can be implemented
-// if really needed.  MinHeapFreeRatio is used to calculate how much
+// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
 // free space is desirable in the metaspace capacity to decide how much
 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
 // free space is desirable in the metaspace capacity before decreasing
@@ -1082,7 +1275,11 @@
 }
 
 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
+
+  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
   // If the user wants a limit, impose one.
+  size_t max_metaspace_size_bytes = MaxMetaspaceSize;
+  size_t metaspace_size_bytes = MetaspaceSize;
   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
       MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) {
     return false;
@@ -1094,57 +1291,48 @@
 
   // If this is part of an allocation after a GC, expand
   // unconditionally.
-  if(MetaspaceGC::expand_after_GC()) {
+  if (MetaspaceGC::expand_after_GC()) {
     return true;
   }
 
-  size_t metaspace_size_words = MetaspaceSize / BytesPerWord;
+
 
   // If the capacity is below the minimum capacity, allow the
   // expansion.  Also set the high-water-mark (capacity_until_GC)
   // to that minimum capacity so that a GC will not be induced
   // until that minimum capacity is exceeded.
-  if (vsl->capacity_words_sum() < metaspace_size_words ||
+  if (committed_capacity_bytes < metaspace_size_bytes ||
       capacity_until_GC() == 0) {
-    set_capacity_until_GC(metaspace_size_words);
+    set_capacity_until_GC(metaspace_size_bytes);
     return true;
   } else {
-    if (vsl->capacity_words_sum() < capacity_until_GC()) {
+    if (committed_capacity_bytes < capacity_until_GC()) {
       return true;
     } else {
       if (TraceMetadataChunkAllocation && Verbose) {
         gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                         "  capacity_until_GC " SIZE_FORMAT
-                        "  capacity_words_sum " SIZE_FORMAT
-                        "  used_words_sum " SIZE_FORMAT
-                        "  free chunks " SIZE_FORMAT
-                        "  free chunks count %d",
+                        "  allocated_capacity_bytes " SIZE_FORMAT,
                         word_size,
                         capacity_until_GC(),
-                        vsl->capacity_words_sum(),
-                        vsl->used_words_sum(),
-                        vsl->chunk_manager()->free_chunks_total(),
-                        vsl->chunk_manager()->free_chunks_count());
+                        MetaspaceAux::allocated_capacity_bytes());
       }
       return false;
     }
   }
 }
 
-// Variables are in bytes
+
 
 void MetaspaceGC::compute_new_size() {
   assert(_shrink_factor <= 100, "invalid shrink factor");
   uint current_shrink_factor = _shrink_factor;
   _shrink_factor = 0;
 
-  VirtualSpaceList *vsl = Metaspace::space_list();
-
-  size_t capacity_after_gc = vsl->capacity_bytes_sum();
-  // Check to see if these two can be calculated without walking the CLDG
-  size_t used_after_gc = vsl->used_bytes_sum();
-  size_t capacity_until_GC = vsl->capacity_bytes_sum();
-  size_t free_after_gc = capacity_until_GC - used_after_gc;
+  // Until a faster way of calculating the "used" quantity is implemented,
+  // use "capacity".
+  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
+  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 
   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
@@ -1157,45 +1345,34 @@
                                   MetaspaceSize);
 
   if (PrintGCDetails && Verbose) {
-    const double free_percentage = ((double)free_after_gc) / capacity_until_GC;
     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
     gclog_or_tty->print_cr("  "
                   "  minimum_free_percentage: %6.2f"
                   "  maximum_used_percentage: %6.2f",
                   minimum_free_percentage,
                   maximum_used_percentage);
-    double d_free_after_gc = free_after_gc / (double) K;
     gclog_or_tty->print_cr("  "
-                  "   free_after_gc       : %6.1fK"
-                  "   used_after_gc       : %6.1fK"
-                  "   capacity_after_gc   : %6.1fK"
-                  "   metaspace HWM     : %6.1fK",
-                  free_after_gc / (double) K,
-                  used_after_gc / (double) K,
-                  capacity_after_gc / (double) K,
-                  capacity_until_GC / (double) K);
-    gclog_or_tty->print_cr("  "
-                  "   free_percentage: %6.2f",
-                  free_percentage);
+                  "   used_after_gc       : %6.1fKB",
+                  used_after_gc / (double) K);
   }
 
 
+  size_t shrink_bytes = 0;
   if (capacity_until_GC < minimum_desired_capacity) {
     // If we have less capacity below the metaspace HWM, then
     // increment the HWM.
     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
     // Don't expand unless it's significant
     if (expand_bytes >= MinMetaspaceExpansion) {
-      size_t expand_words = expand_bytes / BytesPerWord;
-      MetaspaceGC::inc_capacity_until_GC(expand_words);
+      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
     }
     if (PrintGCDetails && Verbose) {
-      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
+      size_t new_capacity_until_GC = capacity_until_GC;
       gclog_or_tty->print_cr("    expanding:"
-                    "  minimum_desired_capacity: %6.1fK"
-                    "  expand_words: %6.1fK"
-                    "  MinMetaspaceExpansion: %6.1fK"
-                    "  new metaspace HWM:  %6.1fK",
+                    "  minimum_desired_capacity: %6.1fKB"
+                    "  expand_bytes: %6.1fKB"
+                    "  MinMetaspaceExpansion: %6.1fKB"
+                    "  new metaspace HWM:  %6.1fKB",
                     minimum_desired_capacity / (double) K,
                     expand_bytes / (double) K,
                     MinMetaspaceExpansion / (double) K,
@@ -1205,11 +1382,10 @@
   }
 
   // No expansion, now see if we want to shrink
-  size_t shrink_words = 0;
   // We would never want to shrink more than this
-  size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity;
-  assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT,
-    max_shrink_words));
+  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
+  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
+    max_shrink_bytes));
 
   // Should shrinking be considered?
   if (MaxMetaspaceFreeRatio < 100) {
@@ -1219,17 +1395,15 @@
     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
     maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                     MetaspaceSize);
-    if (PrintGC && Verbose) {
+    if (PrintGCDetails && Verbose) {
       gclog_or_tty->print_cr("  "
                              "  maximum_free_percentage: %6.2f"
                              "  minimum_used_percentage: %6.2f",
                              maximum_free_percentage,
                              minimum_used_percentage);
       gclog_or_tty->print_cr("  "
-                             "  capacity_until_GC: %6.1fK"
-                             "  minimum_desired_capacity: %6.1fK"
-                             "  maximum_desired_capacity: %6.1fK",
-                             capacity_until_GC / (double) K,
+                             "  minimum_desired_capacity: %6.1fKB"
+                             "  maximum_desired_capacity: %6.1fKB",
                              minimum_desired_capacity / (double) K,
                              maximum_desired_capacity / (double) K);
     }
@@ -1239,17 +1413,17 @@
 
     if (capacity_until_GC > maximum_desired_capacity) {
       // Capacity too large, compute shrinking size
-      shrink_words = capacity_until_GC - maximum_desired_capacity;
+      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
       // We don't want shrink all the way back to initSize if people call
       // System.gc(), because some programs do that between "phases" and then
       // we'd just have to grow the heap up again for the next phase.  So we
       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
       // on the third call, and 100% by the fourth call.  But if we recompute
       // size without shrinking, it goes back to 0%.
-      shrink_words = shrink_words / 100 * current_shrink_factor;
-      assert(shrink_words <= max_shrink_words,
+      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
+      assert(shrink_bytes <= max_shrink_bytes,
         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
-          shrink_words, max_shrink_words));
+          shrink_bytes, max_shrink_bytes));
       if (current_shrink_factor == 0) {
         _shrink_factor = 10;
       } else {
@@ -1263,11 +1437,11 @@
                       MetaspaceSize / (double) K,
                       maximum_desired_capacity / (double) K);
         gclog_or_tty->print_cr("  "
-                      "  shrink_words: %.1fK"
+                      "  shrink_bytes: %.1fK"
                       "  current_shrink_factor: %d"
                       "  new shrink factor: %d"
                       "  MinMetaspaceExpansion: %.1fK",
-                      shrink_words / (double) K,
+                      shrink_bytes / (double) K,
                       current_shrink_factor,
                       _shrink_factor,
                       MinMetaspaceExpansion / (double) K);
@@ -1275,23 +1449,11 @@
     }
   }
 
-
   // Don't shrink unless it's significant
-  if (shrink_words >= MinMetaspaceExpansion) {
-    VirtualSpaceNode* csp = vsl->current_virtual_space();
-    size_t available_to_shrink = csp->capacity_words_in_vs() -
-      csp->used_words_in_vs();
-    shrink_words = MIN2(shrink_words, available_to_shrink);
-    csp->shrink_by(shrink_words);
-    MetaspaceGC::dec_capacity_until_GC(shrink_words);
-    if (PrintGCDetails && Verbose) {
-      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
-      gclog_or_tty->print_cr("  metaspace HWM: %.1fK", new_capacity_until_GC / (double) K);
-    }
+  if (shrink_bytes >= MinMetaspaceExpansion &&
+      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
+    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
   }
-  assert(used_after_gc <= vsl->capacity_bytes_sum(),
-         "sanity check");
-
 }
 
 // Metadebug methods
@@ -1567,9 +1729,6 @@
       }
       // Chunk is being removed from the chunks free list.
       dec_free_chunks_total(chunk->capacity_word_size());
-#ifdef ASSERT
-      chunk->set_is_free(false);
-#endif
     } else {
       return NULL;
     }
@@ -1578,6 +1737,11 @@
   // Remove it from the links to this freelist
   chunk->set_next(NULL);
   chunk->set_prev(NULL);
+#ifdef ASSERT
+  // Chunk is no longer on any freelist.  Setting is_free to false makes
+  // container_count_slow() work correctly.
+  chunk->set_is_free(false);
+#endif
   slow_locked_verify();
   return chunk;
 }
@@ -1692,18 +1856,28 @@
 }
 
 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  size_t sum = 0;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    Metachunk* chunk = chunks_in_use(i);
-    while (chunk != NULL) {
-      // Just changed this sum += chunk->capacity_word_size();
-      // sum += chunk->word_size() - Metachunk::overhead();
-      sum += chunk->capacity_word_size();
-      chunk = chunk->next();
+  // For CMS use "allocated_chunks_words()" which does not need the
+  // Metaspace lock.  For the other collectors sum over the
+  // lists.  Use both methods as a check that "allocated_chunks_words()"
+  // is correct.  That is, sum_capacity_in_chunks() is too expensive
+  // to use in the product and allocated_chunks_words() should be used
+  // but allow for  checking that allocated_chunks_words() returns the same
+  // value as sum_capacity_in_chunks_in_use() which is the definitive
+  // answer.
+  if (UseConcMarkSweepGC) {
+    return allocated_chunks_words();
+  } else {
+    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
+    size_t sum = 0;
+    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+      Metachunk* chunk = chunks_in_use(i);
+      while (chunk != NULL) {
+        sum += chunk->capacity_word_size();
+        chunk = chunk->next();
+      }
     }
+    return sum;
   }
-  return sum;
 }
 
 size_t SpaceManager::sum_count_in_chunks_in_use() {
@@ -1861,12 +2035,44 @@
 SpaceManager::SpaceManager(Mutex* lock,
                            VirtualSpaceList* vs_list) :
   _vs_list(vs_list),
-  _allocation_total(0),
+  _allocated_blocks_words(0),
+  _allocated_chunks_words(0),
+  _allocated_chunks_count(0),
   _lock(lock)
 {
   initialize();
 }
 
+void SpaceManager::inc_size_metrics(size_t words) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  // Total of allocated Metachunks and allocated Metachunks count
+  // for each SpaceManager
+  _allocated_chunks_words = _allocated_chunks_words + words;
+  _allocated_chunks_count++;
+  // Global total of capacity in allocated Metachunks
+  MetaspaceAux::inc_capacity(words);
+  // Global total of allocated Metablocks.
+  // used_words_slow() includes the overhead in each
+  // Metachunk so include it in the used when the
+  // Metachunk is first added (so only added once per
+  // Metachunk).
+  MetaspaceAux::inc_used(Metachunk::overhead());
+}
+
+void SpaceManager::inc_used_metrics(size_t words) {
+  // Add to the per SpaceManager total
+  Atomic::add_ptr(words, &_allocated_blocks_words);
+  // Add to the global total
+  MetaspaceAux::inc_used(words);
+}
+
+void SpaceManager::dec_total_from_size_metrics() {
+  MetaspaceAux::dec_capacity(allocated_chunks_words());
+  MetaspaceAux::dec_used(allocated_blocks_words());
+  // Also deduct the overhead per Metachunk
+  MetaspaceAux::dec_used(allocated_chunks_count() * Metachunk::overhead());
+}
+
 void SpaceManager::initialize() {
   Metadebug::init_allocation_fail_alot_count();
   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
@@ -1887,11 +2093,13 @@
   assert_lock_strong(SpaceManager::expand_lock());
   Metachunk* cur = chunks;
 
-  // This return chunks one at a time.  If a new
+  // This returns chunks one at a time.  If a new
   // class List can be created that is a base class
   // of FreeList then something like FreeList::prepend()
   // can be used in place of this loop
   while (cur != NULL) {
+    assert(cur->container() != NULL, "Container should have been set");
+    cur->container()->dec_container_count();
     // Capture the next link before it is changed
     // by the call to return_chunk_at_head();
     Metachunk* next = cur->next();
@@ -1903,7 +2111,10 @@
 
 SpaceManager::~SpaceManager() {
   // This call this->_lock which can't be done while holding expand_lock()
-  const size_t in_use_before = sum_capacity_in_chunks_in_use();
+  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
+    err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
+            " allocated_chunks_words() " SIZE_FORMAT,
+            sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
 
   MutexLockerEx fcl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
@@ -1912,17 +2123,19 @@
 
   chunk_manager->slow_locked_verify();
 
+  dec_total_from_size_metrics();
+
   if (TraceMetadataChunkAllocation && Verbose) {
     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
     locked_print_chunks_in_use_on(gclog_or_tty);
   }
 
-  // Mangle freed memory.
-  NOT_PRODUCT(mangle_freed_chunks();)
+  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
+  // is needed during the freeing of VirtualSpaceNodes.
 
   // Have to update before the chunks_in_use lists are emptied
   // below.
-  chunk_manager->inc_free_chunks_total(in_use_before,
+  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
                                        sum_count_in_chunks_in_use());
 
   // Add all the chunks in use by this space manager
@@ -1978,6 +2191,7 @@
                    " granularity %d",
                    humongous_chunks->word_size(), HumongousChunkGranularity));
     Metachunk* next_humongous_chunks = humongous_chunks->next();
+    humongous_chunks->container()->dec_container_count();
     chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
     humongous_chunks = next_humongous_chunks;
   }
@@ -1987,7 +2201,6 @@
                      chunk_manager->humongous_dictionary()->total_count(),
                      chunk_size_name(HumongousIndex));
   }
-  set_chunks_in_use(HumongousIndex, NULL);
   chunk_manager->slow_locked_verify();
 }
 
@@ -2067,12 +2280,17 @@
     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
   }
 
+  // Add to the running sum of capacity
+  inc_size_metrics(new_chunk->word_size());
+
   assert(new_chunk->is_empty(), "Not ready for reuse");
   if (TraceMetadataChunkAllocation && Verbose) {
     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                         sum_count_in_chunks_in_use());
     new_chunk->print_on(gclog_or_tty);
-    vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+    if (vs_list() != NULL) {
+      vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+    }
   }
 }
 
@@ -2143,7 +2361,7 @@
   // of memory if this returns null.
   if (DumpSharedSpaces) {
     assert(current_chunk() != NULL, "should never happen");
-    inc_allocation_total(word_size);
+    inc_used_metrics(word_size);
     return current_chunk()->allocate(word_size); // caller handles null result
   }
   if (current_chunk() != NULL) {
@@ -2154,7 +2372,7 @@
     result = grow_and_allocate(word_size);
   }
   if (result > 0) {
-    inc_allocation_total(word_size);
+    inc_used_metrics(word_size);
     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
            "Head of the list is being allocated");
   }
@@ -2188,20 +2406,14 @@
 }
 
 #ifdef ASSERT
-void SpaceManager::verify_allocation_total() {
+void SpaceManager::verify_allocated_blocks_words() {
   // Verification is only guaranteed at a safepoint.
-  if (SafepointSynchronize::is_at_safepoint()) {
-    gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT
-                           " sum_used_in_chunks_in_use " SIZE_FORMAT,
-                           this,
-                           allocation_total(),
-                           sum_used_in_chunks_in_use());
-  }
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  assert(allocation_total() == sum_used_in_chunks_in_use(),
+  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
+    "Verification can fail if the applications is running");
+  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
     err_msg("allocation total is not consistent " SIZE_FORMAT
             " vs " SIZE_FORMAT,
-            allocation_total(), sum_used_in_chunks_in_use()));
+            allocated_blocks_words(), sum_used_in_chunks_in_use()));
 }
 
 #endif
@@ -2257,14 +2469,65 @@
 
 // MetaspaceAux
 
-size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
+
+size_t MetaspaceAux::_allocated_capacity_words = 0;
+size_t MetaspaceAux::_allocated_used_words = 0;
+
+size_t MetaspaceAux::free_bytes() {
+  size_t result = 0;
+  if (Metaspace::class_space_list() != NULL) {
+    result = result + Metaspace::class_space_list()->free_bytes();
+  }
+  if (Metaspace::space_list() != NULL) {
+    result = result + Metaspace::space_list()->free_bytes();
+  }
+  return result;
+}
+
+void MetaspaceAux::dec_capacity(size_t words) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  assert(words <= _allocated_capacity_words,
+    err_msg("About to decrement below 0: words " SIZE_FORMAT
+            " is greater than _allocated_capacity_words " SIZE_FORMAT,
+            words, _allocated_capacity_words));
+  _allocated_capacity_words = _allocated_capacity_words - words;
+}
+
+void MetaspaceAux::inc_capacity(size_t words) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  // Needs to be atomic
+  _allocated_capacity_words = _allocated_capacity_words + words;
+}
+
+void MetaspaceAux::dec_used(size_t words) {
+  assert(words <= _allocated_used_words,
+    err_msg("About to decrement below 0: words " SIZE_FORMAT
+            " is greater than _allocated_used_words " SIZE_FORMAT,
+            words, _allocated_used_words));
+  // For CMS, deallocation of the Metaspaces occurs during the
+  // sweep, which is a concurrent phase.  Protection by the expand_lock()
+  // is not enough since allocation is done on a per-Metaspace basis
+  // and is protected by the Metaspace lock.
+  jlong minus_words = (jlong) - (jlong) words;
+  Atomic::add_ptr(minus_words, &_allocated_used_words);
+}
+
+void MetaspaceAux::inc_used(size_t words) {
+  // _allocated_used_words tracks allocations for
+  // each piece of metadata.  Those allocations are
+  // generally done concurrently by different application
+  // threads so must be done atomically.
+  Atomic::add_ptr(words, &_allocated_used_words);
+}
+
+size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
   size_t used = 0;
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
     Metaspace* msp = iter.get_next();
-    // Sum allocation_total for each metaspace
+    // Sum allocated_blocks_words for each metaspace
     if (msp != NULL) {
-      used += msp->used_words(mdtype);
+      used += msp->used_words_slow(mdtype);
     }
   }
   return used * BytesPerWord;
@@ -2282,13 +2545,15 @@
   return free * BytesPerWord;
 }
 
-size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
-  size_t capacity = free_chunks_total(mdtype);
+size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
+  // Don't count the space in the freelists.  That space will be
+  // added to the capacity calculation as needed.
+  size_t capacity = 0;
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
     Metaspace* msp = iter.get_next();
     if (msp != NULL) {
-      capacity += msp->capacity_words(mdtype);
+      capacity += msp->capacity_words_slow(mdtype);
     }
   }
   return capacity * BytesPerWord;
@@ -2315,23 +2580,30 @@
   return free_chunks_total(mdtype) * BytesPerWord;
 }
 
+size_t MetaspaceAux::free_chunks_total() {
+  return free_chunks_total(Metaspace::ClassType) +
+         free_chunks_total(Metaspace::NonClassType);
+}
+
+size_t MetaspaceAux::free_chunks_total_in_bytes() {
+  return free_chunks_total() * BytesPerWord;
+}
+
 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
   gclog_or_tty->print(", [Metaspace:");
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print(" "  SIZE_FORMAT
                         "->" SIZE_FORMAT
-                        "("  SIZE_FORMAT "/" SIZE_FORMAT ")",
+                        "("  SIZE_FORMAT ")",
                         prev_metadata_used,
-                        used_in_bytes(),
-                        capacity_in_bytes(),
+                        allocated_capacity_bytes(),
                         reserved_in_bytes());
   } else {
     gclog_or_tty->print(" "  SIZE_FORMAT "K"
                         "->" SIZE_FORMAT "K"
-                        "("  SIZE_FORMAT "K/" SIZE_FORMAT "K)",
+                        "("  SIZE_FORMAT "K)",
                         prev_metadata_used / K,
-                        used_in_bytes()/ K,
-                        capacity_in_bytes()/K,
+                        allocated_capacity_bytes() / K,
                         reserved_in_bytes()/ K);
   }
 
@@ -2346,23 +2618,30 @@
   out->print_cr(" Metaspace total "
                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                 " reserved " SIZE_FORMAT "K",
-                capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K);
-  out->print_cr("  data space     "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K);
-  out->print_cr("  class space    "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K);
+                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
+#if 0
+// The calls to capacity_bytes_slow() and used_bytes_slow() cause
+// lock ordering assertion failures with some collectors.  Do
+// not include this code until the lock ordering is fixed.
+  if (PrintGCDetails && Verbose) {
+    out->print_cr("  data space     "
+                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+                  " reserved " SIZE_FORMAT "K",
+                  capacity_bytes_slow(nct)/K, used_bytes_slow(nct)/K, reserved_in_bytes(nct)/K);
+    out->print_cr("  class space    "
+                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+                  " reserved " SIZE_FORMAT "K",
+                  capacity_bytes_slow(ct)/K, used_bytes_slow(ct)/K, reserved_in_bytes(ct)/K);
+  }
+#endif
 }
 
 // Print information for class space and data space separately.
 // This is almost the same as above.
 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
   size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
-  size_t capacity_bytes = capacity_in_bytes(mdtype);
-  size_t used_bytes = used_in_bytes(mdtype);
+  size_t capacity_bytes = capacity_bytes_slow(mdtype);
+  size_t used_bytes = used_bytes_slow(mdtype);
   size_t free_bytes = free_in_bytes(mdtype);
   size_t used_and_free = used_bytes + free_bytes +
                            free_chunks_capacity_bytes;
@@ -2435,6 +2714,36 @@
   Metaspace::class_space_list()->chunk_manager()->verify();
 }
 
+void MetaspaceAux::verify_capacity() {
+#ifdef ASSERT
+  size_t running_sum_capacity_bytes = allocated_capacity_bytes();
+  // Verify the running sum of capacity against the slow sum over all Metaspaces.
+  size_t capacity_in_use_bytes = capacity_bytes_slow();
+  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
+    err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
+            " capacity_bytes_slow()" SIZE_FORMAT,
+            running_sum_capacity_bytes, capacity_in_use_bytes));
+#endif
+}
+
+void MetaspaceAux::verify_used() {
+#ifdef ASSERT
+  size_t running_sum_used_bytes = allocated_used_bytes();
+  // Verify the running sum of used against the slow sum over all Metaspaces.
+  size_t used_in_use_bytes = used_bytes_slow();
+  assert(allocated_used_bytes() == used_in_use_bytes,
+    err_msg("allocated_used_bytes() " SIZE_FORMAT
+            " used_bytes_slow()()" SIZE_FORMAT,
+            allocated_used_bytes(), used_in_use_bytes));
+#endif
+}
+
+void MetaspaceAux::verify_metrics() {
+  verify_capacity();
+  verify_used();
+}
+
+
 // Metaspace methods
 
 size_t Metaspace::_first_chunk_word_size = 0;
@@ -2584,8 +2893,8 @@
   MetaWord* result;
   MetaspaceGC::set_expand_after_GC(true);
   size_t before_inc = MetaspaceGC::capacity_until_GC();
-  size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size);
-  MetaspaceGC::inc_capacity_until_GC(delta_words);
+  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
+  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
       " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
@@ -2603,8 +2912,8 @@
   return (char*)vsm()->current_chunk()->bottom();
 }
 
-size_t Metaspace::used_words(MetadataType mdtype) const {
-  // return vsm()->allocation_total();
+size_t Metaspace::used_words_slow(MetadataType mdtype) const {
+  // return vsm()->allocated_used_words();
   return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
                                vsm()->sum_used_in_chunks_in_use();  // includes overhead!
 }
@@ -2619,16 +2928,24 @@
 // have been made. Don't include space in the global freelist and
 // in the space available in the dictionary which
 // is already counted in some chunk.
-size_t Metaspace::capacity_words(MetadataType mdtype) const {
+size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
   return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
                                vsm()->sum_capacity_in_chunks_in_use();
 }
 
+size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
+  return used_words_slow(mdtype) * BytesPerWord;
+}
+
+size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
+  return capacity_words_slow(mdtype) * BytesPerWord;
+}
+
 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
   if (SafepointSynchronize::is_at_safepoint()) {
     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
     // Don't take Heap_lock
-    MutexLocker ml(vsm()->lock());
+    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
       // Dark matter.  Too small for dictionary.
 #ifdef ASSERT
@@ -2642,7 +2959,7 @@
       vsm()->deallocate(ptr, word_size);
     }
   } else {
-    MutexLocker ml(vsm()->lock());
+    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
 
     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
       // Dark matter.  Too small for dictionary.
@@ -2716,6 +3033,13 @@
   return Metablock::initialize(result, word_size);
 }
 
+void Metaspace::purge() {
+  MutexLockerEx cl(SpaceManager::expand_lock(),
+                   Mutex::_no_safepoint_check_flag);
+  space_list()->purge();
+  class_space_list()->purge();
+}
+
 void Metaspace::print_on(outputStream* out) const {
   // Print both class virtual space counts and metaspace.
   if (Verbose) {
@@ -2733,7 +3057,8 @@
   // aren't deleted presently.  When they are, some sort of locking might
   // be needed.  Note, locking this can cause inversion problems with the
   // caller in MetaspaceObj::is_metadata() function.
-  return space_list()->contains(ptr) || class_space_list()->contains(ptr);
+  return space_list()->contains(ptr) ||
+         class_space_list()->contains(ptr);
 }
 
 void Metaspace::verify() {
@@ -2742,10 +3067,6 @@
 }
 
 void Metaspace::dump(outputStream* const out) const {
-  if (UseMallocOnly) {
-    // Just print usage for now
-    out->print_cr("usage %d", used_words(Metaspace::NonClassType));
-  }
   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
   vsm()->dump(out);
   out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
--- a/hotspot/src/share/vm/memory/metaspace.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/metaspace.hpp	Thu May 16 11:47:51 2013 +0100
@@ -111,6 +111,10 @@
   SpaceManager* _class_vsm;
   SpaceManager* class_vsm() const { return _class_vsm; }
 
+  // Allocate space for metadata of type mdtype. This is space
+  // within a Metachunk and is used by
+  //   allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
+  // which returns a Metablock.
   MetaWord* allocate(size_t word_size, MetadataType mdtype);
 
   // Virtual Space lists for both classes and other metadata
@@ -133,11 +137,14 @@
   static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
 
   char*  bottom() const;
-  size_t used_words(MetadataType mdtype) const;
+  size_t used_words_slow(MetadataType mdtype) const;
   size_t free_words(MetadataType mdtype) const;
-  size_t capacity_words(MetadataType mdtype) const;
+  size_t capacity_words_slow(MetadataType mdtype) const;
   size_t waste_words(MetadataType mdtype) const;
 
+  size_t used_bytes_slow(MetadataType mdtype) const;
+  size_t capacity_bytes_slow(MetadataType mdtype) const;
+
   static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
                             bool read_only, MetadataType mdtype, TRAPS);
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
@@ -150,6 +157,9 @@
   static bool contains(const void *ptr);
   void dump(outputStream* const out) const;
 
+  // Free empty VirtualSpaceNodes in the virtual space lists.
+  static void purge();
+
   void print_on(outputStream* st) const;
   // Debugging support
   void verify();
@@ -158,28 +168,81 @@
 class MetaspaceAux : AllStatic {
 
   // Statistics for class space and data space in metaspace.
-  static size_t used_in_bytes(Metaspace::MetadataType mdtype);
+
+  // These methods iterate over the classloader data graph
+  // for the given Metaspace type.  These are slow.
+  static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
   static size_t free_in_bytes(Metaspace::MetadataType mdtype);
-  static size_t capacity_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
+
+  // Iterates over the virtual space list.
   static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
 
   static size_t free_chunks_total(Metaspace::MetadataType mdtype);
   static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
 
  public:
-  // Total of space allocated to metadata in all Metaspaces
-  static size_t used_in_bytes() {
-    return used_in_bytes(Metaspace::ClassType) +
-           used_in_bytes(Metaspace::NonClassType);
+  // Running sum of space in all Metachunks that has been
+  // allocated to a Metaspace.  This is used instead of
+  // iterating over all the classloaders
+  static size_t _allocated_capacity_words;
+  // Running sum of space in all Metachunks that is
+  // being used for metadata.
+  static size_t _allocated_used_words;
+
+ public:
+  // Decrement and increment _allocated_capacity_words
+  static void dec_capacity(size_t words);
+  static void inc_capacity(size_t words);
+
+  // Decrement and increment _allocated_used_words
+  static void dec_used(size_t words);
+  static void inc_used(size_t words);
+
+  // Total of space allocated to metadata in all Metaspaces.
+  // This sums the space used in each Metachunk by
+  // iterating over the classloader data graph
+  static size_t used_bytes_slow() {
+    return used_bytes_slow(Metaspace::ClassType) +
+           used_bytes_slow(Metaspace::NonClassType);
   }
 
-  // Total of available space in all Metaspaces
-  // Total of capacity allocated to all Metaspaces.  This includes
-  // space in Metachunks not yet allocated and in the Metachunk
-  // freelist.
-  static size_t capacity_in_bytes() {
-    return capacity_in_bytes(Metaspace::ClassType) +
-           capacity_in_bytes(Metaspace::NonClassType);
+  // Used by MetaspaceCounters
+  static size_t free_chunks_total();
+  static size_t free_chunks_total_in_bytes();
+
+  static size_t allocated_capacity_words() {
+    return _allocated_capacity_words;
+  }
+  static size_t allocated_capacity_bytes() {
+    return _allocated_capacity_words * BytesPerWord;
+  }
+
+  static size_t allocated_used_words() {
+    return _allocated_used_words;
+  }
+  static size_t allocated_used_bytes() {
+    return _allocated_used_words * BytesPerWord;
+  }
+
+  static size_t free_bytes();
+
+  // Total capacity in all Metaspaces
+  static size_t capacity_bytes_slow() {
+#ifdef PRODUCT
+    // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+    guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
+#endif
+    size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
+    size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
+    assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
+           err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+             " class_capacity + non_class_capacity " SIZE_FORMAT
+             " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
+             allocated_capacity_bytes(), class_capacity + non_class_capacity,
+             class_capacity, non_class_capacity));
+
+    return class_capacity + non_class_capacity;
   }
 
   // Total space reserved in all Metaspaces
@@ -198,6 +261,11 @@
   static void print_waste(outputStream* out);
   static void dump(outputStream* out);
   static void verify_free_chunks();
+  // Checks that the values returned by allocated_capacity_bytes() and
+  // capacity_bytes_slow() are the same.
+  static void verify_capacity();
+  static void verify_used();
+  static void verify_metrics();
 };
 
 // Metaspace are deallocated when their class loader are GC'ed.
@@ -232,7 +300,6 @@
  public:
 
   static size_t capacity_until_GC() { return _capacity_until_GC; }
-  static size_t capacity_until_GC_in_bytes() { return _capacity_until_GC * BytesPerWord; }
   static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
   static void dec_capacity_until_GC(size_t v) {
     _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
--- a/hotspot/src/share/vm/memory/metaspaceCounters.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.cpp	Thu May 16 11:47:51 2013 +0100
@@ -29,6 +29,16 @@
 
 MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;
 
+size_t MetaspaceCounters::calc_total_capacity() {
+  // The total capacity is the sum of
+  //   1) capacity of Metachunks in use by all Metaspaces
+  //   2) unused space at the end of each Metachunk
+  //   3) space in the freelist
+  size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
+    + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
+  return total_capacity;
+}
+
 MetaspaceCounters::MetaspaceCounters() :
     _capacity(NULL),
     _used(NULL),
@@ -36,8 +46,8 @@
   if (UsePerfData) {
     size_t min_capacity = MetaspaceAux::min_chunk_size();
     size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t curr_capacity = MetaspaceAux::capacity_in_bytes();
-    size_t used = MetaspaceAux::used_in_bytes();
+    size_t curr_capacity = calc_total_capacity();
+    size_t used = MetaspaceAux::allocated_used_bytes();
 
     initialize(min_capacity, max_capacity, curr_capacity, used);
   }
@@ -82,15 +92,13 @@
 
 void MetaspaceCounters::update_capacity() {
   assert(UsePerfData, "Should not be called unless being used");
-  assert(_capacity != NULL, "Should be initialized");
-  size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes();
-  _capacity->set_value(capacity_in_bytes);
+  size_t total_capacity = calc_total_capacity();
+  _capacity->set_value(total_capacity);
 }
 
 void MetaspaceCounters::update_used() {
   assert(UsePerfData, "Should not be called unless being used");
-  assert(_used != NULL, "Should be initialized");
-  size_t used_in_bytes = MetaspaceAux::used_in_bytes();
+  size_t used_in_bytes = MetaspaceAux::allocated_used_bytes();
   _used->set_value(used_in_bytes);
 }
 
--- a/hotspot/src/share/vm/memory/metaspaceCounters.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/metaspaceCounters.hpp	Thu May 16 11:47:51 2013 +0100
@@ -37,6 +37,7 @@
                   size_t max_capacity,
                   size_t curr_capacity,
                   size_t used);
+  size_t calc_total_capacity();
  public:
   MetaspaceCounters();
   ~MetaspaceCounters();
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Thu May 16 11:47:51 2013 +0100
@@ -376,18 +376,17 @@
   const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT;
   Metaspace* ro_space = _loader_data->ro_metaspace();
   Metaspace* rw_space = _loader_data->rw_metaspace();
-  const size_t BPW = BytesPerWord;
 
   // Allocated size of each space (may not be all occupied)
-  const size_t ro_alloced = ro_space->capacity_words(Metaspace::NonClassType) * BPW;
-  const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW;
+  const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType);
+  const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType);
   const size_t md_alloced = md_end-md_low;
   const size_t mc_alloced = mc_end-mc_low;
   const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced;
 
   // Occupied size of each space.
-  const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW;
-  const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW;
+  const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType);
+  const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType);
   const size_t md_bytes = size_t(md_top - md_low);
   const size_t mc_bytes = size_t(mc_top - mc_low);
 
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Thu May 16 11:47:51 2013 +0100
@@ -218,14 +218,13 @@
 static AlwaysTrueClosure always_true;
 
 void SharedHeap::process_weak_roots(OopClosure* root_closure,
-                                    CodeBlobClosure* code_roots,
-                                    OopClosure* non_root_closure) {
+                                    CodeBlobClosure* code_roots) {
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, root_closure);
 
   CodeCache::blobs_do(code_roots);
-    StringTable::oops_do(root_closure);
-  }
+  StringTable::oops_do(root_closure);
+}
 
 void SharedHeap::set_barrier_set(BarrierSet* bs) {
   _barrier_set = bs;
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp	Thu May 16 11:47:51 2013 +0100
@@ -249,8 +249,7 @@
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table.
   void process_weak_roots(OopClosure* root_closure,
-                          CodeBlobClosure* code_roots,
-                          OopClosure* non_root_closure);
+                          CodeBlobClosure* code_roots);
 
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
--- a/hotspot/src/share/vm/memory/universe.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/universe.cpp	Thu May 16 11:47:51 2013 +0100
@@ -1270,7 +1270,7 @@
   st->print_cr("}");
 }
 
-void Universe::verify(bool silent, VerifyOption option) {
+void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
   // The use of _verify_in_progress is a temporary work around for
   // 6320749.  Don't bother with a creating a class to set and clear
   // it since it is only used in this method and the control flow is
@@ -1287,11 +1287,12 @@
   HandleMark hm;  // Handles created during verification can be zapped
   _verify_count++;
 
+  if (!silent) gclog_or_tty->print(prefix);
   if (!silent) gclog_or_tty->print("[Verifying ");
   if (!silent) gclog_or_tty->print("threads ");
   Threads::verify();
+  if (!silent) gclog_or_tty->print("heap ");
   heap()->verify(silent, option);
-
   if (!silent) gclog_or_tty->print("syms ");
   SymbolTable::verify();
   if (!silent) gclog_or_tty->print("strs ");
--- a/hotspot/src/share/vm/memory/universe.hpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/memory/universe.hpp	Thu May 16 11:47:51 2013 +0100
@@ -445,12 +445,12 @@
 
   // Debugging
   static bool verify_in_progress() { return _verify_in_progress; }
-  static void verify(bool silent, VerifyOption option);
-  static void verify(bool silent) {
-    verify(silent, VerifyOption_Default /* option */);
+  static void verify(VerifyOption option, const char* prefix, bool silent = VerifySilently);
+  static void verify(const char* prefix, bool silent = VerifySilently) {
+    verify(VerifyOption_Default, prefix, silent);
   }
-  static void verify() {
-    verify(false /* silent */);
+  static void verify(bool silent = VerifySilently) {
+    verify("", silent);
   }
 
   static int  verify_count()       { return _verify_count; }
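The reordered parameters plus the VerifySilently-backed defaults leave three call shapes. Illustrative uses only; these exact call sites are not part of this hunk:

    Universe::verify();                                  // bool overload: prefix "", silent = VerifySilently
    Universe::verify("Before GC ");                      // default VerifyOption with a log prefix
    Universe::verify(VerifyOption_G1UsePrevMarking,
                     " VerifyDuringGC:(before)", true);  // explicit option, prefix, forced silent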
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Wed May 08 11:22:25 2013 +0100
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Thu May 16 11:47:51 2013 +0100
@@ -40,6 +40,7 @@
 #include "runtime/init.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/signature.hpp"
+#include "runtime/synchronizer.hpp"
 #include "runtime/vframe.hpp"
 
 ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
@@ -69,7 +70,6 @@
 
   // only set to non-zero if constant pool is merged by RedefineClasses
   set_version(0);
-  set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
 
   // initialize tag array
   int length = tags->length();
@@ -95,9 +95,6 @@
 void ConstantPool::release_C_heap_structures() {
   // walk constant pool and decrement symbol reference counts
   unreference_symbols();
-
-  delete _lock;
-  set_lock(NULL);
 }
 
 objArrayOop ConstantPool::resolved_references() const {
@@ -154,9 +151,6 @@
       ClassLoaderData* loader_data = pool_holder()->class_loader_data();
       set_resolved_references(loader_data->add_handle(refs_handle));
     }
-
-    // Also need to recreate the mutex.  Make sure this matches the constructor
-    set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock"));
   }
 }
 
@@ -167,7 +161,23 @@
   set_resolved_reference_length(
     resolved_references() != NULL ? resolved_references()->length() : 0);
   set_resolved_references(NULL);
-  set_lock(NULL);
+}
+
+oop ConstantPool::lock() {
+  if (_pool_holder) {
+    // We re-use the _pool_holder's init_lock to reduce footprint.
+    // Notes on deadlocks:
+    // [1] This lock is a Java oop, so it can be recursively locked by
+    //     the same thread without self-deadlocks.
+    // [2] Deadlock will happen if there is circular dependency between
+    //     the <clinit> of two Java classes. However, in this case,
+    //     the deadlock would have happened long before we reach
+    //     ConstantPool::lock(), so reusing init_lock does not
+    //     increase the possibility of deadlock.
+    return _pool_holder->init_lock();
+  } else {
+    return NULL;
+  }
 }
 
 int ConstantPool::cp_to_object_index(int cp_index) {
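The per-pool Monitor removed above is replaced, in every locking hunk that follows, by the pool holder's Java-level init_lock taken through ObjectLocker; the third argument doubles as a do-lock flag, so a NULL holder skips locking entirely. The recurring shape, as it appears in the hunks below:

    oop cplock = this_oop->lock();                    // pool holder's init_lock, or NULL
    ObjectLocker ol(cplock, THREAD, cplock != NULL);  // locks only when a holder exists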
@@ -208,7 +218,9 @@
 
   Symbol* name = NULL;
   Handle       loader;
-  {  MonitorLockerEx ml(this_oop->lock());
+  {
+    oop cplock = this_oop->lock();
+    ObjectLocker ol(cplock , THREAD, cplock != NULL);
 
     if (this_oop->tag_at(which).is_unresolved_klass()) {
       if (this_oop->tag_at(which).is_unresolved_klass_in_error()) {
@@ -255,7 +267,8 @@
 
       bool throw_orig_error = false;
       {
-        MonitorLockerEx ml(this_oop->lock());
+        oop cplock = this_oop->lock();
+        ObjectLocker ol(cplock, THREAD, cplock != NULL);
 
         // some other thread has beaten us and has resolved the class.
         if (this_oop->tag_at(which).is_klass()) {
@@ -323,7 +336,8 @@
       }
       return k();
     } else {
-      MonitorLockerEx ml(this_oop->lock());
+      oop cplock = this_oop->lock();
+      ObjectLocker ol(cplock, THREAD, cplock != NULL);
       // Only updated constant pool - if it is resolved.
       do_resolve = this_oop->tag_at(which).is_unresolved_klass();
       if (do_resolve) {
@@ -619,7 +633,8 @@
                                      int tag, TRAPS) {
   ResourceMark rm;
   Symbol* error = PENDING_EXCEPTION->klass()->name();
-  MonitorLockerEx ml(this_oop->lock());  // lock cpool to change tag.
+  oop cplock = this_oop->lock();
+  ObjectLocker ol(cplock, THREAD, cplock != NULL);  // lock cpool to change tag.
 
   int error_tag = (tag == JVM_CONSTANT_MethodHandle) ?
            JVM_CONSTANT_MethodHandleInError : JVM_CONSTANT_MethodTypeInError;
@@ -780,7 +795,8 @@
   if (cache_index >= 0) {
     // Cache the oop here also.
     Handle result_handle(THREAD, result_oop);
-    MonitorLockerEx ml(this_oop->lock());  // don't know if we really need this
+    oop cplock = this_oop->lock();
+    ObjectLocker o