changeset 2097:ceef3b0d9f72

Merge
author xdono
date Fri, 27 Feb 2009 15:12:52 -0800
parents da51837a7368 5da0e6b9f4f1
children 7b486f6bf8c4 a9be2da8a8f2
files jdk/make/javax/sound/jsoundhs/FILES.gmk jdk/make/javax/sound/jsoundhs/Makefile jdk/make/javax/sound/jsoundhs/mapfile-vers jdk/src/share/classes/com/sun/beans/ObjectHandler.java jdk/src/share/lib/audio/soundbank.gm jdk/src/solaris/classes/sun/nio/ch/FileDispatcher.java jdk/src/solaris/native/sun/nio/ch/FileDispatcher.c jdk/src/windows/classes/sun/nio/ch/FileDispatcher.java jdk/src/windows/native/sun/nio/ch/FileDispatcher.c jdk/src/windows/native/sun/windows/UnicowsLoader.cpp jdk/src/windows/native/sun/windows/UnicowsLoader.h jdk/src/windows/native/sun/windows/awt_MMStub.cpp jdk/src/windows/native/sun/windows/awt_MMStub.h jdk/src/windows/native/sun/windows/awt_Multimon.h jdk/src/windows/native/sun/windows/awt_Unicode.cpp jdk/src/windows/native/sun/windows/awt_Unicode.h jdk/src/windows/native/sun/windows/awt_dlls.cpp jdk/src/windows/native/sun/windows/awt_dlls.h langtools/make/netbeans/apt/README langtools/make/netbeans/apt/build.xml langtools/make/netbeans/apt/nbproject/project.xml langtools/make/netbeans/common/shared.xml langtools/make/netbeans/common/standard-context-menu-items-no-javadoc.ent langtools/make/netbeans/common/standard-context-menu-items.ent langtools/make/netbeans/common/standard-ide-actions-no-javadoc.ent langtools/make/netbeans/common/standard-ide-actions.ent langtools/make/netbeans/compiler/README langtools/make/netbeans/compiler/build.xml langtools/make/netbeans/compiler/nbproject/project.xml langtools/make/netbeans/doclets/README langtools/make/netbeans/doclets/build.xml langtools/make/netbeans/doclets/nbproject/project.xml langtools/make/netbeans/javadoc/README langtools/make/netbeans/javadoc/build.xml langtools/make/netbeans/javadoc/nbproject/project.xml langtools/make/netbeans/javah/README langtools/make/netbeans/javah/build.xml langtools/make/netbeans/javah/nbproject/project.xml langtools/make/netbeans/javap/README langtools/make/netbeans/javap/build.xml langtools/make/netbeans/javap/nbproject/project.xml
diffstat 1596 files changed, 171208 insertions(+), 13476 deletions(-)
--- a/.hgtags	Sat Jan 31 15:26:34 2009 -0800
+++ b/.hgtags	Fri Feb 27 15:12:52 2009 -0800
@@ -18,3 +18,9 @@
 3cb2a607c347934f8e7e86f840a094c28b08d9ea jdk7-b41
 caf58ffa084568990cbb3441f9ae188e36b31770 jdk7-b42
 41bd0a702bc8ec6feebd725a63e7c3227f82ab11 jdk7-b43
+5843778bda89b1d5ac8e1aa05e26930ac90b3145 jdk7-b44
+54dffad0bf066791a2793305875250c395011d5f jdk7-b45
+04b2620edc72de93671646e4720c5992c74ac8b5 jdk7-b46
+0c4657194eec95c08ba478aee9cfc3c295e41657 jdk7-b47
+1bf51a4c2627c2f0e0cbcc2cf0421bdb37f1f2b2 jdk7-b48
+6b84b04a80afe23262377c60913eebfc898f14c4 jdk7-b49
--- a/.hgtags-top-repo	Sat Jan 31 15:26:34 2009 -0800
+++ b/.hgtags-top-repo	Fri Feb 27 15:12:52 2009 -0800
@@ -18,3 +18,9 @@
 541bdc5ad32fc33255944d0a044ad992f3d915e8 jdk7-b41
 94052b87287303527125026fe4b2698cf867ea83 jdk7-b42
 848e684279d2ba42577d9621d5b2e5af3823d12d jdk7-b43
+a395e3aac4744cc9033fcd819fad1239a45add52 jdk7-b44
+99846f001ca214015578d593802d26e27246a802 jdk7-b45
+e8a2a4d187773a62f3309b0fa265c13425bc2258 jdk7-b46
+d7744e86dedc21a8ecf6bdb73eb191b8eaf5b0da jdk7-b47
+4ae9f4bfdb98f65bd957e3fe72471b320150b38e jdk7-b48
+aee93a8992d2389121eb610c00a86196f3e2b9b0 jdk7-b49
--- a/corba/.hgtags	Sat Jan 31 15:26:34 2009 -0800
+++ b/corba/.hgtags	Fri Feb 27 15:12:52 2009 -0800
@@ -18,3 +18,9 @@
 c90eeda9594ed2983403e2049aed8d503126c62e jdk7-b41
 ccd6a16502e0650d91d85c4b86be05cbcd461a87 jdk7-b42
 9cd740d48a4855321d69f137a7109c00bcda76be jdk7-b43
+9803dac7254041b30ca65e3852d4c566b9757c3b jdk7-b44
+68814aa5b44b1f16931a97e7cd4028c70eb9586b jdk7-b45
+1691dbfc08f8ee3f4e23a1ff30cdff920718696c jdk7-b46
+167ad0164301f318b069a947e1c9c07ed667748a jdk7-b47
+0be222241fd405e48915647facfaa176621b39b9 jdk7-b48
+d70978bc64bc7a04be7797ab0dcd9b7b1b3a6bff jdk7-b49
--- a/hotspot/.hgtags	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/.hgtags	Fri Feb 27 15:12:52 2009 -0800
@@ -18,3 +18,9 @@
 f9d938ede1960d18cb7cf23c645b026519c1a678 jdk7-b41
 ad8c8ca4ab0f4c86e74c061958f44a8f4a930f2c jdk7-b42
 fc6a5ae3fef5ebacfa896dbb3ae37715e388e282 jdk7-b43
+809e899c638bd9b21836abf9d09ab2a30ff3900b jdk7-b44
+945bf754069766e76873c53102fae48abf04cf5b jdk7-b45
+16bb38eeda35b46268eefa4c1f829eb086e0ca46 jdk7-b46
+fcb923bad68e2b10380a030ea83a723f4dc3d4d6 jdk7-b47
+bcb33806d186561c781992e5f4d8a90bb033f9f0 jdk7-b48
+8b22ccb5aba2c6c11bddf6488a7bb7ef5b4bf2be jdk7-b49
--- a/hotspot/agent/src/os/linux/ps_core.c	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/agent/src/os/linux/ps_core.c	Fri Feb 27 15:12:52 2009 -0800
@@ -238,8 +238,8 @@
   // Ignore the rest of the FileMapHeader. We don't need those fields here.
 };
 
-static bool read_int(struct ps_prochandle* ph, uintptr_t addr, int* pvalue) {
-   int i;
+static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) {
+   jboolean i;
    if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
       *pvalue = i;
       return true;
@@ -295,7 +295,7 @@
          int fd = -1, m = 0;
          uintptr_t base = 0, useSharedSpacesAddr = 0;
          uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
-         int useSharedSpaces = 0;
+         jboolean useSharedSpaces = 0;
          map_info* mi = 0;
 
          memset(classes_jsa, 0, sizeof(classes_jsa));
@@ -306,12 +306,15 @@
             return false;
          }
 
-         if (read_int(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
+         // Hotspot vm types are not exported when building this library,
+         // so the equivalent type jboolean is used to read the value of
+         // UseSharedSpaces, which matches the hotspot type "bool".
+         if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
             print_debug("can't read the value of 'UseSharedSpaces' flag\n");
             return false;
          }
 
-         if (useSharedSpaces == 0) {
+         if ((int)useSharedSpaces == 0) {
             print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
             return true;
          }
--- a/hotspot/agent/src/os/solaris/proc/saproc.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/agent/src/os/solaris/proc/saproc.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -502,8 +502,8 @@
 };
 
 static bool
-read_int(struct ps_prochandle* ph, psaddr_t addr, int* pvalue) {
-  int i;
+read_jboolean(struct ps_prochandle* ph, psaddr_t addr, jboolean* pvalue) {
+  jboolean i;
   if (ps_pread(ph, addr, &i, sizeof(i)) == PS_OK) {
     *pvalue = i;
     return true;
@@ -575,10 +575,13 @@
   }
 
   // read the value of the flag "UseSharedSpaces"
-  int value = 0;
-  if (read_int(ph, useSharedSpacesAddr, &value) != true) {
+  // Hotspot types are not available when building this library, so the
+  // equivalent type "jboolean" is used to read the value of "UseSharedSpaces",
+  // which matches the hotspot type "bool".
+  jboolean value = 0;
+  if (read_jboolean(ph, useSharedSpacesAddr, &value) != true) {
     THROW_NEW_DEBUGGER_EXCEPTION_("can't read 'UseSharedSpaces' flag", 1);
-  } else if (value == 0) {
+  } else if ((int)value == 0) {
     print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
     return 1;
   }
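
The two hunks above read a 1-byte HotSpot "bool" flag out of a process image. A minimal standalone sketch of why the read width matters (the memory image below is invented for illustration):

    #include <cstdio>
    #include <cstring>

    int main() {
        // Invented image: the flag byte is false (0); the bytes after it
        // belong to unrelated data.
        unsigned char image[4] = { 0, 0x42, 0x43, 0x44 };

        unsigned char as_byte;   // same width as the HotSpot "bool" flag
        int as_int;              // the width the old read_int() used
        std::memcpy(&as_byte, image, sizeof(as_byte));
        std::memcpy(&as_int,  image, sizeof(as_int));

        // as_byte is 0 (correct); as_int is nonzero on either endianness,
        // so a flag that is really false would be taken as true.
        std::printf("byte=%u int=0x%x\n", as_byte, (unsigned)as_int);
        return 0;
    }
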
--- a/hotspot/make/hotspot_version	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/make/hotspot_version	Fri Feb 27 15:12:52 2009 -0800
@@ -33,9 +33,9 @@
 # Don't put quotes (fail windows build).
 HOTSPOT_VM_COPYRIGHT=Copyright 2008
 
-HS_MAJOR_VER=14
+HS_MAJOR_VER=15
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=10
+HS_BUILD_NUMBER=02
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/hotspot/src/cpu/sparc/vm/jni_sparc.h	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/sparc/vm/jni_sparc.h	Fri Feb 27 15:12:52 2009 -0800
@@ -28,5 +28,11 @@
 #define JNICALL
 
 typedef int jint;
-typedef long long jlong;
+
+#ifdef _LP64
+  typedef long jlong;
+#else
+  typedef long long jlong;
+#endif
+
 typedef signed char jbyte;
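
A compile-time sketch of the data-model rule the new typedef relies on: under LP64, "long" is already 64 bits, while 32-bit targets need "long long" (the checking typedef below is illustrative, not part of the header):

    #ifdef _LP64
    typedef long jlong_sketch;
    #else
    typedef long long jlong_sketch;
    #endif

    // Fails to compile if jlong_sketch is not exactly 8 bytes.
    typedef char jlong_sketch_is_8_bytes[sizeof(jlong_sketch) == 8 ? 1 : -1];
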
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Fri Feb 27 15:12:52 2009 -0800
@@ -762,7 +762,7 @@
     case Assembler::stdf_op3: st_op = Op_StoreD; break;
 
     case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
-    case Assembler::lduh_op3: ld_op = Op_LoadC; break;
+    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
     case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
     case Assembler::ldx_op3:  // may become LoadP or stay LoadI
     case Assembler::ldsw_op3: // may become LoadP or stay LoadI
@@ -3869,6 +3869,8 @@
   constraint(ALLOC_IN_RC(dflt_reg));
   match(RegD);
 
+  match(regD_low);
+
   format %{ %}
   interface(REG_INTER);
 %}
@@ -3883,7 +3885,7 @@
 
 operand regD_low() %{
   constraint(ALLOC_IN_RC(dflt_low_reg));
-  match(RegD);
+  match(regD);
 
   format %{ %}
   interface(REG_INTER);
@@ -5314,9 +5316,9 @@
   ins_pipe(iload_mask_mem);
 %}
 
-// Load Char (16bit UNsigned) into a Long Register
-instruct loadUCL(iRegL dst, memory mem, immL_FFFF bytemask) %{
-  match(Set dst (AndL (ConvI2L (LoadC mem)) bytemask));
+// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
+instruct loadUS2L(iRegL dst, memory mem, immL_FFFF bytemask) %{
+  match(Set dst (AndL (ConvI2L (LoadUS mem)) bytemask));
   ins_cost(MEMORY_REF_COST);
 
   size(4);
@@ -5326,9 +5328,9 @@
   ins_pipe(iload_mask_mem);
 %}
 
-// Load Char (16bit unsigned)
-instruct loadC(iRegI dst, memory mem) %{
-  match(Set dst (LoadC mem));
+// Load Unsigned Short/Char (16bit unsigned)
+instruct loadUS(iRegI dst, memory mem) %{
+  match(Set dst (LoadUS mem));
   ins_cost(MEMORY_REF_COST);
 
   size(4);
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -5212,15 +5212,15 @@
 void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                            bool clear_pc) {
   // we must set sp to zero to clear frame
-  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
+  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
   // must clear fp, so that compiled frames are not confused; it is
   // possible that we need it only for debugging
   if (clear_fp) {
-    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
+    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
   }
 
   if (clear_pc) {
-    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);
+    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
   }
 }
 
@@ -5670,7 +5670,7 @@
   // get oop result if there is one and reset the value in the thread
   if (oop_result->is_valid()) {
     movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
-    movptr(Address(java_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
+    movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
     verify_oop(oop_result, "broken oop in call_VM_base");
   }
 }
@@ -6426,13 +6426,13 @@
     get_thread(java_thread);
   }
   // we must set sp to zero to clear frame
-  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
+  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
   if (clear_fp) {
-    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
+    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
   }
 
   if (clear_pc)
-    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);
+    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
 
 }
 
@@ -6943,29 +6943,32 @@
 
   Label slow_case, done;
 
-  // x ?<= pi/4
-  fld_d(ExternalAddress((address)&pi_4));
-  fld_s(1);                // Stack:  X  PI/4  X
-  fabs();                  // Stack: |X| PI/4  X
-  fcmp(tmp);
-  jcc(Assembler::above, slow_case);
-
-  // fastest case: -pi/4 <= x <= pi/4
-  switch(trig) {
-  case 's':
-    fsin();
-    break;
-  case 'c':
-    fcos();
-    break;
-  case 't':
-    ftan();
-    break;
-  default:
-    assert(false, "bad intrinsic");
-    break;
-  }
-  jmp(done);
+  ExternalAddress pi4_adr = (address)&pi_4;
+  if (reachable(pi4_adr)) {
+    // x ?<= pi/4
+    fld_d(pi4_adr);
+    fld_s(1);                // Stack:  X  PI/4  X
+    fabs();                  // Stack: |X| PI/4  X
+    fcmp(tmp);
+    jcc(Assembler::above, slow_case);
+
+    // fastest case: -pi/4 <= x <= pi/4
+    switch(trig) {
+    case 's':
+      fsin();
+      break;
+    case 'c':
+      fcos();
+      break;
+    case 't':
+      ftan();
+      break;
+    default:
+      assert(false, "bad intrinsic");
+      break;
+    }
+    jmp(done);
+  }
 
   // slow case: runtime call
   bind(slow_case);
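
Much of the churn in this file and several below drops "(int32_t)" casts in front of NULL_WORD. A hedged sketch of the mechanism, using stand-in overloads rather than HotSpot's movptr: if NULL_WORD is defined at pointer width, narrowing it first can steer an overloaded call to the 32-bit-immediate form.

    #include <cstdint>
    #include <cstdio>

    // Stand-ins for an overloaded immediate store (not HotSpot's API).
    void store_imm(int32_t) { std::printf("32-bit immediate form\n"); }
    void store_imm(int64_t) { std::printf("64-bit immediate form\n"); }

    #define NULL_WORD_SKETCH ((int64_t)0)  // pretend pointer-width zero on a 64-bit VM

    int main() {
        store_imm((int32_t)NULL_WORD_SKETCH);  // the old cast forces the narrow overload
        store_imm(NULL_WORD_SKETCH);           // without it, the wide overload is chosen
        return 0;
    }
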
--- a/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/bytecodeInterpreter_x86.inline.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -213,7 +213,7 @@
 
 inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
   /* it's possible we could catch this special case implicitly */
-  if (op1 == 0x80000000 && op2 == -1) return op1;
+  if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
   else return op1 / op2;
 }
 
@@ -231,7 +231,7 @@
 
 inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
   /* it's possible we could catch this special case implicitly */
-  if (op1 == 0x80000000 && op2 == -1) return 0;
+  if ((juint)op1 == 0x80000000 && op2 == -1) return 0;
   else return op1 % op2;
 }
 
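A standalone sketch of the guarded special case above: INT32_MIN / -1 overflows a jint, and without the (juint) cast the comparison mixes a signed jint with the unsigned literal 0x80000000.

    #include <cstdint>

    typedef int32_t jint;
    typedef uint32_t juint;

    // Mirrors the guarded form: the one overflowing dividend is
    // special-cased; everything else divides normally.
    jint vm_int_div_sketch(jint op1, jint op2) {
        if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
        return op1 / op2;
    }
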
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -779,7 +779,7 @@
     case T_OBJECT:  // fall through
     case T_ARRAY:
       if (c->as_jobject() == NULL) {
-        __ movptr(as_Address(addr), (int32_t)NULL_WORD);
+        __ movptr(as_Address(addr), NULL_WORD);
       } else {
         if (is_literal_address(addr)) {
           ShouldNotReachHere();
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -78,10 +78,10 @@
     movptr(rax, Address(thread, Thread::pending_exception_offset()));
     // make sure that the vm_results are cleared
     if (oop_result1->is_valid()) {
-      movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
+      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
     }
     if (oop_result2->is_valid()) {
-      movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
+      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
     }
     if (frame_size() == no_frame_size) {
       leave();
@@ -96,12 +96,12 @@
   // get oop results if there are any and reset the values in the thread
   if (oop_result1->is_valid()) {
     movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
-    movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
+    movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
     verify_oop(oop_result1);
   }
   if (oop_result2->is_valid()) {
     movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
-    movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
+    movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
     verify_oop(oop_result2);
   }
   return call_offset;
@@ -728,8 +728,8 @@
 
   // clear exception fields in JavaThread because they are no longer needed
   // (fields must be cleared because they are processed by GC otherwise)
-  __ movptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
-  __ movptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
+  __ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
 
   // pop the stub frame off
   __ leave();
@@ -878,7 +878,7 @@
 
     // load and clear pending exception
     __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
-    __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
 
     // check that there is really a valid exception
     __ verify_not_null_oop(rax);
@@ -971,14 +971,14 @@
         // load pending exception oop into rax,
         __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
         // clear pending exception
-        __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+        __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
 
         // load issuing PC (the return address for this stub) into rdx
         __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
 
         // make sure that the vm_results are cleared (may be unnecessary)
-        __ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
-        __ movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
+        __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
+        __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
 
         // verify that that there is really a valid exception in rax,
         __ verify_not_null_oop(exception_oop);
@@ -1393,7 +1393,7 @@
         __ ret(0);
 
         __ bind(miss);
-        __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 0); // result
+        __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
         __ pop(rax);
         __ pop(rcx);
         __ pop(rsi);
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -594,7 +594,7 @@
   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
 
   // for c++ interpreter can rsi really be munged?
-  __ lea(state, Address(rbp, -sizeof(BytecodeInterpreter)));                               // restore state
+  __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));                               // restore state
   __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method)));            // restore method
   __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals)));            // get locals pointer
 
@@ -658,7 +658,7 @@
     const Address size_of_stack    (rbx, methodOopDesc::max_stack_offset());
     // Always give one monitor to allow us to start interp if sync method.
     // Any additional monitors need a check when moving the expression stack
-    const one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
+    const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
   __ load_unsigned_word(rax, size_of_stack);                            // get size of expression stack in words
   __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor));
   __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
@@ -1829,7 +1829,7 @@
   Label unwind_and_forward;
 
   // restore state pointer.
-  __ lea(state, Address(rbp,  -sizeof(BytecodeInterpreter)));
+  __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
 
   __ movptr(rbx, STATE(_method));                       // get method
 #ifdef _LP64
@@ -1877,14 +1877,14 @@
 
   // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
   if (UseSSE < 2) {
-    __ lea(state, Address(rbp,  -sizeof(BytecodeInterpreter)));
+    __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
     __ movptr(rbx, STATE(_result._to_call._callee));                   // get method just executed
     __ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
     __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT));    // Result stub address array index
     __ jcc(Assembler::equal, do_float);
     __ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_DOUBLE));    // Result stub address array index
     __ jcc(Assembler::equal, do_double);
-#ifdef COMPILER2
+#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
     __ empty_FPU_stack();
 #endif // COMPILER2
     __ jmp(done_conv);
@@ -1928,7 +1928,7 @@
 
   // Restore rsi/r13 as compiled code may not preserve it
 
-  __ lea(state, Address(rbp,  -sizeof(BytecodeInterpreter)));
+  __ lea(state, Address(rbp,  -(int)sizeof(BytecodeInterpreter)));
 
   // restore stack to what we had when we left (in case i2c extended it)
 
@@ -1942,7 +1942,7 @@
 #else
   __ movptr(rcx, STATE(_thread));                       // get thread
   __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
-#endif / __LP64
+#endif // _LP64
   __ jcc(Assembler::notZero, return_with_exception);
 
   // get method just executed
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -139,7 +139,7 @@
 #ifdef CC_INTERP
 
 inline interpreterState frame::get_interpreterState() const {
-  return ((interpreterState)addr_at( -sizeof(BytecodeInterpreter)/wordSize ));
+  return ((interpreterState)addr_at( -((int)sizeof(BytecodeInterpreter))/wordSize ));
 }
 
 inline intptr_t*    frame::sender_sp()        const {
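
The "-(int)sizeof(BytecodeInterpreter)" casts in the interpreter files all guard the same trap; a minimal sketch (the struct size is a stand-in, not the real layout):

    #include <cstdio>

    struct BI_sketch { char pad[616]; };  // stand-in size only

    int main() {
        // sizeof() is unsigned, so negating it yields a huge size_t, not a
        // negative frame offset; casting to int first keeps the negation
        // in signed arithmetic.
        std::printf("unsigned: %zu\n", -sizeof(BI_sketch));      // wraps, e.g. 2^64 - 616
        std::printf("signed:   %d\n", -(int)sizeof(BI_sketch));  // -616
        return 0;
    }
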
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -133,7 +133,7 @@
                              + in_ByteSize(wordSize));
   switch (state) {
     case atos: movptr(rax, oop_addr);
-               movptr(oop_addr, (int32_t)NULL_WORD);
+               movptr(oop_addr, NULL_WORD);
                verify_oop(rax, state);                break;
     case ltos:
                movl(rdx, val_addr1);               // fall through
@@ -148,8 +148,8 @@
   }
   // Clean up tos value in the thread object
   movl(tos_addr,  (int32_t) ilgl);
-  movptr(val_addr,  (int32_t)NULL_WORD);
-  NOT_LP64(movl(val_addr1, (int32_t)NULL_WORD));
+  movptr(val_addr,  NULL_WORD);
+  NOT_LP64(movptr(val_addr1, NULL_WORD));
 }
 
 
@@ -944,7 +944,7 @@
     movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ()));
 
     // Free entry
-    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
+    movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
 
     if (UseBiasedLocking) {
       biased_locking_exit(obj_reg, header_reg, done);
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_32.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -120,7 +120,7 @@
   void empty_expression_stack()                            {
        movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
       // NULL last_sp until next java call
-      movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+      movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
   }
 
   // Tagged stack helpers for swap and dup
--- a/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -30,7 +30,7 @@
 
 #ifdef CC_INTERP
 void InterpreterMacroAssembler::get_method(Register reg) {
-  movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
+  movptr(reg, Address(rbp, -((int)sizeof(BytecodeInterpreter) + 2 * wordSize)));
   movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
 }
 #endif // CC_INTERP
--- a/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/interpreterRT_x86_32.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -54,7 +54,7 @@
   __ cmpptr(Address(from(), Interpreter::local_offset_in_bytes(from_offset)), (int32_t)NULL_WORD); // do not use temp() to avoid AGI
   Label L;
   __ jcc(Assembler::notZero, L);
-  __ movptr(temp(), ((int32_t)NULL_WORD));
+  __ movptr(temp(), NULL_WORD);
   __ bind(L);
   __ movptr(Address(to(), to_offset * wordSize), temp());
 }
@@ -110,7 +110,7 @@
   virtual void pass_object() {
     // pass address of from
     intptr_t from_addr = (intptr_t)(_from + Interpreter::local_offset_in_bytes(0));
-    *_to++ = (*(intptr_t*)from_addr == 0) ? NULL : from_addr;
+    *_to++ = (*(intptr_t*)from_addr == 0) ? NULL_WORD : from_addr;
     debug_only(verify_tag(frame::TagReference));
     _from -= Interpreter::stackElementSize();
    }
--- a/hotspot/src/cpu/x86/vm/jni_x86.h	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/jni_x86.h	Fri Feb 27 15:12:52 2009 -0800
@@ -32,7 +32,13 @@
   #define JNICALL
 
   typedef int jint;
+
+#ifdef _LP64
+  typedef long jlong;
+#else
   typedef long long jlong;
+#endif
+
 #else
   #define JNIEXPORT __declspec(dllexport)
   #define JNIIMPORT __declspec(dllimport)
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -129,11 +129,11 @@
   // Get the exception pc in case we are deoptimized
   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
 #ifdef ASSERT
-  __ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), (int32_t)NULL_WORD);
-  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), NULL_WORD);
+  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
 #endif
   // Clear the exception oop so GC no longer processes it as a root.
-  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
 
   __ pop(rcx);
 
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -39,6 +39,8 @@
 RuntimeStub*       SharedRuntime::_resolve_virtual_call_blob;
 RuntimeStub*       SharedRuntime::_resolve_static_call_blob;
 
+const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
+
 class RegisterSaver {
   enum { FPU_regs_live = 8 /*for the FPU stack*/+8/*eight more for XMM registers*/ };
   // Capture info about frame layout
@@ -1299,7 +1301,7 @@
 
   // Now compute actual number of stack words we need rounding to make
   // stack properly aligned.
-  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
+  stack_slots = round_to(stack_slots, StackAlignmentInSlots);
 
   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
 
@@ -1793,7 +1795,7 @@
   // reset handle block
   __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
 
-  __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+  __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
 
   // Any exception pending?
   __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
@@ -1865,7 +1867,7 @@
     // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
 
     __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
-    __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
+    __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
 
 
     // should be a peal
@@ -2431,7 +2433,7 @@
   __ get_thread(rdi);
   __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
   __ movptr(Address(rbp, wordSize), rdx);
-  __ movptr(Address(rdi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
 
 #ifdef ASSERT
   // verify that there is really an exception oop in JavaThread
@@ -2489,8 +2491,8 @@
   __ jcc(Assembler::notEqual, noException);
   __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
   __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
-  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
-  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
+  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
 
   __ verify_oop(rax);
 
@@ -2582,7 +2584,7 @@
           rbx); // Make it walkable
 #else /* CC_INTERP */
   // This value is corrected by layout_activation_impl
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
 #endif /* CC_INTERP */
   __ movptr(sp_temp, rsp);              // pass to next frame
@@ -2802,7 +2804,7 @@
           rbx); // Make it walkable
 #else /* CC_INTERP */
   // This value is corrected by layout_activation_impl
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
 #endif /* CC_INTERP */
   __ movptr(sp_temp, rsp);              // pass to next frame
@@ -3020,7 +3022,7 @@
   // exception pending => remove activation and forward to exception handler
 
   __ get_thread(thread);
-  __ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
   __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
   __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
 
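Both native-wrapper files now derive the rounding granule from StackAlignmentInBytes instead of hard-coding it. A sketch of the arithmetic, assuming 16-byte alignment and 4-byte stack slots (round_to_sketch is a local stand-in for HotSpot's helper):

    #include <cassert>

    // Rounds x up to a multiple of align; align must be a power of two.
    int round_to_sketch(int x, int align) {
        return (x + align - 1) & ~(align - 1);
    }

    int main() {
        const int StackAlignmentInBytes = 16;  // assumed for the example
        const int stack_slot_size = 4;
        const int StackAlignmentInSlots = StackAlignmentInBytes / stack_slot_size;  // 4

        assert(round_to_sketch(13, StackAlignmentInSlots) == 16);
        assert(round_to_sketch(16, StackAlignmentInSlots) == 16);
        return 0;
    }
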
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -39,6 +39,8 @@
 RuntimeStub*       SharedRuntime::_resolve_virtual_call_blob;
 RuntimeStub*       SharedRuntime::_resolve_static_call_blob;
 
+const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
+
 #define __ masm->
 
 class SimpleRuntimeFrame {
@@ -1286,7 +1288,7 @@
 
   // Now compute actual number of stack words we need rounding to make
   // stack properly aligned.
-  stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
+  stack_slots = round_to(stack_slots, StackAlignmentInSlots);
 
   int stack_size = stack_slots * VMRegImpl::stack_slot_size;
 
@@ -2954,10 +2956,16 @@
   __ pushptr(Address(rcx, 0));     // Save return address
   __ enter();                      // Save old & set new rbp
   __ subptr(rsp, rbx);             // Prolog
+#ifdef CC_INTERP
+  __ movptr(Address(rbp,
+                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
+            sender_sp); // Make it walkable
+#else // CC_INTERP
   __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
             sender_sp);            // Make it walkable
   // This value is corrected by layout_activation_impl
   __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
+#endif // CC_INTERP
   __ mov(sender_sp, rsp);          // Pass sender_sp to next frame
   __ addptr(rsi, wordSize);        // Bump array pointer (sizes)
   __ addptr(rcx, wordSize);        // Bump array pointer (pcs)
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -407,7 +407,7 @@
     __ get_thread(rcx);
     __ pop(rdx);
     __ movptr(rax, Address(rcx, Thread::pending_exception_offset()));
-    __ movptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+    __ movptr(Address(rcx, Thread::pending_exception_offset()), NULL_WORD);
 
 #ifdef ASSERT
     // make sure exception is set
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -472,7 +472,7 @@
     // setup rax & rdx, remove return address & clear pending exception
     __ pop(rdx);
     __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
-    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
+    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
 
 #ifdef ASSERT
     // make sure exception is set
@@ -954,9 +954,9 @@
     __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
     // Check if the oop is in the right area of memory
     __ movptr(c_rarg2, rax);
-    __ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask());
+    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_mask());
     __ andptr(c_rarg2, c_rarg3);
-    __ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits());
+    __ movptr(c_rarg3, (intptr_t) Universe::verify_oop_bits());
     __ cmpptr(c_rarg2, c_rarg3);
     __ jcc(Assembler::notZero, error);
 
@@ -969,9 +969,9 @@
     __ jcc(Assembler::zero, error); // if klass is NULL it is broken
     // Check if the klass is in the right area of memory
     __ mov(c_rarg2, rax);
-    __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
+    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
     __ andptr(c_rarg2, c_rarg3);
-    __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
+    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
     __ cmpptr(c_rarg2, c_rarg3);
     __ jcc(Assembler::notZero, error);
 
@@ -980,9 +980,9 @@
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
     // Check if the klass' klass is in the right area of memory
-    __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
+    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_mask());
     __ andptr(rax, c_rarg3);
-    __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
+    __ movptr(c_rarg3, (intptr_t) Universe::verify_klass_bits());
     __ cmpptr(rax, c_rarg3);
     __ jcc(Assembler::notZero, error);
 
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -110,7 +110,7 @@
     if (message != NULL) {
       __ lea(rbx, ExternalAddress((address)message));
     } else {
-      __ movptr(rbx, (int32_t)NULL_WORD);
+      __ movptr(rbx, NULL_WORD);
     }
     __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
   }
@@ -123,7 +123,7 @@
 address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
   address entry = __ pc();
   // NULL last_sp until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
   __ dispatch_next(state);
   return entry;
 }
@@ -176,7 +176,7 @@
   // Restore stack bottom in case i2c adjusted stack
   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
   // and NULL it as marker that rsp is now tos until next java call
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
 
   __ restore_bcp();
   __ restore_locals();
@@ -211,7 +211,7 @@
 
   // The stack is not extended by deopt but we must NULL last_sp as this
   // entry is like a "return".
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
   __ restore_bcp();
   __ restore_locals();
   // handle exceptions
@@ -382,7 +382,7 @@
   // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
   // The call returns the address of the verified entry point for the method or NULL
   // if the compilation did not complete (either went background or bailed out).
-  __ movptr(rax, (int32_t)false);
+  __ movptr(rax, (intptr_t)false);
   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
 
   __ movptr(rbx, Address(rbp, method_offset));   // restore methodOop
@@ -1028,7 +1028,7 @@
 
   // reset handle block
   __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
-  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
+  __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
 
   // If result was an oop then unbox and save it in the frame
   { Label L;
@@ -1488,7 +1488,7 @@
 
   // Restore sp to interpreter_frame_last_sp even though we are going
   // to empty the expression stack for the exception processing.
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
   // rax,: exception
   // rdx: return address/pc that threw exception
   __ restore_bcp();                              // rsi points to call/send
@@ -1608,7 +1608,7 @@
   __ reset_last_Java_frame(rcx, true, true);
   // Restore the last_sp and null it out
   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
-  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
+  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
 
   __ restore_bcp();
   __ restore_locals();
@@ -1636,7 +1636,7 @@
   // restore exception
   __ get_thread(rcx);
   __ movptr(rax, Address(rcx, JavaThread::vm_result_offset()));
-  __ movptr(Address(rcx, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
+  __ movptr(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD);
   __ verify_oop(rax);
 
   // Inbetween activations - previous activation type unknown yet
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -137,7 +137,7 @@
         // Do the actual store
         // noreg means NULL
         if (val == noreg) {
-          __ movl(Address(rdx, 0), NULL_WORD);
+          __ movptr(Address(rdx, 0), NULL_WORD);
           // No post barrier for NULL
         } else {
           __ movl(Address(rdx, 0), val);
@@ -152,7 +152,7 @@
     case BarrierSet::CardTableExtension:
       {
         if (val == noreg) {
-          __ movl(obj, NULL_WORD);
+          __ movptr(obj, NULL_WORD);
         } else {
           __ movl(obj, val);
           // flatten object address if needed
@@ -168,7 +168,7 @@
     case BarrierSet::ModRef:
     case BarrierSet::Other:
       if (val == noreg) {
-        __ movl(obj, NULL_WORD);
+        __ movptr(obj, NULL_WORD);
       } else {
         __ movl(obj, val);
       }
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Fri Feb 27 15:12:52 2009 -0800
@@ -3371,7 +3371,7 @@
          masm.movptr(Address(boxReg, 0), 3) ;            // results in ST-before-CAS penalty
          masm.get_thread (scrReg) ; 
          masm.movptr(boxReg, tmpReg);                    // consider: LEA box, [tmp-2] 
-         masm.movptr(tmpReg, 0);                         // consider: xor vs mov
+         masm.movptr(tmpReg, NULL_WORD);                 // consider: xor vs mov
          if (os::is_MP()) { masm.lock(); } 
          masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; 
       } else 
@@ -3387,7 +3387,7 @@
 
          if ((EmitSync & 64) == 0) {
            // Optimistic form: consider XORL tmpReg,tmpReg
-           masm.movptr(tmpReg, 0 ) ; 
+           masm.movptr(tmpReg, NULL_WORD) ; 
          } else { 
            // Can suffer RTS->RTO upgrades on shared or cold $ lines
            // Test-And-CAS instead of CAS
@@ -3587,7 +3587,7 @@
          masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; 
          masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; 
          masm.jccb  (Assembler::notZero, DONE_LABEL) ; 
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ; 
+         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ; 
          masm.jmpb  (DONE_LABEL) ; 
       } else { 
          masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;  
@@ -3596,7 +3596,7 @@
          masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; 
          masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; 
          masm.jccb  (Assembler::notZero, CheckSucc) ; 
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ; 
+         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ; 
          masm.jmpb  (DONE_LABEL) ; 
       }
 
@@ -3644,7 +3644,7 @@
          // We currently use (3), although it's likely that switching to (2)
          // is correct for the future.
             
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ; 
+         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ; 
          if (os::is_MP()) { 
             if (VM_Version::supports_sse2() && 1 == FenceInstruction) { 
               masm.mfence();
@@ -6413,9 +6413,9 @@
   ins_pipe( ialu_reg_mem );
 %}
 
-// Load Char (16bit unsigned)
-instruct loadC(eRegI dst, memory mem) %{
-  match(Set dst (LoadC mem));
+// Load Unsigned Short/Char (16bit unsigned)
+instruct loadUS(eRegI dst, memory mem) %{
+  match(Set dst (LoadUS mem));
 
   ins_cost(125);
   format %{ "MOVZX  $dst,$mem" %}
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Fri Feb 27 15:12:52 2009 -0800
@@ -6096,25 +6096,25 @@
 //   ins_pipe(ialu_reg_mem);
 // %}
 
-// Load Char (16 bit UNsigned)
-instruct loadC(rRegI dst, memory mem)
-%{
-  match(Set dst (LoadC mem));
+// Load Unsigned Short/Char (16 bit UNsigned)
+instruct loadUS(rRegI dst, memory mem)
+%{
+  match(Set dst (LoadUS mem));
 
   ins_cost(125);
-  format %{ "movzwl  $dst, $mem\t# char" %}
+  format %{ "movzwl  $dst, $mem\t# ushort/char" %}
   opcode(0x0F, 0xB7);
   ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
   ins_pipe(ialu_reg_mem);
 %}
 
-// Load Char (16 bit UNsigned) into long
-// instruct loadC2L(rRegL dst, memory mem)
+// Load Unsigned Short/Char (16 bit UNsigned) into long
+// instruct loadUS2L(rRegL dst, memory mem)
 // %{
-//   match(Set dst (ConvI2L (LoadC mem)));
+//   match(Set dst (ConvI2L (LoadUS mem)));
 
 //   ins_cost(125);
-//   format %{ "movzwl  $dst, $mem\t# char -> long" %}
+//   format %{ "movzwl  $dst, $mem\t# ushort/char -> long" %}
 //   opcode(0x0F, 0xB7);
 //   ins_encode(REX_reg_mem(dst, mem), OpcP, OpcS, reg_mem(dst, mem));
 //   ins_pipe(ialu_reg_mem);
@@ -9490,14 +9490,14 @@
 %{
   match(Set dst (AndL dst src));
 
-  format %{ "movzbq  $dst, $src\t# long & 0xFF" %}
+  format %{ "movzbq  $dst, $dst\t# long & 0xFF" %}
   opcode(0x0F, 0xB6);
   ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
   ins_pipe(ialu_reg);
 %}
 
 // And Register with Immediate 65535
-instruct andL_rReg_imm65535(rRegI dst, immL_65535 src)
+instruct andL_rReg_imm65535(rRegL dst, immL_65535 src)
 %{
   match(Set dst (AndL dst src));
 
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -279,7 +279,11 @@
  *        ...
  *        7: The default directories, normally /lib and /usr/lib.
  */
+#if defined(AMD64) || defined(_LP64) && (defined(SPARC) || defined(PPC) || defined(S390))
+#define DEFAULT_LIBPATH "/usr/lib64:/lib64:/lib:/usr/lib"
+#else
 #define DEFAULT_LIBPATH "/lib:/usr/lib"
+#endif
 
 #define EXTENSIONS_DIR  "/lib/ext"
 #define ENDORSED_DIR    "/lib/endorsed"
@@ -1160,7 +1164,10 @@
 
         /*                                     1   1   1   1   1   1   1   1   1   1   2   2   2   2   2   2   2   2   2 */
         /*              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1   2   3   4   5   6   7   8 */
-        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %lu %lu %ld %lu %lu %lu %lu",
+        i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld "
+                   UINTX_FORMAT UINTX_FORMAT UINTX_FORMAT
+                   " %lu "
+                   UINTX_FORMAT UINTX_FORMAT UINTX_FORMAT,
              &state,          /* 3  %c  */
              &ppid,           /* 4  %d  */
              &pgrp,           /* 5  %d  */
@@ -1180,13 +1187,13 @@
              &nice,           /* 19 %ld  */
              &junk,           /* 20 %ld  */
              &it_real,        /* 21 %ld  */
-             &start,          /* 22 %lu  */
-             &vsize,          /* 23 %lu  */
-             &rss,            /* 24 %ld  */
+             &start,          /* 22 UINTX_FORMAT  */
+             &vsize,          /* 23 UINTX_FORMAT  */
+             &rss,            /* 24 UINTX_FORMAT  */
              &rsslim,         /* 25 %lu  */
-             &scodes,         /* 26 %lu  */
-             &ecode,          /* 27 %lu  */
-             &stack_start);   /* 28 %lu  */
+             &scodes,         /* 26 UINTX_FORMAT  */
+             &ecode,          /* 27 UINTX_FORMAT  */
+             &stack_start);   /* 28 UINTX_FORMAT  */
       }
 
       if (i != 28 - 2) {
@@ -1425,6 +1432,10 @@
   return buf;
 }
 
+struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
+  return localtime_r(clock, res);
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // runtime exit support
 
@@ -2024,7 +2035,8 @@
                 CAST_FROM_FN_PTR(address, os::jvm_path),
                 dli_fname, sizeof(dli_fname), NULL);
   assert(ret != 0, "cannot locate libjvm");
-  realpath(dli_fname, buf);
+  if (realpath(dli_fname, buf) == NULL)
+    return;
 
   if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) {
     // Support for the gamma launcher.  Typical value for buf is
@@ -2048,7 +2060,8 @@
         assert(strstr(p, "/libjvm") == p, "invalid library name");
         p = strstr(p, "_g") ? "_g" : "";
 
-        realpath(java_home_var, buf);
+        if (realpath(java_home_var, buf) == NULL)
+          return;
         sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch);
         if (0 == access(buf, F_OK)) {
           // Use current module name "libjvm[_g].so" instead of
@@ -2059,7 +2072,8 @@
           sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p);
         } else {
           // Go back to path of .so
-          realpath(dli_fname, buf);
+          if (realpath(dli_fname, buf) == NULL)
+            return;
         }
       }
     }
@@ -4184,11 +4198,11 @@
   // Skip blank chars
   do s++; while (isspace(*s));
 
-  count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
-                 &idummy, &idummy, &idummy, &idummy, &idummy, &idummy,
+  count = sscanf(s,"%*c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
+                 &idummy, &idummy, &idummy, &idummy, &idummy,
                  &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
                  &user_time, &sys_time);
-  if ( count != 13 ) return -1;
+  if ( count != 12 ) return -1;
   if (user_sys_cpu_time) {
     return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
   } else {
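
Several hunks above add checks on realpath(3), which returns NULL on failure and leaves the output buffer unreliable. A minimal sketch of the pattern (the path argument is illustrative):

    #include <climits>
    #include <cstdlib>

    // Returns 0 and fills buf (at least PATH_MAX bytes) on success;
    // returns -1 without touching the caller's state otherwise.
    int resolve_path_sketch(const char* fname, char* buf) {
        if (realpath(fname, buf) == NULL)
            return -1;
        return 0;
    }
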
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -323,6 +323,10 @@
   return (size_t)(base - bottom);
 }
 
+struct tm* os::localtime_pd(const time_t* clock, struct tm*  res) {
+  return localtime_r(clock, res);
+}
+
 // interruptible infrastructure
 
 // setup_interruptible saves the thread state before going into an
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -327,6 +327,14 @@
   return sz;
 }
 
+struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
+  const struct tm* time_struct_ptr = localtime(clock);
+  if (time_struct_ptr != NULL) {
+    *res = *time_struct_ptr;
+    return res;
+  }
+  return NULL;
+}
 
 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 
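Each port now gains os::localtime_pd behind the same signature. A standalone sketch of the portable shape: POSIX ports delegate to localtime_r, while the Windows port copies localtime's static result into the caller's buffer.

    #include <ctime>

    struct tm* localtime_pd_sketch(const time_t* clock, struct tm* res) {
    #ifdef _WIN32
        const struct tm* t = localtime(clock);  // CRT-managed static storage
        if (t == NULL) return NULL;
        *res = *t;   // copy out before anyone else calls localtime
        return res;
    #else
        return localtime_r(clock, res);  // reentrant: writes straight into res
    #endif
    }
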
--- a/hotspot/src/share/vm/adlc/dict2.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/adlc/dict2.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -316,9 +316,12 @@
   return strcmp((const char *)k1,(const char *)k2);
 }
 
-// Slimey cheap key comparator.
+// Cheap key comparator.
 int cmpkey(const void *key1, const void *key2) {
-  return (int)((intptr_t)key1 - (intptr_t)key2);
+  if (key1 == key2) return 0;
+  intptr_t delta = (intptr_t)key1 - (intptr_t)key2;
+  if (delta > 0) return 1;
+  return -1;
 }
 
 //=============================================================================
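
The cmpkey fix above stops returning a truncated pointer difference, whose sign can flip once (intptr_t)key1 - (intptr_t)key2 no longer fits in an int. Comparing directly, as in this sketch, sidesteps the subtraction entirely (a variant of the fix, not the patch's exact form):

    #include <cstdint>

    int cmpkey_sketch(const void* key1, const void* key2) {
        if (key1 == key2) return 0;
        // No subtraction, so no overflow or truncation to worry about.
        return ((intptr_t)key1 < (intptr_t)key2) ? -1 : 1;
    }
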
--- a/hotspot/src/share/vm/adlc/forms.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/adlc/forms.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -248,7 +248,7 @@
 // True if 'opType', an ideal name, loads or stores.
 Form::DataType Form::is_load_from_memory(const char *opType) const {
   if( strcmp(opType,"LoadB")==0 )  return Form::idealB;
-  if( strcmp(opType,"LoadC")==0 )  return Form::idealC;
+  if( strcmp(opType,"LoadUS")==0 )  return Form::idealC;
   if( strcmp(opType,"LoadD")==0 )  return Form::idealD;
   if( strcmp(opType,"LoadD_unaligned")==0 )  return Form::idealD;
   if( strcmp(opType,"LoadF")==0 )  return Form::idealF;
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -3314,7 +3314,7 @@
     "StoreI","StoreL","StoreP","StoreN","StoreD","StoreF" ,
     "StoreB","StoreC","Store" ,"StoreFP",
     "LoadI" ,"LoadL", "LoadP" ,"LoadN", "LoadD" ,"LoadF"  ,
-    "LoadB" ,"LoadC" ,"LoadS" ,"Load"   ,
+    "LoadB" ,"LoadUS" ,"LoadS" ,"Load"   ,
     "Store4I","Store2I","Store2L","Store2D","Store4F","Store2F","Store16B",
     "Store8B","Store4B","Store8C","Store4C","Store2C",
     "Load4I" ,"Load2I" ,"Load2L" ,"Load2D" ,"Load4F" ,"Load2F" ,"Load16B" ,
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -123,6 +123,10 @@
     // addresses constructed before expansions will not be confused.
     cb->free_blob();
   }
+
+  // free any overflow storage
+  delete _overflow_arena;
+
 #ifdef ASSERT
   Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
 #endif
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -232,7 +232,9 @@
     length >= 1, "Illegal constant pool size %u in class file %s",
     length, CHECK_(nullHandle));
   constantPoolOop constant_pool =
-                      oopFactory::new_constantPool(length, CHECK_(nullHandle));
+                      oopFactory::new_constantPool(length,
+                                                   methodOopDesc::IsSafeConc,
+                                                   CHECK_(nullHandle));
   constantPoolHandle cp (THREAD, constant_pool);
 
   cp->set_partially_loaded();    // Enables heap verify to work on partial constantPoolOops
@@ -1675,7 +1677,8 @@
   // All sizing information for a methodOop is finally available, now create it
   methodOop m_oop  = oopFactory::new_method(
     code_length, access_flags, linenumber_table_length,
-    total_lvt_length, checked_exceptions_length, CHECK_(nullHandle));
+    total_lvt_length, checked_exceptions_length,
+    methodOopDesc::IsSafeConc, CHECK_(nullHandle));
   methodHandle m (THREAD, m_oop);
 
   ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize);
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -441,6 +441,7 @@
 
 bool java_lang_Class::offsets_computed = false;
 int  java_lang_Class::classRedefinedCount_offset = -1;
+int  java_lang_Class::parallelCapable_offset = -1;
 
 void java_lang_Class::compute_offsets() {
   assert(!offsets_computed, "offsets should be initialized only once");
@@ -451,6 +452,23 @@
   // so don't go fatal.
   compute_optional_offset(classRedefinedCount_offset,
     k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
+
+  // The field indicating parallelCapable (parallelLockMap) is only present starting in JDK 7.
+  klassOop k1 = SystemDictionary::classloader_klass();
+  compute_optional_offset(parallelCapable_offset,
+    k1, vmSymbols::parallelCapable_name(), vmSymbols::concurrenthashmap_signature());
+}
+
+// For class loader classes, parallelCapable is determined
+// by whether the parallelLockMap field is non-null.
+// java.lang.ClassLoader writes this field; the VM only reads it, never sets it.
+bool java_lang_Class::parallelCapable(oop class_loader) {
+  if (!JDK_Version::is_gte_jdk17x_version()
+     || parallelCapable_offset == -1) {
+     // Default for backward compatibility is false
+     return false;
+  }
+  return (class_loader->obj_field(parallelCapable_offset) != NULL);
 }
 
 int java_lang_Class::classRedefinedCount(oop the_class_mirror) {
@@ -866,7 +884,7 @@
     }
     nmethod* nm = method->code();
     if (WizardMode && nm != NULL) {
-      sprintf(buf + (int)strlen(buf), "(nmethod %#x)", nm);
+      sprintf(buf + (int)strlen(buf), "(nmethod " PTR_FORMAT ")", (intptr_t)nm);
     }
   }
 
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -141,6 +141,7 @@
   static void compute_offsets();
   static bool offsets_computed;
   static int classRedefinedCount_offset;
+  static int parallelCapable_offset;
 
  public:
   // Instance creation
@@ -168,6 +169,8 @@
   // Support for classRedefinedCount field
   static int classRedefinedCount(oop the_class_mirror);
   static void set_classRedefinedCount(oop the_class_mirror, int value);
+  // Support for parallelCapable field
+  static bool parallelCapable(oop the_class_mirror);
   // Debugging
   friend class JavaClasses;
   friend class instanceKlass;   // verification code accesses offsets
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -90,6 +90,14 @@
 #endif
 
 // ----------------------------------------------------------------------------
+// Parallel class loading check
+
+bool SystemDictionary::is_parallelCapable(Handle class_loader) {
+  if (UnsyncloadClass || class_loader.is_null()) return true;
+  if (AlwaysLockClassLoader) return false;
+  return java_lang_Class::parallelCapable(class_loader());
+}
+// ----------------------------------------------------------------------------
 // Resolving of classes
 
 // Forwards to resolve_or_null
@@ -196,7 +204,8 @@
 // super-class callers:
 //   ClassFileParser - for defineClass & jvmtiRedefineClasses
 //   load_shared_class - while loading a class from shared archive
-//   resolve_instance_class_or_fail:
+//   resolve_instance_class_or_null:
+//     via: handle_parallel_super_load
 //      when resolving a class that has an existing placeholder with
 //      a saved superclass [i.e. a defineClass is currently in progress]
 //      if another thread is trying to resolve the class, it must do
@@ -283,12 +292,9 @@
       if (probe && probe->check_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER)) {
           throw_circularity_error = true;
       }
-
-      // add placeholder entry even if error - callers will remove on error
+    }
+    if (!throw_circularity_error) {
       PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, child_name, class_loader, PlaceholderTable::LOAD_SUPER, class_name, THREAD);
-      if (throw_circularity_error) {
-         newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_SUPER);
-      }
     }
   }
   if (throw_circularity_error) {
@@ -325,7 +331,6 @@
   return superk_h();
 }
 
-
 void SystemDictionary::validate_protection_domain(instanceKlassHandle klass,
                                                   Handle class_loader,
                                                   Handle protection_domain,
@@ -421,7 +426,7 @@
   bool calledholdinglock
       = ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject);
   assert(calledholdinglock,"must hold lock for notify");
-  assert(!UnsyncloadClass, "unexpected double_lock_wait");
+  assert((!(lockObject() == _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait");
   ObjectSynchronizer::notifyall(lockObject, THREAD);
   intptr_t recursions =  ObjectSynchronizer::complete_exit(lockObject, THREAD);
   SystemDictionary_lock->wait();
@@ -439,7 +444,7 @@
 // even in non-circularity situations.
 // Note: only one thread can define the class, but multiple can resolve
 // Note: must call resolve_super_or_fail even if null super -
-// to force placeholder entry creation for this class
+// to force placeholder entry creation for this class for circularity detection
 // Caller must check for pending exception
 // Returns non-null klassOop if other thread has completed load
 // and we are done,
@@ -477,9 +482,9 @@
     SystemDictionary_lock->notify_all();
   }
 
-  // UnsyncloadClass does NOT wait for parallel superclass loads to complete
-  // Bootstrap classloader does wait for parallel superclass loads
- if (UnsyncloadClass) {
+  // parallelCapable class loaders do NOT wait for parallel superclass loads to complete
+  // Serial class loaders and bootstrap classloader do wait for superclass loads
+  if (!class_loader.is_null() && is_parallelCapable(class_loader)) {
     MutexLocker mu(SystemDictionary_lock, THREAD);
     // Check if classloading completed while we were loading superclass or waiting
     klassOop check = find_class(d_index, d_hash, name, class_loader);
@@ -566,10 +571,10 @@
   // This lock must be acquired here so the waiter will find
   // any successful result in the SystemDictionary and not attempt
   // the define
-  // Classloaders that support parallelism, e.g. bootstrap classloader,
+  // parallelCapable class loaders and the bootstrap classloader,
   // or all classloaders with UnsyncloadClass do not acquire lock here
   bool DoObjectLock = true;
-  if (UnsyncloadClass || (class_loader.is_null())) {
+  if (is_parallelCapable(class_loader)) {
     DoObjectLock = false;
   }
 
@@ -627,6 +632,9 @@
     // Five cases:
     // All cases need to prevent modifying bootclasssearchpath
     // in parallel with a classload of same classname
+    // RedefineClasses uses the existence of the placeholder for the duration
+    // of the class load to prevent concurrent redefinition of a class that
+    // is not completely defined.
     // case 1. traditional classloaders that rely on the classloader object lock
     //   - no other need for LOAD_INSTANCE
     // case 2. traditional classloaders that break the classloader object lock
@@ -642,12 +650,13 @@
     //    This classloader supports parallelism at the classloader level,
     //    but only allows a single load of a class/classloader pair.
     //    No performance benefit and no deadlock issues.
-    // case 5. Future: parallel user level classloaders - without objectLocker
+    // case 5. parallelCapable user level classloaders - without objectLocker
+    //    Allow parallel classloading of a class/classloader pair
     symbolHandle nullsymbolHandle;
     bool throw_circularity_error = false;
     {
       MutexLocker mu(SystemDictionary_lock, THREAD);
-      if (!UnsyncloadClass) {
+      if (class_loader.is_null() || !is_parallelCapable(class_loader)) {
         PlaceholderEntry* oldprobe = placeholders()->get_entry(p_index, p_hash, name, class_loader);
         if (oldprobe) {
           // only need check_seen_thread once, not on each loop
@@ -681,25 +690,25 @@
         }
       }
       // All cases: add LOAD_INSTANCE
-      // case 3: UnsyncloadClass: allow competing threads to try
+      // case 3: UnsyncloadClass || case 5: parallelCapable: allow competing threads to try
       // LOAD_INSTANCE in parallel
       // add placeholder entry even if error - callers will remove on error
-      if (!class_has_been_loaded) {
+      if (!throw_circularity_error && !class_has_been_loaded) {
         PlaceholderEntry* newprobe = placeholders()->find_and_add(p_index, p_hash, name, class_loader, PlaceholderTable::LOAD_INSTANCE, nullsymbolHandle, THREAD);
-        if (throw_circularity_error) {
-          newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
-        }
         // For class loaders that do not acquire the classloader object lock,
         // if they did not catch another thread holding LOAD_INSTANCE,
         // need a check analogous to the acquire ObjectLocker/find_class
         // i.e. now that we hold the LOAD_INSTANCE token on loading this class/CL
         // one final check if the load has already completed
+        // class loaders holding the ObjectLock shouldn't find the class here
         klassOop check = find_class(d_index, d_hash, name, class_loader);
         if (check != NULL) {
         // Klass is already loaded, so just return it
           k = instanceKlassHandle(THREAD, check);
           class_has_been_loaded = true;
           newprobe->remove_seen_thread(THREAD, PlaceholderTable::LOAD_INSTANCE);
+          placeholders()->find_and_remove(p_index, p_hash, name, class_loader, THREAD);
+          SystemDictionary_lock->notify_all();
         }
       }
     }
@@ -714,18 +723,14 @@
       // Do actual loading
       k = load_instance_class(name, class_loader, THREAD);
 
-      // In custom class loaders, the usual findClass calls
-      // findLoadedClass, which directly searches  the SystemDictionary, then
-      // defineClass. If these are not atomic with respect to other threads,
-      // the findLoadedClass can fail, but the defineClass can get a
-      // LinkageError:: duplicate class definition.
+      // For UnsyncloadClass and AllowParallelDefineClass only:
       // If they got a linkageError, check if a parallel class load succeeded.
       // If it did, then for bytecode resolution the specification requires
       // that we return the same result we did for the other thread, i.e. the
       // successfully loaded instanceKlass
-      // Note: Class can not be unloaded as long as any classloader refs exist
       // Should not get here for classloaders that support parallelism
-      // with the new cleaner mechanism, e.g. bootstrap classloader
+      // with the new cleaner mechanism
+      // Bootstrap goes through here to allow for an extra guarantee check
       if (UnsyncloadClass || (class_loader.is_null())) {
         if (k.is_null() && HAS_PENDING_EXCEPTION
           && PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
@@ -841,6 +846,12 @@
                                 Handle protection_domain,
                                 TRAPS) {
 
+  // UseNewReflection
+  // The result of this call should be consistent with the result
+  // of the call to resolve_instance_class_or_null().
+  // See evaluation 6790209 and 4474172 for more details.
+  class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
+
   unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
   int d_index = dictionary()->hash_to_index(d_hash);
 
@@ -955,10 +966,10 @@
   instanceKlassHandle k = ClassFileParser(st).parseClassFile(class_name,
                                                              class_loader,
                                                              protection_domain,
-                                                             cp_patches,
                                                              parsed_name,
                                                              THREAD);
 
+
   // We don't redefine the class, so we just need to clean up whether there
   // was an error or not (don't want to modify any system dictionary
   // data structures).
@@ -1013,11 +1024,17 @@
                                                ClassFileStream* st,
                                                TRAPS) {
 
-  // Make sure we are synchronized on the class loader before we initiate
-  // loading.
+  // Classloaders that support parallelism, e.g. bootstrap classloader,
+  // or all classloaders with UnsyncloadClass do not acquire lock here
+  bool DoObjectLock = true;
+  if (is_parallelCapable(class_loader)) {
+    DoObjectLock = false;
+  }
+
+  // Make sure we are synchronized on the class loader before we proceed
   Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
   check_loader_lock_contention(lockObject, THREAD);
-  ObjectLocker ol(lockObject, THREAD);
+  ObjectLocker ol(lockObject, THREAD, DoObjectLock);
 
   symbolHandle parsed_name;
 
@@ -1069,7 +1086,13 @@
            "external class name format used internally");
 
     // Add class just loaded
-    define_instance_class(k, THREAD);
+    // If a class loader supports parallel classloading handle parallel define requests
+    // find_or_define_instance_class may return a different instanceKlass
+    if (is_parallelCapable(class_loader)) {
+      k = find_or_define_instance_class(class_name, class_loader, k, THREAD);
+    } else {
+      define_instance_class(k, THREAD);
+    }
   }
 
   // If parsing the class file or define_instance_class failed, we
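
The hunk above makes the loader-object lock optional: ObjectLocker takes a DoObjectLock flag, and parallel-capable loaders skip the lock entirely. A minimal sketch of that conditional-locking idiom (not HotSpot code: MaybeLocker and std::mutex are hypothetical stand-ins for ObjectLocker and the loader object monitor):

#include <mutex>
#include <cstdio>

// Stand-in for ObjectLocker(lockObject, THREAD, DoObjectLock): takes the
// lock only when asked to, so the RAII shape of the caller is unchanged.
class MaybeLocker {
  std::mutex* _m;
 public:
  MaybeLocker(std::mutex& m, bool do_lock) : _m(do_lock ? &m : nullptr) {
    if (_m != nullptr) _m->lock();
  }
  ~MaybeLocker() {
    if (_m != nullptr) _m->unlock();
  }
};

std::mutex loader_lock;   // stand-in for the class loader object monitor

void resolve_from_stream(bool parallel_capable) {
  bool do_object_lock = !parallel_capable;   // mirrors the DoObjectLock flag
  MaybeLocker ol(loader_lock, do_object_lock);
  printf(parallel_capable ? "parallel capable: loader lock skipped\n"
                          : "serial loader: loader lock held\n");
}

int main() {
  resolve_from_stream(false);
  resolve_from_stream(true);
  return 0;
}
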
@@ -1299,7 +1322,7 @@
     }
 #endif // KERNEL
 
-    // find_or_define_instance_class may return a different k
+    // find_or_define_instance_class may return a different instanceKlass
     if (!k.is_null()) {
       k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
     }
@@ -1316,14 +1339,24 @@
 
     KlassHandle spec_klass (THREAD, SystemDictionary::classloader_klass());
 
-    // UnsyncloadClass option means don't synchronize loadClass() calls.
-    // loadClassInternal() is synchronized and public loadClass(String) is not.
-    // This flag is for diagnostic purposes only. It is risky to call
+    // Call the public unsynchronized loadClass(String) directly for all class
+    // loaders.  For parallelCapable class loaders in JDK >= 7, loadClass(String,
+    // boolean) acquires a class-name based lock rather than the class loader
+    // object lock.  JDK < 7 class loaders already acquire the class loader lock
+    // in loadClass(String, boolean), so the call to loadClassInternal() is not required.
+    //
+    // UnsyncloadClass flag means both call loadClass(String) and do
+    // not acquire the class loader lock even for class loaders that are
+    // not parallelCapable. This was a risky transitional
+    // flag for diagnostic purposes only. It is risky to call
     // custom class loaders without synchronization.
     // WARNING If a custom class loader does NOT synchronize findClass, or callers of
-    // findClass, this flag risks unexpected timing bugs in the field.
+    // findClass, the UnsyncloadClass flag risks unexpected timing bugs in the field.
     // Do NOT assume this will be supported in future releases.
-    if (!UnsyncloadClass && has_loadClassInternal()) {
+    //
+    // Added MustCallLoadClassInternal in case we discover in the field
+    // a customer that counts on this call
+    if (MustCallLoadClassInternal && has_loadClassInternal()) {
       JavaCalls::call_special(&result,
                               class_loader,
                               spec_klass,
@@ -1365,14 +1398,17 @@
 
   Handle class_loader_h(THREAD, k->class_loader());
 
-  // for bootstrap classloader don't acquire lock
-  if (!class_loader_h.is_null()) {
+  // For bootstrap and other parallel classloaders don't acquire the lock;
+  // use the placeholder token instead.
+  // If a parallelCapable class loader calls define_instance_class instead of
+  // find_or_define_instance_class to get here, we have a timing
+  // hole with systemDictionary updates and check_constraints.
+  if (!class_loader_h.is_null() && !is_parallelCapable(class_loader_h)) {
     assert(ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD,
          compute_loader_lock_object(class_loader_h, THREAD)),
          "define called without lock");
   }
 
-
   // Check class-loading constraints. Throw exception if violation is detected.
   // Grabs and releases SystemDictionary_lock
   // The check_constraints/find_class call and update_dictionary sequence
@@ -1427,59 +1463,63 @@
 
 // Support parallel classloading
 // Initial implementation for bootstrap classloader
-// For future:
 // For custom class loaders that support parallel classloading,
-// in case they do not synchronize around
-// FindLoadedClass/DefineClass calls, we check for parallel
+// With the AllowParallelDefineClass flag==true, in case they do not synchronize
+// around FindLoadedClass/DefineClass calls, we check for parallel
+// loading for them, wait if a defineClass is in progress
+// and return the initial requestor's results.
+// With AllowParallelDefineClass flag==false, call through to define_instance_class
+// which will throw LinkageError: duplicate class definition.
 // For better performance, the class loaders should synchronize
-// findClass(), i.e. FindLoadedClass/DefineClass or they
+// findClass(), i.e. FindLoadedClass/DefineClassIfAbsent or they
 // potentially waste time reading and parsing the bytestream.
 // Note: VM callers should ensure consistency of k/class_name,class_loader
 instanceKlassHandle SystemDictionary::find_or_define_instance_class(symbolHandle class_name, Handle class_loader, instanceKlassHandle k, TRAPS) {
 
   instanceKlassHandle nh = instanceKlassHandle(); // null Handle
+  symbolHandle name_h(THREAD, k->name()); // passed in class_name may be null
 
-  unsigned int d_hash = dictionary()->compute_hash(class_name, class_loader);
+  unsigned int d_hash = dictionary()->compute_hash(name_h, class_loader);
   int d_index = dictionary()->hash_to_index(d_hash);
 
 // Hold SD lock around find_class and placeholder creation for DEFINE_CLASS
-  unsigned int p_hash = placeholders()->compute_hash(class_name, class_loader);
+  unsigned int p_hash = placeholders()->compute_hash(name_h, class_loader);
   int p_index = placeholders()->hash_to_index(p_hash);
   PlaceholderEntry* probe;
 
   {
     MutexLocker mu(SystemDictionary_lock, THREAD);
     // First check if class already defined
-    klassOop check = find_class(d_index, d_hash, class_name, class_loader);
+    klassOop check = find_class(d_index, d_hash, name_h, class_loader);
     if (check != NULL) {
       return(instanceKlassHandle(THREAD, check));
     }
 
     // Acquire define token for this class/classloader
     symbolHandle nullsymbolHandle;
-    probe = placeholders()->find_and_add(p_index, p_hash, class_name, class_loader, PlaceholderTable::DEFINE_CLASS, nullsymbolHandle, THREAD);
-    // Check if another thread defining in parallel
-    if (probe->definer() == NULL) {
-      // Thread will define the class
-      probe->set_definer(THREAD);
+    probe = placeholders()->find_and_add(p_index, p_hash, name_h, class_loader, PlaceholderTable::DEFINE_CLASS, nullsymbolHandle, THREAD);
+    // Wait if another thread defining in parallel
+    // All threads wait - even those that will throw duplicate class: otherwise
+    // caller is surprised by LinkageError: duplicate, but findLoadedClass fails
+    // if other thread has not finished updating dictionary
+    while (probe->definer() != NULL) {
+      SystemDictionary_lock->wait();
+    }
+    // Only special cases allow parallel defines and can use other thread's results
+    // Other cases fall through, and may run into duplicate defines
+    // caught by finding an entry in the SystemDictionary
+    if ((UnsyncloadClass || AllowParallelDefineClass) && (probe->instanceKlass() != NULL)) {
+        probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
+        placeholders()->find_and_remove(p_index, p_hash, name_h, class_loader, THREAD);
+        SystemDictionary_lock->notify_all();
+#ifdef ASSERT
+        klassOop check = find_class(d_index, d_hash, name_h, class_loader);
+        assert(check != NULL, "definer missed recording success");
+#endif
+        return(instanceKlassHandle(THREAD, probe->instanceKlass()));
     } else {
-      // Wait for defining thread to finish and return results
-      while (probe->definer() != NULL) {
-        SystemDictionary_lock->wait();
-      }
-      if (probe->instanceKlass() != NULL) {
-        probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
-        return(instanceKlassHandle(THREAD, probe->instanceKlass()));
-      } else {
-        // If definer had an error, try again as any new thread would
-        probe->set_definer(THREAD);
-#ifdef ASSERT
-        klassOop check = find_class(d_index, d_hash, class_name, class_loader);
-        assert(check == NULL, "definer missed recording success");
-#endif
-      }
+      // This thread will define the class (even if earlier thread tried and had an error)
+      probe->set_definer(THREAD);
     }
   }
 
@@ -1490,7 +1530,7 @@
   // definer must notify any waiting threads
   {
     MutexLocker mu(SystemDictionary_lock, THREAD);
-    PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, class_name, class_loader);
+    PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, name_h, class_loader);
     assert(probe != NULL, "DEFINE_CLASS placeholder lost?");
     if (probe != NULL) {
       if (HAS_PENDING_EXCEPTION) {
@@ -1501,6 +1541,7 @@
       }
       probe->set_definer(NULL);
       probe->remove_seen_thread(THREAD, PlaceholderTable::DEFINE_CLASS);
+      placeholders()->find_and_remove(p_index, p_hash, name_h, class_loader, THREAD);
       SystemDictionary_lock->notify_all();
     }
   }
@@ -1512,7 +1553,6 @@
 
   return k;
 }
-
 Handle SystemDictionary::compute_loader_lock_object(Handle class_loader, TRAPS) {
   // If class_loader is NULL we synchronize on _system_loader_lock_obj
   if (class_loader.is_null()) {
@@ -1902,11 +1942,11 @@
     warning("Cannot find sun/jkernel/DownloadManager");
   }
 #endif // KERNEL
+
   { // Compute whether we should use loadClass or loadClassInternal when loading classes.
     methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
     _has_loadClassInternal = (method != NULL);
   }
-
   { // Compute whether we should use checkPackageAccess or NOT
     methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
     _has_checkPackageAccess = (method != NULL);
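
The reworked find_or_define_instance_class above is the heart of the change: every thread now waits while a definer is active, and parallel-capable loaders adopt the winner's result instead of raising a duplicate-definition error. A minimal sketch of that protocol (not HotSpot code: std::mutex/std::condition_variable stand in for SystemDictionary_lock, a single global Placeholder for the placeholder table, and strings for instanceKlass results):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct Placeholder {
  std::thread::id definer;              // default id means "no definer"
  const char* instanceKlass = nullptr;  // result recorded by the definer
};

static std::mutex sd_lock;              // stands in for SystemDictionary_lock
static std::condition_variable sd_cv;
static Placeholder probe;               // one class/loader pair, for simplicity

static const char* find_or_define(bool parallel_capable, const char* parsed) {
  std::unique_lock<std::mutex> lk(sd_lock);
  // All threads wait while another thread is defining - even those that
  // would fail - so findLoadedClass sees a consistent dictionary afterwards.
  sd_cv.wait(lk, [] { return probe.definer == std::thread::id(); });
  if (parallel_capable && probe.instanceKlass != nullptr) {
    return probe.instanceKlass;         // adopt the first definer's result
  }
  // (A serial loader falls through here and would later hit the
  // duplicate-definition check; the sketch simply redefines.)
  probe.definer = std::this_thread::get_id();
  lk.unlock();
  // ... the actual class definition happens here, outside the lock ...
  lk.lock();
  probe.instanceKlass = parsed;         // record success for later waiters
  probe.definer = std::thread::id();
  sd_cv.notify_all();
  return parsed;
}

int main() {
  std::thread t1([] { printf("t1 got %s\n", find_or_define(true, "C@t1")); });
  std::thread t2([] { printf("t2 got %s\n", find_or_define(true, "C@t2")); });
  t1.join();
  t2.join();                            // both threads report the same klass
  return 0;
}

Whichever thread wins the race defines the class; the other adopts its result, which is exactly why the dictionary and placeholder updates above must happen before notify_all.
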
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -526,6 +526,7 @@
   static instanceKlassHandle load_instance_class(symbolHandle class_name, Handle class_loader, TRAPS);
   static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
   static void check_loader_lock_contention(Handle loader_lock, TRAPS);
+  static bool is_parallelCapable(Handle class_loader);
 
   static klassOop find_shared_class(symbolHandle class_name);
 
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -362,6 +362,7 @@
   template(class_signature,                           "Ljava/lang/Class;")                                        \
   template(string_signature,                          "Ljava/lang/String;")                                       \
   template(reference_signature,                       "Ljava/lang/ref/Reference;")                                \
+  template(concurrenthashmap_signature,               "Ljava/util/concurrent/ConcurrentHashMap;")                 \
   /* signature symbols needed by intrinsics */                                                                    \
   VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, template, VM_ALIAS_IGNORE)            \
                                                                                                                   \
@@ -374,6 +375,9 @@
   /* used by ClassFormatError when class name is not known yet */                                                 \
   template(unknown_class_name,                        "<Unknown>")                                                \
                                                                                                                   \
+  /* used to identify class loaders handling parallel class loading */                                            \
+  template(parallelCapable_name,                      "parallelLockMap")                                          \
+                                                                                                                  \
   /* JVM monitoring and management support */                                                                     \
   template(java_lang_StackTraceElement_array,          "[Ljava/lang/StackTraceElement;")                          \
   template(java_lang_management_ThreadState,           "java/lang/management/ThreadState")                        \
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -706,6 +706,30 @@
   }
 }
 
+// Apply the given closure to each live object in the space
+// The usage of CompactibleFreeListSpace
+// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
+// objects in the space to hold references to objects that are no longer
+// valid.  For example, an object may reference another object
+// that has already been swept up (collected).  This method uses
+// obj_is_alive() to determine whether it is safe to apply the closure to
+// an object.  See obj_is_alive() for details on how liveness of an
+// object is decided.
+
+void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
+  assert_lock_strong(freelistLock());
+  NOT_PRODUCT(verify_objects_initialized());
+  HeapWord *cur, *limit;
+  size_t curSize;
+  for (cur = bottom(), limit = end(); cur < limit;
+       cur += curSize) {
+    curSize = block_size(cur);
+    if (block_is_obj(cur) && obj_is_alive(cur)) {
+      blk->do_object(oop(cur));
+    }
+  }
+}
+
 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                   UpwardsObjectClosure* cl) {
   assert_locked();
@@ -861,7 +885,9 @@
     } else {
       // must read from what 'p' points to in each loop.
       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
-      if (k != NULL && ((oopDesc*)p)->is_parsable()) {
+      if (k != NULL &&
+          ((oopDesc*)p)->is_parsable() &&
+          ((oopDesc*)p)->is_conc_safe()) {
         assert(k->is_oop(), "Should really be klass oop.");
         oop o = (oop)p;
         assert(o->is_oop(), "Should be an oop");
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -481,6 +481,15 @@
   void oop_iterate(OopClosure* cl);
 
   void object_iterate(ObjectClosure* blk);
+  // Apply the closure to each live object in the space.
+  // The usage of CompactibleFreeListSpace
+  // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
+  // objects in the space to hold references to objects that are no longer
+  // valid.  For example, an object may reference another object
+  // that has already been swept up (collected).  This method uses
+  // obj_is_alive() to determine whether it is safe to iterate over
+  // an object.
+  void safe_object_iterate(ObjectClosure* blk);
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 
   // Requires that "mr" be entirely within the space.
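
A minimal sketch of the cursor walk in safe_object_iterate() above (not HotSpot code; the int-array heap and sign-encoded liveness are illustrative stand-ins for block_size()/block_is_obj()/obj_is_alive()):

#include <cstdio>

// Hypothetical heap layout: each block starts with its size in words; a
// negative size marks a dead or free block (standing in for the
// block_is_obj()/obj_is_alive() tests, which stay parsable during sweep).
static int heap[] = {3, 7, 7, -2, 0, 4, 9, 9, 9, 1};
static const int limit = sizeof(heap) / sizeof(heap[0]);

int main() {
  for (int cur = 0; cur < limit; ) {
    int sz = heap[cur];
    bool live = sz > 0;                 // block_is_obj(cur) && obj_is_alive(cur)
    if (live) printf("live object at %d, size %d\n", cur, sz);
    cur += live ? sz : -sz;             // block_size(cur) always advances the cursor
  }
  return 0;
}

The key property mirrored here is that the cursor can always advance, even over dead blocks; only the closure application is gated on liveness.
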
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -3018,6 +3018,16 @@
 }
 
 void
+ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
+  if (freelistLock()->owned_by_self()) {
+    Generation::safe_object_iterate(cl);
+  } else {
+    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+    Generation::safe_object_iterate(cl);
+  }
+}
+
+void
 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
 }
 
@@ -6623,7 +6633,11 @@
   if (_bitMap->isMarked(addr)) {
     // it's marked; is it potentially uninitialized?
     if (p->klass_or_null() != NULL) {
-      if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
+      // If is_conc_safe is false, the object may be undergoing
+      // change by the VM outside a safepoint.  Don't try to
+      // scan it, but rather leave it for the remark phase.
+      if (CMSPermGenPrecleaningEnabled &&
+          (!p->is_conc_safe() || !p->is_parsable())) {
         // Signal precleaning to redirty the card since
         // the klass pointer is already installed.
         assert(size == 0, "Initial value");
@@ -7001,7 +7015,6 @@
       _mut->clear_range(mr);
     }
   DEBUG_ONLY(})
-
   // Note: the finger doesn't advance while we drain
   // the stack below.
   PushOrMarkClosure pushOrMarkClosure(_collector,
@@ -8062,9 +8075,13 @@
     #ifdef DEBUG
       if (oop(addr)->klass_or_null() != NULL &&
           (   !_collector->should_unload_classes()
-           || oop(addr)->is_parsable())) {
+           || (oop(addr)->is_parsable()) &&
+               oop(addr)->is_conc_safe())) {
         // Ignore mark word because we are running concurrent with mutators
         assert(oop(addr)->is_oop(true), "live block should be an oop");
+        // is_conc_safe is checked before performing this assertion
+        // because an object that is not is_conc_safe may not yet have
+        // a correct return from size().
         assert(size ==
                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
                "P-mark and computed size do not agree");
@@ -8077,6 +8094,13 @@
            (!_collector->should_unload_classes()
             || oop(addr)->is_parsable()),
            "Should be an initialized object");
+    // Note that there are objects used during class redefinition
+    // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite())
+    // which are discarded with their is_conc_safe state still
+    // false.  These objects may be floating garbage, so they may be
+    // seen here.  If they are floating garbage, their size
+    // should be attainable from their klass.  Do not assume that
+    // is_conc_safe() is true for oop(addr).
     // Ignore mark word because we are running concurrent with mutators
     assert(oop(addr)->is_oop(true), "live block should be an oop");
     // Verify that the bit map has no bits marked between
@@ -8484,7 +8508,7 @@
   size_t i = num;
   oop  cur = _overflow_list;
   const markOop proto = markOopDesc::prototype();
-  NOT_PRODUCT(size_t n = 0;)
+  NOT_PRODUCT(ssize_t n = 0;)
   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
     next = oop(cur->mark());
     cur->set_mark(proto);   // until proven otherwise
@@ -8501,45 +8525,131 @@
   return !stack->isEmpty();
 }
 
-// Multi-threaded; use CAS to break off a prefix
+#define BUSY  (oop(0x1aff1aff))
+// (MT-safe) Get a prefix of at most "num" from the list.
+// The overflow list is chained through the mark word of
+// each object in the list. We fetch the entire list,
+// break off a prefix of the right size and return the
+// remainder. If other threads try to take objects from
+// the overflow list at that time, they will wait for
+// some time to see if data becomes available. If (and
+// only if) another thread places one or more object(s)
+// on the global list before we have returned the suffix
+// to the global list, we will walk down our local list
+// to find its end and append the global list to
+// our suffix before returning it. This suffix walk can
+// prove to be expensive (quadratic in the amount of traffic)
+// when there are many objects in the overflow list and
+// there is much producer-consumer contention on the list.
+// *NOTE*: The overflow list manipulation code here and
+// in ParNewGeneration:: are very similar in shape,
+// except that in the ParNew case we use the old (from/eden)
+// copy of the object to thread the list via its klass word.
+// Because of the common code, if you make any changes in
+// the code below, please check the ParNew version to see if
+// similar changes might be needed.
+// CR 6797058 has been filed to consolidate the common code.
 bool CMSCollector::par_take_from_overflow_list(size_t num,
                                                OopTaskQueue* work_q) {
-  assert(work_q->size() == 0, "That's the current policy");
+  assert(work_q->size() == 0, "First empty local work queue");
   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
   if (_overflow_list == NULL) {
     return false;
   }
   // Grab the entire list; we'll put back a suffix
-  oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
-  if (prefix == NULL) {  // someone grabbed it before we did ...
-    // ... we could spin for a short while, but for now we don't
-    return false;
-  }
+  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+  Thread* tid = Thread::current();
+  size_t CMSOverflowSpinCount = (size_t)ParallelGCThreads;
+  size_t sleep_time_millis = MAX2((size_t)1, num/100);
+  // If the list is busy, we spin for a short while,
+  // sleeping between attempts to get the list.
+  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
+    os::sleep(tid, sleep_time_millis, false);
+    if (_overflow_list == NULL) {
+      // Nothing left to take
+      return false;
+    } else if (_overflow_list != BUSY) {
+      // Try and grab the prefix
+      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+    }
+  }
+  // If the list was found to be empty, or we spun long
+  // enough, we give up and return empty-handed. If we leave
+  // the list in the BUSY state below, it must be the case that
+  // some other thread holds the overflow list and will set it
+  // to a non-BUSY state in the future.
+  if (prefix == NULL || prefix == BUSY) {
+     // Nothing to take or waited long enough
+     if (prefix == NULL) {
+       // Write back the NULL in case we overwrote it with BUSY above
+       // and it is still the same value.
+       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+     }
+     return false;
+  }
+  assert(prefix != NULL && prefix != BUSY, "Error");
   size_t i = num;
   oop cur = prefix;
+  // Walk down the first "num" objects, unless we reach the end.
   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
-  if (cur->mark() != NULL) {
+  if (cur->mark() == NULL) {
+    // We have "num" or fewer elements in the list, so there
+    // is nothing to return to the global list.
+    // Write back the NULL in lieu of the BUSY we wrote
+    // above, if it is still the same value.
+    if (_overflow_list == BUSY) {
+      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+    }
+  } else {
+    // Chop off the suffix and return it to the global list.
+    assert(cur->mark() != BUSY, "Error");
     oop suffix_head = cur->mark(); // suffix will be put back on global list
     cur->set_mark(NULL);           // break off suffix
-    // Find tail of suffix so we can prepend suffix to global list
-    for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
-    oop suffix_tail = cur;
-    assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
-           "Tautology");
+    // It's possible that the list is still in the empty (BUSY) state
+    // we left it in a short while ago; in that case we may be
+    // able to place back the suffix without incurring the cost
+    // of a walk down the list.
     oop observed_overflow_list = _overflow_list;
-    do {
-      cur = observed_overflow_list;
-      suffix_tail->set_mark(markOop(cur));
+    oop cur_overflow_list = observed_overflow_list;
+    bool attached = false;
+    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
       observed_overflow_list =
-        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur);
-    } while (cur != observed_overflow_list);
+        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+      if (cur_overflow_list == observed_overflow_list) {
+        attached = true;
+        break;
+      } else cur_overflow_list = observed_overflow_list;
+    }
+    if (!attached) {
+      // Too bad, someone else sneaked in (at least) an element; we'll need
+      // to do a splice. Find tail of suffix so we can prepend suffix to global
+      // list.
+      for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
+      oop suffix_tail = cur;
+      assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
+             "Tautology");
+      observed_overflow_list = _overflow_list;
+      do {
+        cur_overflow_list = observed_overflow_list;
+        if (cur_overflow_list != BUSY) {
+          // Do the splice ...
+          suffix_tail->set_mark(markOop(cur_overflow_list));
+        } else { // cur_overflow_list == BUSY
+          suffix_tail->set_mark(NULL);
+        }
+        // ... and try to place spliced list back on overflow_list ...
+        observed_overflow_list =
+          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+      } while (cur_overflow_list != observed_overflow_list);
+      // ... until we have succeeded in doing so.
+    }
   }
 
   // Push the prefix elements on work_q
   assert(prefix != NULL, "control point invariant");
   const markOop proto = markOopDesc::prototype();
   oop next;
-  NOT_PRODUCT(size_t n = 0;)
+  NOT_PRODUCT(ssize_t n = 0;)
   for (cur = prefix; cur != NULL; cur = next) {
     next = oop(cur->mark());
     cur->set_mark(proto);   // until proven otherwise
@@ -8573,11 +8683,16 @@
   oop cur_overflow_list;
   do {
     cur_overflow_list = observed_overflow_list;
-    p->set_mark(markOop(cur_overflow_list));
+    if (cur_overflow_list != BUSY) {
+      p->set_mark(markOop(cur_overflow_list));
+    } else {
+      p->set_mark(NULL);
+    }
     observed_overflow_list =
       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
   } while (cur_overflow_list != observed_overflow_list);
 }
+#undef BUSY
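
The BUSY-marker protocol above is subtle enough to deserve a distilled version. Below is a minimal standalone C++ sketch of the same idea (not HotSpot code: std::atomic stands in for Atomic::xchg_ptr/cmpxchg_ptr, a plain Node::next for the mark-word chaining, and the spin/sleep backoff is reduced to a single give-up):

#include <atomic>
#include <cstdio>

struct Node { Node* next; int payload; };

static Node* const BUSY = reinterpret_cast<Node*>(0x1);  // sentinel, like oop(0x1aff1aff)
static std::atomic<Node*> overflow_list{nullptr};

// Push one node; never leave a link to the BUSY sentinel behind.
static void push(Node* n) {
  Node* observed = overflow_list.load();
  for (;;) {
    n->next = (observed == BUSY) ? nullptr : observed;
    if (overflow_list.compare_exchange_weak(observed, n)) break;
  }
}

// Take up to num nodes. Returns the detached prefix, or nullptr if the list
// was empty or another taker held it (HotSpot sleeps and retries instead).
static Node* take_prefix(int num, int* taken) {
  *taken = 0;
  Node* prefix = overflow_list.exchange(BUSY);
  if (prefix == nullptr || prefix == BUSY) {
    Node* expected = BUSY;                 // undo our BUSY if the list was empty
    if (prefix == nullptr)
      overflow_list.compare_exchange_strong(expected, nullptr);
    return nullptr;
  }
  Node* cur = prefix;
  int n = 1;
  while (n < num && cur->next != nullptr) { cur = cur->next; ++n; }
  Node* suffix = cur->next;
  cur->next = nullptr;                     // break off the prefix we keep
  Node* expected = BUSY;
  if (suffix == nullptr) {
    overflow_list.compare_exchange_strong(expected, nullptr);
  } else if (!overflow_list.compare_exchange_strong(expected, suffix)) {
    // Cheap case failed: someone pushed while we held the list, so walk to
    // the suffix tail and splice the suffix in front of the current head.
    Node* tail = suffix;
    while (tail->next != nullptr) tail = tail->next;
    Node* observed = overflow_list.load();
    for (;;) {
      tail->next = (observed == BUSY) ? nullptr : observed;
      if (overflow_list.compare_exchange_weak(observed, suffix)) break;
    }
  }
  *taken = n;
  return prefix;
}

int main() {
  static Node nodes[5];
  for (int i = 0; i < 5; ++i) { nodes[i].payload = i; push(&nodes[i]); }
  int taken = 0;
  for (Node* p = take_prefix(3, &taken); p != nullptr; p = p->next)
    printf("took %d\n", p->payload);       // 4, 3, 2; suffix 1 -> 0 goes back
  printf("taken=%d\n", taken);
  return 0;
}
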
 
 // Single threaded
 // General Note on GrowableArray: pushes may silently fail
@@ -8586,7 +8701,7 @@
 // a lot of code in the JVM. The prudent thing for GrowableArray
 // to do (for now) is to exit with an error. However, that may
 // be too draconian in some cases because the caller may be
-// able to recover without much harm. For suych cases, we
+// able to recover without much harm. For such cases, we
 // should probably introduce a "soft_push" method which returns
 // an indication of success or failure with the assumption that
 // the caller may be able to recover from a failure; code in
@@ -8594,8 +8709,6 @@
 // failures where possible, thus, incrementally hardening the VM
 // in such low resource situations.
 void CMSCollector::preserve_mark_work(oop p, markOop m) {
-  int PreserveMarkStackSize = 128;
-
   if (_preserved_oop_stack == NULL) {
     assert(_preserved_mark_stack == NULL,
            "bijection with preserved_oop_stack");
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -595,7 +595,7 @@
   size_t        _ser_kac_preclean_ovflw;
   size_t        _ser_kac_ovflw;
   size_t        _par_kac_ovflw;
-  NOT_PRODUCT(size_t _num_par_pushes;)
+  NOT_PRODUCT(ssize_t _num_par_pushes;)
 
   // ("Weak") Reference processing support
   ReferenceProcessor*            _ref_processor;
@@ -1212,6 +1212,7 @@
   // More iteration support
   virtual void oop_iterate(MemRegion mr, OopClosure* cl);
   virtual void oop_iterate(OopClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl);
   virtual void object_iterate(ObjectClosure* cl);
 
   // Need to declare the full complement of closures, whether we'll
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -24,7 +24,7 @@
 
 // We need to sort heap regions by collection desirability.
 
-class CSetChooserCache {
+class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
 private:
   enum {
     CacheLength = 16
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -33,7 +33,7 @@
   PYA_cancel     // It's been completed by somebody else: cancel.
 };
 
-class ConcurrentG1Refine {
+class ConcurrentG1Refine: public CHeapObj {
   ConcurrentG1RefineThread* _cg1rThread;
 
   volatile jint _pya;
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -30,7 +30,7 @@
 // A generic CM bit map.  This is essentially a wrapper around the BitMap
 // class, with one bit per (1<<_shifter) HeapWords.
 
-class CMBitMapRO {
+class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
  protected:
   HeapWord* _bmStartWord;      // base address of range covered by map
   size_t    _bmWordSize;       // map size (in #HeapWords covered)
@@ -139,7 +139,7 @@
 
 // Represents a marking stack used by the CM collector.
 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
-class CMMarkStack {
+class CMMarkStack VALUE_OBJ_CLASS_SPEC {
   ConcurrentMark* _cm;
   oop*   _base;      // bottom of stack
   jint   _index;     // one more than last occupied index
@@ -237,7 +237,7 @@
   void oops_do(OopClosure* f);
 };
 
-class CMRegionStack {
+class CMRegionStack VALUE_OBJ_CLASS_SPEC {
   MemRegion* _base;
   jint _capacity;
   jint _index;
@@ -312,7 +312,7 @@
 
 class ConcurrentMarkThread;
 
-class ConcurrentMark {
+class ConcurrentMark: public CHeapObj {
   friend class ConcurrentMarkThread;
   friend class CMTask;
   friend class CMBitMapClosure;
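
A minimal sketch of what these base-class annotations buy (not HotSpot code; ValueObjLike and CHeapObjLike are hypothetical stand-ins for VALUE_OBJ_CLASS_SPEC and CHeapObj): value classes cannot be heap-allocated at all, while CHeapObj subclasses get their storage from the C heap rather than the Java heap or resource area.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

class ValueObjLike {                          // like VALUE_OBJ_CLASS_SPEC
 public:
  void* operator new(std::size_t) = delete;   // embedded or stack use only
};

class CHeapObjLike {                          // like HotSpot's CHeapObj
 public:
  void* operator new(std::size_t size) { return std::malloc(size); }
  void operator delete(void* p) { std::free(p); }
};

class CSetChooserCacheLike : public ValueObjLike { int _cache[16]; };
class ConcurrentMarkLike   : public CHeapObjLike { public: int _state = 0; };

int main() {
  CSetChooserCacheLike embedded;                       // OK: automatic storage
  (void)embedded;
  ConcurrentMarkLike* cm = new ConcurrentMarkLike();   // OK: C heap
  printf("%d\n", cm->_state);
  delete cm;
  // new CSetChooserCacheLike();   // would not compile: operator new is deleted
  return 0;
}
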
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -141,7 +141,7 @@
     _scan_only_head(NULL), _scan_only_tail(NULL), _curr_scan_only(NULL),
     _length(0), _scan_only_length(0),
     _last_sampled_rs_lengths(0),
-    _survivor_head(NULL), _survivors_tail(NULL), _survivor_length(0)
+    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0)
 {
   guarantee( check_list_empty(false), "just making sure..." );
 }
@@ -159,16 +159,15 @@
 }
 
 void YoungList::add_survivor_region(HeapRegion* hr) {
-  assert(!hr->is_survivor(), "should not already be for survived");
+  assert(hr->is_survivor(), "should be flagged as survivor region");
   assert(hr->get_next_young_region() == NULL, "cause it should!");
 
   hr->set_next_young_region(_survivor_head);
   if (_survivor_head == NULL) {
-    _survivors_tail = hr;
+    _survivor_tail = hr;
   }
   _survivor_head = hr;
 
-  hr->set_survivor();
   ++_survivor_length;
 }
 
@@ -239,7 +238,7 @@
 
   empty_list(_survivor_head);
   _survivor_head = NULL;
-  _survivors_tail = NULL;
+  _survivor_tail = NULL;
   _survivor_length = 0;
 
   _last_sampled_rs_lengths = 0;
@@ -391,6 +390,7 @@
 
   // Add survivor regions to SurvRateGroup.
   _g1h->g1_policy()->note_start_adding_survivor_regions();
+  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
   for (HeapRegion* curr = _survivor_head;
        curr != NULL;
        curr = curr->get_next_young_region()) {
@@ -401,7 +401,7 @@
   if (_survivor_head != NULL) {
     _head           = _survivor_head;
     _length         = _survivor_length + _scan_only_length;
-    _survivors_tail->set_next_young_region(_scan_only_head);
+    _survivor_tail->set_next_young_region(_scan_only_head);
   } else {
     _head           = _scan_only_head;
     _length         = _scan_only_length;
@@ -418,9 +418,9 @@
   _curr_scan_only   = NULL;
 
   _survivor_head    = NULL;
-  _survivors_tail   = NULL;
+  _survivor_tail    = NULL;
   _survivor_length  = 0;
-  _g1h->g1_policy()->finished_recalculating_age_indexes();
+  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
 
   assert(check_list_well_formed(), "young list should be well formed");
 }
@@ -553,7 +553,7 @@
   if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
     alloc_region = newAllocRegion_work(word_size, true, zero_filled);
     if (purpose == GCAllocForSurvived && alloc_region != NULL) {
-      _young_list->add_survivor_region(alloc_region);
+      alloc_region->set_survivor();
     }
     ++_gc_alloc_region_counts[purpose];
   } else {
@@ -949,6 +949,10 @@
     GCOverheadReporter::recordSTWEnd(end);
     g1_policy()->record_full_collection_end();
 
+#ifdef TRACESPINNING
+    ParallelTaskTerminator::print_termination_counts();
+#endif
+
     gc_epilogue(true);
 
     // Abandon concurrent refinement.  This must happen last: in the
@@ -1285,7 +1289,9 @@
   _unclean_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
-  _surviving_young_words(NULL)
+  _surviving_young_words(NULL),
+  _in_cset_fast_test(NULL),
+  _in_cset_fast_test_base(NULL)
 {
   _g1h = this; // To catch bugs.
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
@@ -2485,6 +2491,19 @@
     g1_policy()->record_collection_pause_start(start_time_sec,
                                                start_used_bytes);
 
+    guarantee(_in_cset_fast_test == NULL, "invariant");
+    guarantee(_in_cset_fast_test_base == NULL, "invariant");
+    _in_cset_fast_test_length = n_regions();
+    _in_cset_fast_test_base =
+                             NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+    memset(_in_cset_fast_test_base, false,
+                                     _in_cset_fast_test_length * sizeof(bool));
+    // We're biasing _in_cset_fast_test to avoid subtracting the
+    // beginning of the heap every time we want to index; basically
+    // it's the same with what we do with the card table.
+    _in_cset_fast_test = _in_cset_fast_test_base -
+              ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+
 #if SCAN_ONLY_VERBOSE
     _young_list->print();
 #endif // SCAN_ONLY_VERBOSE
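
The biasing trick above deserves a concrete illustration. A minimal sketch of the biased fast-test table (not HotSpot code; region size, heap base and table length are made-up values, and the out-of-range intermediate pointer relies on the same implementation-defined liberty card tables already take):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

int main() {
  const int kLogHRGrainBytes = 20;              // hypothetical 1 MB regions
  const uintptr_t heap_start = 0x40000000UL;    // hypothetical reserved base
  const size_t n_regions = 64;

  bool* base = static_cast<bool*>(calloc(n_regions, sizeof(bool)));
  // Biased base: subtract (heap_start >> shift) once, so an object address
  // shifted by the region size indexes the table directly, card-table style.
  bool* in_cset_fast_test = base - (heap_start >> kLogHRGrainBytes);

  uintptr_t obj = heap_start + 5 * (uintptr_t(1) << kLogHRGrainBytes) + 48;
  in_cset_fast_test[obj >> kLogHRGrainBytes] = true;               // region 5 in cset
  printf("%d\n", (int)in_cset_fast_test[obj >> kLogHRGrainBytes]); // 1
  free(base);
  return 0;
}

The payoff is that the hot in-cset test becomes one shift and one byte load, with no subtraction of the heap base on every query.
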
@@ -2553,6 +2572,12 @@
       free_collection_set(g1_policy()->collection_set());
       g1_policy()->clear_collection_set();
 
+      FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+      // this is more for peace of mind; we're nulling them here and
+      // we're expecting them to be null at the beginning of the next GC
+      _in_cset_fast_test = NULL;
+      _in_cset_fast_test_base = NULL;
+
       if (popular_region != NULL) {
         // We have to wait until now, because we don't want the region to
         // be rescheduled for pop-evac during RS update.
@@ -2572,6 +2597,9 @@
         _young_list->print();
 #endif // SCAN_ONLY_VERBOSE
 
+        g1_policy()->record_survivor_regions(_young_list->survivor_length(),
+                                             _young_list->first_survivor_region(),
+                                             _young_list->last_survivor_region());
         _young_list->reset_auxilary_lists();
       }
     } else {
@@ -2598,7 +2626,9 @@
 #endif // SCAN_ONLY_VERBOSE
 
     double end_time_sec = os::elapsedTime();
-    g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
+    if (!evacuation_failed()) {
+      g1_policy()->record_pause_time((end_time_sec - start_time_sec)*1000.0);
+    }
     GCOverheadReporter::recordSTWEnd(end_time_sec);
     g1_policy()->record_collection_pause_end(popular_region != NULL,
                                              abandoned);
@@ -2621,8 +2651,13 @@
       }
     }
 
-    if (mark_in_progress())
+    if (mark_in_progress()) {
       concurrent_mark()->update_g1_committed();
+    }
+
+#ifdef TRACESPINNING
+    ParallelTaskTerminator::print_termination_counts();
+#endif
 
     gc_epilogue(false);
   }
@@ -2733,6 +2768,13 @@
     _gc_alloc_region_list = r->next_gc_alloc_region();
     r->set_next_gc_alloc_region(NULL);
     r->set_is_gc_alloc_region(false);
+    if (r->is_survivor()) {
+      if (r->is_empty()) {
+        r->set_not_young();
+      } else {
+        _young_list->add_survivor_region(r);
+      }
+    }
     if (r->is_empty()) {
       ++_free_regions;
     }
@@ -3129,6 +3171,20 @@
   return block;
 }
 
+void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
+                                            bool par) {
+  // Another thread might have obtained alloc_region for the given
+  // purpose, and might be attempting to allocate in it, and might
+  // succeed.  Therefore, we can't do the "finalization" stuff on the
+  // region below until we're sure the last allocation has happened.
+  // We ensure this by allocating the remaining space with a garbage
+  // object.
+  if (par) par_allocate_remaining_space(alloc_region);
+  // Now we can do the post-GC stuff on the region.
+  alloc_region->note_end_of_copying();
+  g1_policy()->record_after_bytes(alloc_region->used());
+}
+
 HeapWord*
 G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
                                          HeapRegion*    alloc_region,
@@ -3146,16 +3202,7 @@
     // Otherwise, continue; this new region is empty, too.
   }
   assert(alloc_region != NULL, "We better have an allocation region");
-  // Another thread might have obtained alloc_region for the given
-  // purpose, and might be attempting to allocate in it, and might
-  // succeed.  Therefore, we can't do the "finalization" stuff on the
-  // region below until we're sure the last allocation has happened.
-  // We ensure this by allocating the remaining space with a garbage
-  // object.
-  if (par) par_allocate_remaining_space(alloc_region);
-  // Now we can do the post-GC stuff on the region.
-  alloc_region->note_end_of_copying();
-  g1_policy()->record_after_bytes(alloc_region->used());
+  retire_alloc_region(alloc_region, par);
 
   if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
     // Cannot allocate more regions for the given purpose.
@@ -3164,7 +3211,7 @@
     if (purpose != alt_purpose) {
       HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
       // Has not the alternative region been aliased?
-      if (alloc_region != alt_region) {
+      if (alloc_region != alt_region && alt_region != NULL) {
         // Try to allocate in the alternative region.
         if (par) {
           block = alt_region->par_allocate(word_size);
@@ -3173,9 +3220,10 @@
         }
         // Make an alias.
         _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
-      }
-      if (block != NULL) {
-        return block;
+        if (block != NULL) {
+          return block;
+        }
+        retire_alloc_region(alt_region, par);
       }
       // Both the allocation region and the alternative one are full
       // and aliased, replace them with a new allocation region.
@@ -3476,6 +3524,7 @@
   OverflowQueue* _overflowed_refs;
 
   G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
+  ageTable           _age_table;
 
   size_t           _alloc_buffer_waste;
   size_t           _undo_waste;
@@ -3517,6 +3566,7 @@
       _refs(g1h->task_queue(queue_num)),
       _hash_seed(17), _queue_num(queue_num),
       _term_attempts(0),
+      _age_table(false),
 #if G1_DETAILED_STATS
       _pushes(0), _pops(0), _steals(0),
       _steal_attempts(0),  _overflow_pushes(0),
@@ -3551,8 +3601,9 @@
 
   RefToScanQueue*   refs()            { return _refs;             }
   OverflowQueue*    overflowed_refs() { return _overflowed_refs;  }
-
-  inline G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
+  ageTable*         age_table()       { return &_age_table;       }
+
+  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
     return &_alloc_buffers[purpose];
   }
 
@@ -3560,6 +3611,9 @@
   size_t undo_waste()                            { return _undo_waste; }
 
   void push_on_queue(oop* ref) {
+    assert(ref != NULL, "invariant");
+    assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
+
     if (!refs()->push(ref)) {
       overflowed_refs()->push(ref);
       IF_G1_DETAILED_STATS(note_overflow_push());
@@ -3572,6 +3626,10 @@
     if (!refs()->pop_local(ref)) {
       ref = NULL;
     } else {
+      assert(ref != NULL, "invariant");
+      assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
+             "invariant");
+
       IF_G1_DETAILED_STATS(note_pop());
     }
   }
@@ -3601,8 +3659,7 @@
 
       obj = alloc_buf->allocate(word_sz);
       assert(obj != NULL, "buffer was definitely big enough...");
-    }
-    else {
+    } else {
       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
     }
     return obj;
@@ -3695,24 +3752,57 @@
     }
   }
 
+private:
+  void deal_with_reference(oop* ref_to_scan) {
+    if (has_partial_array_mask(ref_to_scan)) {
+      _partial_scan_cl->do_oop_nv(ref_to_scan);
+    } else {
+      // Note: we can use "raw" versions of "region_containing" because
+      // "obj_to_scan" is definitely in the heap, and is not in a
+      // humongous region.
+      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+      _evac_cl->set_region(r);
+      _evac_cl->do_oop_nv(ref_to_scan);
+    }
+  }
+
+public:
   void trim_queue() {
+    // The loop is written out twice: the first copy drains the overflow
+    // queue, the second drains the task queue. This is better than
+    // having a single loop, which checks both conditions and, inside
+    // it, either pops the overflow queue or the task queue, as each
+    // loop is tighter. Also, the decision to drain the overflow queue
+    // first is not arbitrary, as the overflow queue is not visible
+    // to the other workers, whereas the task queue is. So, we want to
+    // drain the "invisible" entries first, while allowing the other
+    // workers to potentially steal the "visible" entries.
+
     while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      oop *ref_to_scan = NULL;
-      if (overflowed_refs_to_scan() == 0) {
-        pop_from_queue(ref_to_scan);
-      } else {
+      while (overflowed_refs_to_scan() > 0) {
+        oop *ref_to_scan = NULL;
         pop_from_overflow_queue(ref_to_scan);
+        assert(ref_to_scan != NULL, "invariant");
+        // We shouldn't have pushed it on the queue if it was not
+        // pointing into the CSet.
+        assert(ref_to_scan != NULL, "sanity");
+        assert(has_partial_array_mask(ref_to_scan) ||
+                                      _g1h->obj_in_cs(*ref_to_scan), "sanity");
+
+        deal_with_reference(ref_to_scan);
       }
-      if (ref_to_scan != NULL) {
-        if ((intptr_t)ref_to_scan & G1_PARTIAL_ARRAY_MASK) {
-          _partial_scan_cl->do_oop_nv(ref_to_scan);
-        } else {
-          // Note: we can use "raw" versions of "region_containing" because
-          // "obj_to_scan" is definitely in the heap, and is not in a
-          // humongous region.
-          HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-          _evac_cl->set_region(r);
-          _evac_cl->do_oop_nv(ref_to_scan);
+
+      while (refs_to_scan() > 0) {
+        oop *ref_to_scan = NULL;
+        pop_from_queue(ref_to_scan);
+
+        if (ref_to_scan != NULL) {
+          // We shouldn't have pushed it on the queue if it was not
+          // pointing into the CSet.
+          assert(has_partial_array_mask(ref_to_scan) ||
+                                      _g1h->obj_in_cs(*ref_to_scan), "sanity");
+
+          deal_with_reference(ref_to_scan);
         }
       }
     }
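
A minimal sketch of the two-loop drain order described in the comment above (not HotSpot code: std::vector and std::deque are stand-ins for the thread-private overflow queue and the work-stealing task queue, and stealing itself is not modeled):

#include <cstdio>
#include <deque>
#include <vector>

std::vector<int> overflow;   // private: other workers cannot steal from it
std::deque<int> task_queue;  // shared: other workers steal from here

void deal_with_reference(int ref) { printf("scan %d\n", ref); }

void trim_queue() {
  while (!overflow.empty() || !task_queue.empty()) {
    while (!overflow.empty()) {          // tight inner loop 1: invisible entries
      int ref = overflow.back(); overflow.pop_back();
      deal_with_reference(ref);
    }
    while (!task_queue.empty()) {        // tight inner loop 2: stealable entries
      int ref = task_queue.front(); task_queue.pop_front();
      deal_with_reference(ref);
    }
  }
}

int main() {
  task_queue = {1, 2};
  overflow = {10};
  trim_queue();                          // 10 first, then 1 and 2
  return 0;
}
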
@@ -3728,16 +3818,25 @@
 // Should probably be made inline and moved in g1OopClosures.inline.hpp.
 void G1ParScanClosure::do_oop_nv(oop* p) {
   oop obj = *p;
+
   if (obj != NULL) {
-    if (_g1->obj_in_cs(obj)) {
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {
-        _par_scan_state->push_on_queue(p);
-        return;
-      }
+    if (_g1->in_cset_fast_test(obj)) {
+      // We're not going to even bother checking whether the object is
+      // already forwarded or not, as this usually causes an immediate
+      // stall. We'll try to prefetch the object (for write, given that
+      // we might need to install the forwarding reference) and we'll
+      // get back to it when we pop it from the queue.
+      Prefetch::write(obj->mark_addr(), 0);
+      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
+
+      // slightly paranoid test; I'm trying to catch potential
+      // problems before we go into push_on_queue to know where the
+      // problem is coming from
+      assert(obj == *p, "the value of *p should not have changed");
+      _par_scan_state->push_on_queue(p);
+    } else {
+      _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
     }
-    _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
   }
 }
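
A minimal sketch of the tag-bit trick behind set_partial_array_mask/has_partial_array_mask used above (not HotSpot code; kPartialMask is a hypothetical constant playing the role of G1_PARTIAL_ARRAY_MASK, and pointer alignment supplies the free low bits):

#include <cstdint>
#include <cstdio>

const uintptr_t kPartialMask = 0x2;   // hypothetical tag bit in an aligned pointer

int* set_partial_array_mask(int* p) {
  return reinterpret_cast<int*>(reinterpret_cast<uintptr_t>(p) | kPartialMask);
}
bool has_partial_array_mask(int* p) {
  return (reinterpret_cast<uintptr_t>(p) & kPartialMask) != 0;
}
int* clear_partial_array_mask(int* p) {
  return reinterpret_cast<int*>(reinterpret_cast<uintptr_t>(p) & ~kPartialMask);
}

int main() {
  static int obj = 42;                  // 4-byte aligned: low bits are free
  int* tagged = set_partial_array_mask(&obj);
  printf("%d %d\n", has_partial_array_mask(tagged) ? 1 : 0,
         *clear_partial_array_mask(tagged));   // 1 42
  return 0;
}

Tagging lets a single queue carry both plain references and "partial array chunk" entries, which is why deal_with_reference checks the mask before dispatching.
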
 
@@ -3765,7 +3864,9 @@
           (!from_region->is_young() && young_index == 0), "invariant" );
   G1CollectorPolicy* g1p = _g1->g1_policy();
   markOop m = old->mark();
-  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, m->age(),
+  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
+                                           : m->age();
+  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
   HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
   oop       obj     = oop(obj_ptr);
@@ -3777,13 +3878,39 @@
     return _g1->handle_evacuation_failure_par(cl, old);
   }
 
+  // We're going to allocate linearly, so might as well prefetch ahead.
+  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
+
   oop forward_ptr = old->forward_to_atomic(obj);
   if (forward_ptr == NULL) {
     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
-    obj->set_mark(m);
     if (g1p->track_object_age(alloc_purpose)) {
-      obj->incr_age();
+      // We could simply do obj->incr_age(). However, this causes a
+      // performance issue. obj->incr_age() will first check whether
+      // the object has a displaced mark by checking its mark word;
+      // getting the mark word from the new location of the object
+      // stalls. So, given that we already have the mark word and we
+      // are about to install it anyway, it's better to increase the
+      // age on the mark word, when the object does not have a
+      // displaced mark word. We're not expecting many objects to have
+      // a displaced mark word, so that case is not optimized
+      // further (it could be...) and we simply call obj->incr_age().
+
+      if (m->has_displaced_mark_helper()) {
+        // in this case, we have to install the mark word first,
+        // otherwise obj looks to be forwarded (the old mark word,
+        // which contains the forward pointer, was copied)
+        obj->set_mark(m);
+        obj->incr_age();
+      } else {
+        m = m->incr_age();
+        obj->set_mark(m);
+      }
+      _par_scan_state->age_table()->add(obj, word_sz);
+    } else {
+      obj->set_mark(m);
     }
+
     // preserve "next" mark bit
     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
       if (!use_local_bitmaps ||
@@ -3805,9 +3932,11 @@
 
     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
       arrayOop(old)->set_length(0);
-      _par_scan_state->push_on_queue((oop*) ((intptr_t)old | G1_PARTIAL_ARRAY_MASK));
+      _par_scan_state->push_on_queue(set_partial_array_mask(old));
     } else {
-      _scanner->set_region(_g1->heap_region_containing(obj));
+      // No point in using the slower heap_region_containing() method,
+      // given that we know obj is in the heap.
+      _scanner->set_region(_g1->heap_region_containing_raw(obj));
       obj->oop_iterate_backwards(_scanner);
     }
   } else {
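
copy_to_survivor_space above serializes racing copiers with forward_to_atomic: the thread that installs the forwarding pointer first owns the copy. A condensed sketch of that claim protocol using C++11 atomics (an illustration only; HotSpot uses its own Atomic::cmpxchg on the mark word, not std::atomic):

    #include <atomic>
    #include <cstring>

    struct Header { std::atomic<void*> forwardee{nullptr}; };

    // Returns the surviving copy: ours if the CAS won, otherwise the
    // copy installed by the thread that beat us to it.
    void* claim_and_copy(Header* hdr, void* old_body,
                         void* new_copy, size_t size) {
      void* expected = nullptr;
      if (hdr->forwardee.compare_exchange_strong(expected, new_copy)) {
        std::memcpy(new_copy, old_body, size);  // we won the race
        return new_copy;
      }
      return expected;  // lost: 'expected' now holds the winner's copy
    }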
@@ -3817,47 +3946,55 @@
   return obj;
 }
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_forwardee>::do_oop_work(oop* p) {
+template<bool do_gen_barrier, G1Barrier barrier,
+         bool do_mark_forwardee, bool skip_cset_test>
+void G1ParCopyClosure<do_gen_barrier, barrier,
+                      do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
   oop obj = *p;
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is nonNull");
 
-  if (obj != NULL) {
-    if (_g1->obj_in_cs(obj)) {
+  // The only time we skip the cset test is when we're scanning
+  // references popped from the queue. And we only push on the queue
+  // references that we know point into the cset, so no point in
+  // checking again. But we'll leave an assert here for peace of mind.
+  assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
+
+  // here the null check is implicit in the cset_fast_test() test
+  if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
 #if G1_REM_SET_LOGGING
-      gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" into CS.",
-                             p, (void*) obj);
+    gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
+                           "into CS.", p, (void*) obj);
 #endif
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {
-        *p = copy_to_survivor_space(obj);
-      }
-      // When scanning the RS, we only care about objs in CS.
-      if (barrier == G1BarrierRS) {
-        _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
-      }
+    if (obj->is_forwarded()) {
+      *p = obj->forwardee();
+    } else {
+      *p = copy_to_survivor_space(obj);
     }
-    // When scanning moved objs, must look at all oops.
-    if (barrier == G1BarrierEvac) {
+    // When scanning the RS, we only care about objs in CS.
+    if (barrier == G1BarrierRS) {
       _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
     }
-
-    if (do_gen_barrier) {
-      par_do_barrier(p);
-    }
-  }
-}
-
-template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
-
-template <class T> void G1ParScanPartialArrayClosure::process_array_chunk(
+  }
+
+  // When scanning moved objs, must look at all oops.
+  if (barrier == G1BarrierEvac && obj != NULL) {
+    _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
+  }
+
+  if (do_gen_barrier && obj != NULL) {
+    par_do_barrier(p);
+  }
+}
+
+template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
+
+template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
   oop obj, int start, int end) {
   // process our set of indices (include header in first chunk)
   assert(start < end, "invariant");
   T* const base      = (T*)objArrayOop(obj)->base();
-  T* const start_addr = base + start;
+  T* const start_addr = (start == 0) ? (T*) obj : base + start;
   T* const end_addr   = base + end;
   MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
   _scanner.set_region(_g1->heap_region_containing(obj));
@@ -3866,7 +4003,8 @@
 
 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
   assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
-  oop old = oop((intptr_t)p & ~G1_PARTIAL_ARRAY_MASK);
+  assert(has_partial_array_mask(p), "invariant");
+  oop old = clear_partial_array_mask(p);
   assert(old->is_objArray(), "must be obj array");
   assert(old->is_forwarded(), "must be forwarded");
   assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
@@ -3884,7 +4022,7 @@
     end = start + ParGCArrayScanChunk;
     arrayOop(old)->set_length(end);
     // Push remainder.
-    _par_scan_state->push_on_queue((oop*) ((intptr_t) old | G1_PARTIAL_ARRAY_MASK));
+    _par_scan_state->push_on_queue(set_partial_array_mask(old));
   } else {
     // Restore length so that the heap remains parsable in
     // case of evacuation failure.
@@ -3893,11 +4031,6 @@
 
   // process our set of indices (include header in first chunk)
   process_array_chunk<oop>(obj, start, end);
-  oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr<oop>(start);
-  oop* end_addr   = (oop*)(obj->base()) + end; // obj_at_addr(end) asserts end < length
-  MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
-  _scanner.set_region(_g1->heap_region_containing(obj));
-  obj->oop_iterate(&_scanner, mr);
 }
 
 int G1ScanAndBalanceClosure::_nq = 0;
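
The partial-array protocol above stashes scan progress in the (now unused) length field of the old copy and re-pushes a masked pointer for the remainder, so large object arrays are scanned a chunk at a time and can be stolen. The chunking logic reduced to a sketch (names invented here; CHUNK stands in for ParGCArrayScanChunk):

    #include <deque>

    const int CHUNK = 512;  // stand-in for ParGCArrayScanChunk

    // 'progress' plays the role of the old copy's length field; 'length'
    // is the real array length, read from the new copy.
    void scan_some(std::deque<int*>& queue, int* progress, int length,
                   void (*scan_range)(int from, int to)) {
      int start = *progress;
      int end   = length;
      if (end - start > CHUNK) {
        end = start + CHUNK;
        *progress = end;           // record where the next pop resumes
        queue.push_back(progress); // remainder goes back on the queue
      }
      scan_range(start, end);      // scan just this chunk
    }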
@@ -3931,6 +4064,13 @@
                           pss->hash_seed(),
                           ref_to_scan)) {
         IF_G1_DETAILED_STATS(pss->note_steal());
+
+        // Slightly paranoid tests; I'm trying to catch potential
+        // problems before we go into push_on_queue, so that we know
+        // where the problem is coming from.
+        assert(ref_to_scan != NULL, "invariant");
+        assert(has_partial_array_mask(ref_to_scan) ||
+                                   _g1h->obj_in_cs(*ref_to_scan), "invariant");
         pss->push_on_queue(ref_to_scan);
         continue;
       }
@@ -3976,10 +4116,10 @@
     ResourceMark rm;
     HandleMark   hm;
 
-    G1ParScanThreadState pss(_g1h, i);
-    G1ParScanHeapEvacClosure     scan_evac_cl(_g1h, &pss);
-    G1ParScanHeapEvacClosure     evac_failure_cl(_g1h, &pss);
-    G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
+    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
 
     pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
@@ -4024,6 +4164,9 @@
       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
       _g1h->g1_policy()->record_termination_time(i, term_ms);
     }
+    if (G1UseSurvivorSpace) {
+      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
+    }
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
 
     // Clean up any par-expanded rem sets.
@@ -4263,7 +4406,7 @@
   // Is this the right thing to do here?  We don't save marks
   // on individual heap regions when we allocate from
   // them in parallel, so this seems like the correct place for this.
-  all_alloc_regions_note_end_of_copying();
+  retire_all_alloc_regions();
   {
     G1IsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
@@ -4903,7 +5046,7 @@
   return no_allocs;
 }
 
-void G1CollectedHeap::all_alloc_regions_note_end_of_copying() {
+void G1CollectedHeap::retire_all_alloc_regions() {
   for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
     HeapRegion* r = _gc_alloc_regions[ap];
     if (r != NULL) {
@@ -4916,8 +5059,7 @@
         }
       }
       if (!has_processed_alias) {
-        r->note_end_of_copying();
-        g1_policy()->record_after_bytes(r->used());
+        retire_alloc_region(r, false /* par */);
       }
     }
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -90,7 +90,7 @@
   HeapRegion* _curr_scan_only;
 
   HeapRegion* _survivor_head;
-  HeapRegion* _survivors_tail;
+  HeapRegion* _survivor_tail;
   size_t      _survivor_length;
 
   void          empty_list(HeapRegion* list);
@@ -105,6 +105,7 @@
   bool          is_empty() { return _length == 0; }
   size_t        length() { return _length; }
   size_t        scan_only_length() { return _scan_only_length; }
+  size_t        survivor_length() { return _survivor_length; }
 
   void rs_length_sampling_init();
   bool rs_length_sampling_more();
@@ -120,6 +121,7 @@
   HeapRegion* first_region() { return _head; }
   HeapRegion* first_scan_only_region() { return _scan_only_head; }
   HeapRegion* first_survivor_region() { return _survivor_head; }
+  HeapRegion* last_survivor_region() { return _survivor_tail; }
   HeapRegion* par_get_next_scan_only_region() {
     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
     HeapRegion* ret = _curr_scan_only;
@@ -219,7 +221,7 @@
   // The to-space memory regions into which objects are being copied during
   // a GC.
   HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
-  uint _gc_alloc_region_counts[GCAllocPurposeCount];
+  size_t _gc_alloc_region_counts[GCAllocPurposeCount];
 
   // A list of the regions that have been set to be alloc regions in the
   // current collection.
@@ -247,6 +249,27 @@
   NumberSeq _pop_obj_rc_at_copy;
   void print_popularity_summary_info() const;
 
+  // This is used for a quick test on whether a reference points into
+  // the collection set or not. Basically, we have an array, with one
+  // byte per region, and that byte denotes whether the corresponding
+  // region is in the collection set or not. The entry corresponding
+  // to the bottom of the heap, i.e., region 0, is pointed to by
+  // _in_cset_fast_test_base.  The _in_cset_fast_test field has been
+  // biased so that it actually points to address 0 of the address
+  // space, to make the test as fast as possible (we can simply shift
+  // the address to index into it, instead of having to subtract the
+  // bottom of the heap from the address before shifting it; basically
+  // it works in the same way the card table works).
+  bool* _in_cset_fast_test;
+
+  // The allocated array used for the fast test on whether a reference
+  // points into the collection set or not. This field is also used to
+  // free the array.
+  bool* _in_cset_fast_test_base;
+
+  // The length of the _in_cset_fast_test_base array.
+  size_t _in_cset_fast_test_length;
+
   volatile unsigned _gc_time_stamp;
 
   size_t* _surviving_young_words;
@@ -260,8 +283,8 @@
   // Returns "true" iff none of the gc alloc regions have any allocations
   // since the last call to "save_marks".
   bool all_alloc_regions_no_allocs_since_save_marks();
-  // Calls "note_end_of_copying on all gc alloc_regions.
-  void all_alloc_regions_note_end_of_copying();
+  // Perform finalization work on all allocation regions.
+  void retire_all_alloc_regions();
 
   // The number of regions allocated to hold humongous objects.
   int         _num_humongous_regions;
@@ -330,6 +353,10 @@
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
 
+  // Retires an allocation region when it is full or at the end of a
+  // GC pause.
+  void  retire_alloc_region(HeapRegion* alloc_region, bool par);
+
   // Helper function for two callbacks below.
   // "full", if true, indicates that the GC is for a System.gc() request,
   // and should collect the entire heap.  If "clear_all_soft_refs" is true,
@@ -368,6 +395,38 @@
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
+  // We register a region with the fast "in collection set" test. We
+  // simply set the array slot corresponding to this region to true.
+  void register_region_with_in_cset_fast_test(HeapRegion* r) {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    assert(r->in_collection_set(), "invariant");
+    int index = r->hrs_index();
+    assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length,
+           "invariant");
+    assert(!_in_cset_fast_test_base[index], "invariant");
+    _in_cset_fast_test_base[index] = true;
+  }
+
+  // This is a fast test on whether a reference points into the
+  // collection set or not. It does not assume that the reference
+  // points into the heap; if it doesn't, it will return false.
+  bool in_cset_fast_test(oop obj) {
+    assert(_in_cset_fast_test != NULL, "sanity");
+    if (_g1_committed.contains((HeapWord*) obj)) {
+      // no need to subtract the bottom of the heap from obj,
+      // _in_cset_fast_test is biased
+      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
+      bool ret = _in_cset_fast_test[index];
+      // let's make sure the result is consistent with what the slower
+      // test returns
+      assert( ret || !obj_in_cs(obj), "sanity");
+      assert(!ret ||  obj_in_cs(obj), "sanity");
+      return ret;
+    } else {
+      return false;
+    }
+  }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -850,6 +909,7 @@
 
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
 
   // Iterate over all objects allocated since the last collection, calling
   // "cl.do_object" on each.  The heap must have been initialized properly
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -36,8 +36,11 @@
 
 inline HeapRegion*
 G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
-  HeapRegion* res = _hrs->addr_to_region(addr);
-  assert(res != NULL, "addr outside of heap?");
+  assert(_g1_reserved.contains(addr), "invariant");
+  size_t index = ((intptr_t) addr - (intptr_t) _g1_reserved.start())
+                                              >> HeapRegion::LogOfHRGrainBytes;
+  HeapRegion* res = _hrs->at(index);
+  assert(res == _hrs->addr_to_region(addr), "sanity");
   return res;
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -196,8 +196,13 @@
   _short_lived_surv_rate_group(new SurvRateGroup(this, "Short Lived",
                                                  G1YoungSurvRateNumRegionsSummary)),
   _survivor_surv_rate_group(new SurvRateGroup(this, "Survivor",
-                                              G1YoungSurvRateNumRegionsSummary))
+                                              G1YoungSurvRateNumRegionsSummary)),
   // add here any more surv rate groups
+  _recorded_survivor_regions(0),
+  _recorded_survivor_head(NULL),
+  _recorded_survivor_tail(NULL),
+  _survivors_age_table(true)
+
 {
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
@@ -272,6 +277,15 @@
   _concurrent_mark_cleanup_times_ms->add(0.20);
   _tenuring_threshold = MaxTenuringThreshold;
 
+  if (G1UseSurvivorSpace) {
+    // If G1FixedSurvivorSpaceSize is 0, the size is not fixed and
+    // _max_survivor_regions will be calculated by
+    // calculate_young_list_target_config during initialization.
+    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
+  } else {
+    _max_survivor_regions = 0;
+  }
+
   initialize_all();
 }
 
@@ -283,6 +297,9 @@
 void G1CollectorPolicy::initialize_flags() {
   set_min_alignment(HeapRegion::GrainBytes);
   set_max_alignment(GenRemSet::max_alignment_constraint(rem_set_name()));
+  if (SurvivorRatio < 1) {
+    vm_exit_during_initialization("Invalid survivor ratio specified");
+  }
   CollectorPolicy::initialize_flags();
 }
 
@@ -301,6 +318,8 @@
                                   "-XX:+UseConcMarkSweepGC.");
   }
 
+  initialize_gc_policy_counters();
+
   if (G1Gen) {
     _in_young_gc_mode = true;
 
@@ -322,6 +341,12 @@
   }
 }
 
+// Create the jstat counters for the policy.
+void G1CollectorPolicy::initialize_gc_policy_counters()
+{
+  _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
+}
+
 void G1CollectorPolicy::calculate_young_list_min_length() {
   _young_list_min_length = 0;
 
@@ -352,6 +377,7 @@
     guarantee( so_length < _young_list_target_length, "invariant" );
     _young_list_so_prefix_length = so_length;
   }
+  calculate_survivors_policy();
 }
 
 // This method calculates the optimal scan-only set for a fixed young
@@ -448,6 +474,9 @@
   if (full_young_gcs() && _free_regions_at_end_of_collection > 0) {
     // we are in fully-young mode and there are free regions in the heap
 
+    double survivor_regions_evac_time =
+        predict_survivor_regions_evac_time();
+
     size_t min_so_length = 0;
     size_t max_so_length = 0;
 
@@ -497,9 +526,8 @@
       scanned_cards = predict_non_young_card_num(adj_rs_lengths);
     // calculate this once, so that we don't have to recalculate it in
     // the innermost loop
-    double base_time_ms = predict_base_elapsed_time_ms(pending_cards,
-                                                       scanned_cards);
-
+    double base_time_ms = predict_base_elapsed_time_ms(pending_cards, scanned_cards)
+                          + survivor_regions_evac_time;
     // the result
     size_t final_young_length = 0;
     size_t final_so_length = 0;
@@ -548,14 +576,14 @@
     bool done = false;
     // this is the outermost loop
     while (!done) {
-#if 0
+#ifdef TRACE_CALC_YOUNG_CONFIG
       // leave this in for debugging, just in case
       gclog_or_tty->print_cr("searching between " SIZE_FORMAT " and " SIZE_FORMAT
                              ", incr " SIZE_FORMAT ", pass %s",
                              from_so_length, to_so_length, so_length_incr,
                              (pass == pass_type_coarse) ? "coarse" :
                              (pass == pass_type_fine) ? "fine" : "final");
-#endif // 0
+#endif // TRACE_CALC_YOUNG_CONFIG
 
       size_t so_length = from_so_length;
       size_t init_free_regions =
@@ -651,11 +679,11 @@
           guarantee( so_length_incr == so_coarse_increments, "invariant" );
           guarantee( final_so_length >= min_so_length, "invariant" );
 
-#if 0
+#ifdef TRACE_CALC_YOUNG_CONFIG
           // leave this in for debugging, just in case
           gclog_or_tty->print_cr("  coarse pass: SO length " SIZE_FORMAT,
                                  final_so_length);
-#endif // 0
+#endif // TRACE_CALC_YOUNG_CONFIG
 
           from_so_length =
             (final_so_length - min_so_length > so_coarse_increments) ?
@@ -687,12 +715,12 @@
             // of the optimal
             size_t new_so_length = 950 * final_so_length / 1000;
 
-#if 0
+#ifdef TRACE_CALC_YOUNG_CONFIG
             // leave this in for debugging, just in case
             gclog_or_tty->print_cr("  fine pass: SO length " SIZE_FORMAT
                                    ", setting it to " SIZE_FORMAT,
                                     final_so_length, new_so_length);
-#endif // 0
+#endif // TRACE_CALC_YOUNG_CONFIG
 
             from_so_length = new_so_length;
             to_so_length = new_so_length;
@@ -719,7 +747,8 @@
     }
 
     // we should have at least one region in the target young length
-    _young_list_target_length = MAX2((size_t) 1, final_young_length);
+    _young_list_target_length =
+        MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
     if (final_so_length >= final_young_length)
       // and we need to ensure that the S-O length is not greater than
       // the target young length (this is being a bit careful)
@@ -734,7 +763,7 @@
     double end_time_sec = os::elapsedTime();
     double elapsed_time_ms = (end_time_sec - start_time_sec) * 1000.0;
 
-#if 0
+#ifdef TRACE_CALC_YOUNG_CONFIG
     // leave this in for debugging, just in case
     gclog_or_tty->print_cr("target = %1.1lf ms, young = " SIZE_FORMAT
                            ", SO = " SIZE_FORMAT ", "
@@ -747,9 +776,9 @@
                            calculations,
                            full_young_gcs() ? "full" : "partial",
                            should_initiate_conc_mark() ? " i-m" : "",
-                           in_marking_window(),
-                           in_marking_window_im());
-#endif // 0
+                           _in_marking_window,
+                           _in_marking_window_im);
+#endif // TRACE_CALC_YOUNG_CONFIG
 
     if (_young_list_target_length < _young_list_min_length) {
       // bummer; this means that, if we do a pause when the optimal
@@ -768,14 +797,14 @@
         // S-O length
         so_length = calculate_optimal_so_length(_young_list_min_length);
 
-#if 0
+#ifdef TRACE_CALC_YOUNG_CONFIG
       // leave this in for debugging, just in case
       gclog_or_tty->print_cr("adjusted target length from "
                              SIZE_FORMAT " to " SIZE_FORMAT
                              ", SO " SIZE_FORMAT,
                              _young_list_target_length, _young_list_min_length,
                              so_length);
-#endif // 0
+#endif // TRACE_CALC_YOUNG_CONFIG
 
       _young_list_target_length =
         MAX2(_young_list_min_length, (size_t)1);
@@ -785,12 +814,12 @@
     // we are in a partially-young mode or we've run out of regions (due
     // to evacuation failure)
 
-#if 0
+#ifdef TRACE_CALC_YOUNG_CONFIG
     // leave this in for debugging, just in case
     gclog_or_tty->print_cr("(partial) setting target to " SIZE_FORMAT
                            ", SO " SIZE_FORMAT,
                            _young_list_min_length, 0);
-#endif // 0
+#endif // TRACE_CALC_YOUNG_CONFIG
 
     // we'll do the pause as soon as possible and with no S-O prefix
     // (see above for the reasons behind the latter)
@@ -884,6 +913,16 @@
   return true;
 }
 
+double G1CollectorPolicy::predict_survivor_regions_evac_time() {
+  double survivor_regions_evac_time = 0.0;
+  for (HeapRegion* r = _recorded_survivor_head;
+       r != NULL && r != _recorded_survivor_tail->get_next_young_region();
+       r = r->get_next_young_region()) {
+    survivor_regions_evac_time += predict_region_elapsed_time_ms(r, true);
+  }
+  return survivor_regions_evac_time;
+}
+
 void G1CollectorPolicy::check_prediction_validity() {
   guarantee( adaptive_young_list_length(), "should not call this otherwise" );
 
@@ -995,11 +1034,15 @@
   _short_lived_surv_rate_group->start_adding_regions();
   // also call this on any additional surv rate groups
 
+  record_survivor_regions(0, NULL, NULL);
+
   _prev_region_num_young   = _region_num_young;
   _prev_region_num_tenured = _region_num_tenured;
 
   _free_regions_at_end_of_collection = _g1->free_regions();
   _scan_only_regions_at_end_of_collection = 0;
+  // Reset survivors SurvRateGroup.
+  _survivor_surv_rate_group->reset();
   calculate_young_list_min_length();
   calculate_young_list_target_config();
  }
@@ -1104,6 +1147,10 @@
   _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
   tag_scan_only(short_lived_so_length);
 
+  if (G1UseSurvivorSpace) {
+    _survivors_age_table.clear();
+  }
+
   assert( verify_young_ages(), "region age verification" );
 }
 
@@ -1965,9 +2012,6 @@
   // </NEW PREDICTION>
 
   _target_pause_time_ms = -1.0;
-
-  // TODO: calculate tenuring threshold
-  _tenuring_threshold = MaxTenuringThreshold;
 }
 
 // <NEW PREDICTION>
@@ -2058,7 +2102,7 @@
     guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
                "invariant" );
     int age = hr->age_in_surv_rate_group();
-    double yg_surv_rate = predict_yg_surv_rate(age);
+    double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
   }
 
@@ -2091,7 +2135,7 @@
   }
 #if PREDICTIONS_VERBOSE
   if (young) {
-    _recorded_young_bytes += hr->asSpace()->used();
+    _recorded_young_bytes += hr->used();
   } else {
     _recorded_marked_bytes += hr->max_live_bytes();
   }
@@ -2119,11 +2163,6 @@
       predict_non_young_card_num(_predicted_rs_lengths);
   _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
 
-  _predicted_young_survival_ratio = 0.0;
-  for (int i = 0; i < _recorded_young_regions; ++i)
-    _predicted_young_survival_ratio += predict_yg_surv_rate(i);
-  _predicted_young_survival_ratio /= (double) _recorded_young_regions;
-
   _predicted_scan_only_scan_time_ms =
     predict_scan_only_time_ms(_recorded_scan_only_regions);
   _predicted_rs_update_time_ms =
@@ -2673,8 +2712,11 @@
   assert(in_young_gc_mode(), "should be in young GC mode");
   bool ret;
   size_t young_list_length = _g1->young_list_length();
-
-  if (young_list_length < _young_list_target_length) {
+  size_t young_list_max_length = _young_list_target_length;
+  if (G1FixedEdenSize) {
+    young_list_max_length -= _max_survivor_regions;
+  }
+  if (young_list_length < young_list_max_length) {
     ret = true;
     ++_region_num_young;
   } else {
@@ -2710,17 +2752,39 @@
 }
 
 
-uint G1CollectorPolicy::max_regions(int purpose) {
+size_t G1CollectorPolicy::max_regions(int purpose) {
   switch (purpose) {
     case GCAllocForSurvived:
-      return G1MaxSurvivorRegions;
+      return _max_survivor_regions;
     case GCAllocForTenured:
-      return UINT_MAX;
+      return REGIONS_UNLIMITED;
     default:
-      return UINT_MAX;
+      ShouldNotReachHere();
+      return REGIONS_UNLIMITED;
   };
 }
 
+// Calculates survivor space parameters.
+void G1CollectorPolicy::calculate_survivors_policy()
+{
+  if (!G1UseSurvivorSpace) {
+    return;
+  }
+  if (G1FixedSurvivorSpaceSize == 0) {
+    _max_survivor_regions = _young_list_target_length / SurvivorRatio;
+  } else {
+    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
+  }
+
+  if (G1FixedTenuringThreshold) {
+    _tenuring_threshold = MaxTenuringThreshold;
+  } else {
+    _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
+        HeapRegion::GrainWords * _max_survivor_regions);
+  }
+}
+
+
 void
 G1CollectorPolicy_BestRegionsFirst::
 set_single_region_collection_set(HeapRegion* hr) {
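
calculate_survivors_policy above sizes the survivor space (fixed size or SurvivorRatio) and then lets the merged age table pick the tenuring threshold. Roughly, the age table finds the youngest age at which the cumulative survivor bytes would overflow the space; a simplified sketch of that computation (HotSpot's ageTable::compute_tenuring_threshold also weighs TargetSurvivorRatio and logging, omitted here):

    #include <cstddef>

    // sizes[a] = bytes of surviving objects currently at age a.
    int compute_tenuring_threshold(const size_t sizes[], int max_age,
                                   size_t survivor_capacity) {
      size_t total = 0;
      for (int age = 0; age < max_age; ++age) {
        total += sizes[age];
        if (total > survivor_capacity)
          return age;  // ages >= this are tenured instead of copied
      }
      return max_age;  // everything fits; use the maximum threshold
    }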
@@ -2743,7 +2807,11 @@
   double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 
   size_t young_list_length = _g1->young_list_length();
-  bool reached_target_length = young_list_length >= _young_list_target_length;
+  size_t young_list_max_length = _young_list_target_length;
+  if (G1FixedEdenSize) {
+    young_list_max_length -= _max_survivor_regions;
+  }
+  bool reached_target_length = young_list_length >= young_list_max_length;
 
   if (in_young_gc_mode()) {
     if (reached_target_length) {
@@ -2985,6 +3053,7 @@
   _collection_set = hr;
   _collection_set_size++;
   _collection_set_bytes_used_before += hr->used();
+  _g1->register_region_with_in_cset_fast_test(hr);
 }
 
 void
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -49,7 +49,7 @@
 class MainBodySummary;
 class PopPreambleSummary;
 
-class PauseSummary {
+class PauseSummary: public CHeapObj {
   define_num_seq(total)
     define_num_seq(other)
 
@@ -58,7 +58,7 @@
   virtual PopPreambleSummary* pop_preamble_summary() { return NULL; }
 };
 
-class MainBodySummary {
+class MainBodySummary: public CHeapObj {
   define_num_seq(satb_drain) // optional
   define_num_seq(parallel) // parallel only
     define_num_seq(ext_root_scan)
@@ -75,7 +75,7 @@
   define_num_seq(clear_ct)  // parallel only
 };
 
-class PopPreambleSummary {
+class PopPreambleSummary: public CHeapObj {
   define_num_seq(pop_preamble)
     define_num_seq(pop_update_rs)
     define_num_seq(pop_scan_rs)
@@ -557,6 +557,8 @@
     return get_new_neg_prediction(_young_gc_eff_seq);
   }
 
+  double predict_survivor_regions_evac_time();
+
   // </NEW PREDICTION>
 
 public:
@@ -599,8 +601,8 @@
 
   // Returns an estimate of the survival rate of the region at yg-age
   // "yg_age".
-  double predict_yg_surv_rate(int age) {
-    TruncatedSeq* seq = _short_lived_surv_rate_group->get_seq(age);
+  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) {
+    TruncatedSeq* seq = surv_rate_group->get_seq(age);
     if (seq->num() == 0)
       gclog_or_tty->print("BARF! age is %d", age);
     guarantee( seq->num() > 0, "invariant" );
@@ -610,6 +612,10 @@
     return pred;
   }
 
+  double predict_yg_surv_rate(int age) {
+    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
+  }
+
   double accum_yg_surv_rate_pred(int age) {
     return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
   }
@@ -822,6 +828,9 @@
 
   virtual void init();
 
+  // Create jstat counters for the policy.
+  virtual void initialize_gc_policy_counters();
+
   virtual HeapWord* mem_allocate_work(size_t size,
                                       bool is_tlab,
                                       bool* gc_overhead_limit_was_exceeded);
@@ -1047,8 +1056,12 @@
   // Print stats on young survival ratio
   void print_yg_surv_rate_info() const;
 
-  void finished_recalculating_age_indexes() {
-    _short_lived_surv_rate_group->finished_recalculating_age_indexes();
+  void finished_recalculating_age_indexes(bool is_survivors) {
+    if (is_survivors) {
+      _survivor_surv_rate_group->finished_recalculating_age_indexes();
+    } else {
+      _short_lived_surv_rate_group->finished_recalculating_age_indexes();
+    }
     // do that for any other surv rate groups
   }
 
@@ -1097,6 +1110,17 @@
   // maximum number of survivor regions.
   int _tenuring_threshold;
 
+  // The limit on the number of regions allocated for survivors.
+  size_t _max_survivor_regions;
+
+  // The number of survivor regions after a collection.
+  size_t _recorded_survivor_regions;
+  // List of survivor regions.
+  HeapRegion* _recorded_survivor_head;
+  HeapRegion* _recorded_survivor_tail;
+
+  ageTable _survivors_age_table;
+
 public:
 
   inline GCAllocPurpose
@@ -1116,7 +1140,9 @@
     return GCAllocForTenured;
   }
 
-  uint max_regions(int purpose);
+  static const size_t REGIONS_UNLIMITED = ~(size_t)0;
+
+  size_t max_regions(int purpose);
 
   // The limit on regions for a particular purpose is reached.
   void note_alloc_region_limit_reached(int purpose) {
@@ -1132,6 +1158,23 @@
   void note_stop_adding_survivor_regions() {
     _survivor_surv_rate_group->stop_adding_regions();
   }
+
+  void record_survivor_regions(size_t      regions,
+                               HeapRegion* head,
+                               HeapRegion* tail) {
+    _recorded_survivor_regions = regions;
+    _recorded_survivor_head    = head;
+    _recorded_survivor_tail    = tail;
+  }
+
+  void record_thread_age_table(ageTable* age_table)
+  {
+    _survivors_age_table.merge_par(age_table);
+  }
+
+  // Calculates survivor space parameters.
+  void calculate_survivors_policy();
+
 };
 
 // This encapsulates a particular strategy for a g1 Collector.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -28,7 +28,7 @@
 /***** ALL TIMES ARE IN SECS!!!!!!! *****/
 
 // this is the "interface"
-class G1MMUTracker {
+class G1MMUTracker: public CHeapObj {
 protected:
   double          _time_slice;
   double          _max_gc_time; // this is per time slice
@@ -67,7 +67,7 @@
   }
 };
 
-class G1MMUTrackerQueueElem {
+class G1MMUTrackerQueueElem VALUE_OBJ_CLASS_SPEC {
 private:
   double _start_time;
   double _end_time;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -77,6 +77,18 @@
 
 #define G1_PARTIAL_ARRAY_MASK 1
 
+inline bool has_partial_array_mask(oop* ref) {
+  return (intptr_t) ref & G1_PARTIAL_ARRAY_MASK;
+}
+
+inline oop* set_partial_array_mask(oop obj) {
+  return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK);
+}
+
+inline oop clear_partial_array_mask(oop* ref) {
+  return oop((intptr_t) ref & ~G1_PARTIAL_ARRAY_MASK);
+}
+
 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
   G1ParScanClosure _scanner;
   template <class T> void process_array_chunk(oop obj, int start, int end);
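
The three helpers above tag a queue entry by stealing the low bit of a word-aligned pointer, which is always zero for real oop* values. The idiom in isolation (names invented for the sketch):

    #include <cstdint>

    const uintptr_t kTag = 1;  // low bit is free on aligned pointers

    inline bool  has_tag(void* p)   { return ((uintptr_t) p & kTag) != 0; }
    inline void* set_tag(void* p)   { return (void*) ((uintptr_t) p | kTag); }
    inline void* clear_tag(void* p) { return (void*) ((uintptr_t) p & ~kTag); }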
@@ -101,7 +113,8 @@
     G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
 };
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
+template<bool do_gen_barrier, G1Barrier barrier,
+         bool do_mark_forwardee, bool skip_cset_test>
 class G1ParCopyClosure : public G1ParCopyHelper {
   G1ParScanClosure _scanner;
   void do_oop_work(oop* p);
@@ -119,14 +132,22 @@
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<true, G1BarrierNone, false> G1ParScanPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<true, G1BarrierNone, true> G1ParScanAndMarkPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS, false> G1ParScanHeapRSClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS, true> G1ParScanAndMarkHeapRSClosure;
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
-
+typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<true,  G1BarrierNone, false, false> G1ParScanPermClosure;
+typedef G1ParCopyClosure<false, G1BarrierNone, true,  false> G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<true,  G1BarrierNone, true,  false> G1ParScanAndMarkPermClosure;
+typedef G1ParCopyClosure<false, G1BarrierRS,   false, false> G1ParScanHeapRSClosure;
+typedef G1ParCopyClosure<false, G1BarrierRS,   true,  false> G1ParScanAndMarkHeapRSClosure;
+// This is the only case when we set skip_cset_test. Basically, this
+// closure is (or should be?) only called directly while we're draining
+// the overflow and task queues. In that case we know that the
+// reference in question points into the collection set, otherwise we
+// would not have pushed it on the queue.
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
+// We need a separate closure to handle references during evacuation
+// failure processing, as it cannot assume that the reference already
+// points into the collection set (like G1ParScanHeapEvacClosure does).
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public OopClosure {
   G1CollectedHeap* _g1;
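
The extra skip_cset_test template parameter above trades a runtime branch for a compile-time constant: the queue-draining instantiation folds the in-cset test away entirely. The mechanism in miniature (a sketch with invented names):

    struct Obj;
    bool in_cset_fast(Obj* obj);   // assumed to exist elsewhere
    void evacuate(Obj** slot);     // assumed to exist elsewhere

    template <bool kSkipCsetTest>
    void scan_ref(Obj** slot) {
      Obj* obj = *slot;
      // When kSkipCsetTest is true the condition is a compile-time
      // constant and the call to in_cset_fast() disappears.
      if (kSkipCsetTest || in_cset_fast(obj)) {
        evacuate(slot);
      }
    }

    // scan_ref<true>  : refs popped from the queue (known to be in the cset)
    // scan_ref<false> : every other caller still performs the test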
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -572,6 +572,9 @@
   }
   guarantee( _cards_scanned == NULL, "invariant" );
   _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers());
+  for (uint i = 0; i < n_workers(); ++i) {
+    _cards_scanned[i] = 0;
+  }
   _total_cards_scanned = 0;
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -30,7 +30,7 @@
 class HRInto_G1RemSet;
 class ConcurrentG1Refine;
 
-class G1RemSet {
+class G1RemSet: public CHeapObj {
 protected:
   G1CollectedHeap* _g1;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -28,7 +28,7 @@
 
 #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
                                                                             \
-  product(intx, ParallelGCG1AllocBufferSize, 4*K,                           \
+  product(intx, ParallelGCG1AllocBufferSize, 8*K,                           \
           "Size of parallel G1 allocation buffers in to-space.")            \
                                                                             \
   product(intx, G1TimeSliceMS, 500,                                         \
@@ -281,7 +281,17 @@
   develop(bool, G1HRRSFlushLogBuffersOnVerify, false,                       \
           "Forces flushing of log buffers before verification.")            \
                                                                             \
-  product(intx, G1MaxSurvivorRegions, 0,                                    \
-          "The maximum number of survivor regions")
+  product(bool, G1UseSurvivorSpace, true,                                   \
+          "When true, use survivor space.")                                 \
+                                                                            \
+  product(bool, G1FixedTenuringThreshold, false,                            \
+          "When set, G1 will not adjust the tenuring threshold")            \
+                                                                            \
+  product(bool, G1FixedEdenSize, false,                                     \
+          "When set, G1 will not allocate unused survivor space regions")   \
+                                                                            \
+  product(uintx, G1FixedSurvivorSpaceSize, 0,                               \
+          "If non-0 is the size of the G1 survivor space, "                 \
+          "otherwise SurvivorRatio is used to determine the size")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
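
For reference, these switches surface on the HotSpot command line in the usual -XX: form. An illustrative invocation (MyApp is a placeholder; G1 itself was experimental in this era, hence the unlock flag, and numeric -XX options accept the usual k/m/g suffixes):

    java -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC \
         -XX:+G1UseSurvivorSpace \
         -XX:G1FixedSurvivorSpaceSize=64m \
         MyApp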
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -32,11 +32,13 @@
   G1BarrierNone, G1BarrierRS, G1BarrierEvac
 };
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
+template<bool do_gen_barrier, G1Barrier barrier,
+         bool do_mark_forwardee, bool skip_cset_test>
 class G1ParCopyClosure;
 class G1ParScanClosure;
 
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, true>
+                                                      G1ParScanHeapEvacClosure;
 
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -566,7 +566,11 @@
   void note_end_of_copying() {
     assert(top() >= _next_top_at_mark_start,
            "Increase only");
-    _next_top_at_mark_start = top();
+    // Survivor regions will be scanned at the start of concurrent
+    // marking.
+    if (!is_survivor()) {
+      _next_top_at_mark_start = top();
+    }
   }
 
   // Returns "false" iff no object in the region was allocated when the
@@ -829,7 +833,7 @@
 
 // A linked lists of heap regions.  It leaves the "next" field
 // unspecified; that's up to subtypes.
-class RegionList {
+class RegionList VALUE_OBJ_CLASS_SPEC {
 protected:
   virtual HeapRegion* get_next(HeapRegion* chr) = 0;
   virtual void set_next(HeapRegion* chr,
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -65,9 +65,11 @@
   // We need access in order to union things into the base table.
   BitMap* bm() { return &_bm; }
 
+#if PRT_COUNT_OCCUPIED
   void recount_occupied() {
     _occupied = (jint) bm()->count_one_bits();
   }
+#endif
 
   PerRegionTable(HeapRegion* hr) :
     _hr(hr),
@@ -1144,7 +1146,9 @@
   size_t i = _outgoing_region_map.get_next_one_offset(0);
   while (i < _outgoing_region_map.size()) {
     HeapRegion* to_region = g1h->region_at(i);
-    to_region->rem_set()->clear_incoming_entry(hr());
+    if (!to_region->in_collection_set()) {
+      to_region->rem_set()->clear_incoming_entry(hr());
+    }
     i = _outgoing_region_map.get_next_one_offset(i+1);
   }
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -58,7 +58,7 @@
 //      is represented.  If a deleted PRT is re-used, a thread adding a bit,
 //      thinking the PRT is for a different region, does no harm.
 
-class OtherRegionsTable: public CHeapObj {
+class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
   friend class HeapRegionRemSetIterator;
 
   G1CollectedHeap* _g1h;
--- a/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -29,7 +29,7 @@
 
 class PtrQueueSet;
 
-class PtrQueue: public CHeapObj {
+class PtrQueue VALUE_OBJ_CLASS_SPEC {
 
 protected:
   // The ptr queue set to which this queue belongs.
@@ -130,7 +130,7 @@
 // In particular, the individual queues allocate buffers from this shared
 // set, and return completed buffers to the set.
 // All these variables are are protected by the TLOQ_CBL_mon. XXX ???
-class PtrQueueSet: public CHeapObj {
+class PtrQueueSet VALUE_OBJ_CLASS_SPEC {
 
 protected:
 
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -33,7 +33,7 @@
 // old versions synchronously.
 
 
-class SparsePRTEntry {
+class SparsePRTEntry: public CHeapObj {
 public:
   enum SomePublicConstants {
     CardsPerEntry = (short)4,
@@ -167,7 +167,7 @@
 };
 
   // ValueObj because it will be embedded in HRRS iterator.
-class RSHashTableIter: public CHeapObj {
+class RSHashTableIter VALUE_OBJ_CLASS_SPEC {
     short _tbl_ind;
     short _bl_ind;
     short _card_ind;
@@ -213,7 +213,7 @@
 
 class SparsePRTIter;
 
-class SparsePRT : public CHeapObj {
+class SparsePRT VALUE_OBJ_CLASS_SPEC {
   //  Iterations are done on the _cur hash table, since they only need to
   //  see entries visible at the start of a collection pause.
   //  All other operations are done using the _next hash table.
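
Several hunks in these files move types between CHeapObj (instances may be new'ed on the C heap) and VALUE_OBJ_CLASS_SPEC (instances may only be embedded in other objects or live on the stack). A sketch of how such allocation-policy base classes are conventionally enforced (illustrative only, not HotSpot's actual definitions):

    #include <cstddef>
    #include <cstdlib>

    class CHeapObjSketch {                // heap allocation allowed
     public:
      void* operator new(std::size_t size) { return std::malloc(size); }
      void  operator delete(void* p)       { std::free(p); }
    };

    class ValueObjSketch {                // embedded/stack only
     private:
      void* operator new(std::size_t);    // private, never defined:
    };                                    // 'new ValueObjSketch' won't compile

    class PtrQueueSketch : public ValueObjSketch {
      int _index;  // must live inside another object or on the stack
    };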
--- a/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -29,23 +29,14 @@
                              const char* name,
                              size_t summary_surv_rates_len) :
     _g1p(g1p), _name(name),
-    _all_regions_allocated(0),
-    _curr_length(0), _scan_only_prefix(0), _setup_seq_num(0),
-    _array_length(0), _surv_rate(NULL), _accum_surv_rate_pred(NULL),
-    _accum_surv_rate(0.0), _surv_rate_pred(NULL), _last_pred(0.0),
     _summary_surv_rates_len(summary_surv_rates_len),
     _summary_surv_rates_max_len(0),
-    _summary_surv_rates(NULL) {
-
-  // the following will set up the arrays with length 1
-  _curr_length = 1;
-  stop_adding_regions();
-  guarantee( _array_length == 1, "invariant" );
-  guarantee( _surv_rate_pred[0] != NULL, "invariant" );
-  _surv_rate_pred[0]->add(0.4);
-  all_surviving_words_recorded(false);
-  _curr_length = 0;
-
+    _summary_surv_rates(NULL),
+    _surv_rate(NULL),
+    _accum_surv_rate_pred(NULL),
+    _surv_rate_pred(NULL)
+{
+  reset();
   if (summary_surv_rates_len > 0) {
     size_t length = summary_surv_rates_len;
       _summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length);
@@ -60,61 +51,80 @@
   start_adding_regions();
 }
 
+
+void SurvRateGroup::reset()
+{
+  _all_regions_allocated = 0;
+  _scan_only_prefix      = 0;
+  _setup_seq_num         = 0;
+  _stats_arrays_length   = 0;
+  _accum_surv_rate       = 0.0;
+  _last_pred             = 0.0;
+  // the following will set up the arrays with length 1
+  _region_num            = 1;
+  stop_adding_regions();
+  guarantee( _stats_arrays_length == 1, "invariant" );
+  guarantee( _surv_rate_pred[0] != NULL, "invariant" );
+  _surv_rate_pred[0]->add(0.4);
+  all_surviving_words_recorded(false);
+  _region_num = 0;
+}
+
+
 void
 SurvRateGroup::start_adding_regions() {
-  _setup_seq_num   = _array_length;
-  _curr_length     = _scan_only_prefix;
+  _setup_seq_num   = _stats_arrays_length;
+  _region_num      = _scan_only_prefix;
   _accum_surv_rate = 0.0;
 
 #if 0
-  gclog_or_tty->print_cr("start adding regions, seq num %d, length %d",
-                         _setup_seq_num, _curr_length);
+  gclog_or_tty->print_cr("[%s] start adding regions, seq num %d, length %d",
+                         _name, _setup_seq_num, _region_num);
 #endif // 0
 }
 
 void
 SurvRateGroup::stop_adding_regions() {
-  size_t length = _curr_length;
 
 #if 0
-  gclog_or_tty->print_cr("stop adding regions, length %d", length);
+  gclog_or_tty->print_cr("[%s] stop adding regions, length %d", _name, _region_num);
 #endif // 0
 
-  if (length > _array_length) {
+  if (_region_num > _stats_arrays_length) {
     double* old_surv_rate = _surv_rate;
     double* old_accum_surv_rate_pred = _accum_surv_rate_pred;
     TruncatedSeq** old_surv_rate_pred = _surv_rate_pred;
 
-    _surv_rate = NEW_C_HEAP_ARRAY(double, length);
+    _surv_rate = NEW_C_HEAP_ARRAY(double, _region_num);
     if (_surv_rate == NULL) {
-      vm_exit_out_of_memory(sizeof(double) * length,
+      vm_exit_out_of_memory(sizeof(double) * _region_num,
                             "Not enough space for surv rate array.");
     }
-    _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, length);
+    _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num);
     if (_accum_surv_rate_pred == NULL) {
-      vm_exit_out_of_memory(sizeof(double) * length,
+      vm_exit_out_of_memory(sizeof(double) * _region_num,
                          "Not enough space for accum surv rate pred array.");
     }
-    _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, length);
+    _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num);
     if (_surv_rate == NULL) {
-      vm_exit_out_of_memory(sizeof(TruncatedSeq*) * length,
+      vm_exit_out_of_memory(sizeof(TruncatedSeq*) * _region_num,
                             "Not enough space for surv rate pred array.");
     }
 
-    for (size_t i = 0; i < _array_length; ++i)
+    for (size_t i = 0; i < _stats_arrays_length; ++i)
       _surv_rate_pred[i] = old_surv_rate_pred[i];
 
 #if 0
-    gclog_or_tty->print_cr("stop adding regions, new seqs %d to %d",
-                  _array_length, length - 1);
+    gclog_or_tty->print_cr("[%s] stop adding regions, new seqs %d to %d",
+                  _name, _stats_arrays_length, _region_num - 1);
 #endif // 0
 
-    for (size_t i = _array_length; i < length; ++i) {
+    for (size_t i = _stats_arrays_length; i < _region_num; ++i) {
       _surv_rate_pred[i] = new TruncatedSeq(10);
       // _surv_rate_pred[i]->add(last_pred);
     }
 
-    _array_length = length;
+    _stats_arrays_length = _region_num;
 
     if (old_surv_rate != NULL)
       FREE_C_HEAP_ARRAY(double, old_surv_rate);
@@ -124,7 +134,7 @@
       FREE_C_HEAP_ARRAY(NumberSeq*, old_surv_rate_pred);
   }
 
-  for (size_t i = 0; i < _array_length; ++i)
+  for (size_t i = 0; i < _stats_arrays_length; ++i)
     _surv_rate[i] = 0.0;
 }
 
@@ -135,7 +145,7 @@
 
   double ret = _accum_surv_rate;
   if (adjustment > 0) {
-    TruncatedSeq* seq = get_seq(_curr_length+1);
+    TruncatedSeq* seq = get_seq(_region_num+1);
     double surv_rate = _g1p->get_new_prediction(seq);
     ret += surv_rate;
   }
@@ -145,23 +155,23 @@
 
 int
 SurvRateGroup::next_age_index() {
-  TruncatedSeq* seq = get_seq(_curr_length);
+  TruncatedSeq* seq = get_seq(_region_num);
   double surv_rate = _g1p->get_new_prediction(seq);
   _accum_surv_rate += surv_rate;
 
-  ++_curr_length;
+  ++_region_num;
   return (int) ++_all_regions_allocated;
 }
 
 void
 SurvRateGroup::record_scan_only_prefix(size_t scan_only_prefix) {
-  guarantee( scan_only_prefix <= _curr_length, "pre-condition" );
+  guarantee( scan_only_prefix <= _region_num, "pre-condition" );
   _scan_only_prefix = scan_only_prefix;
 }
 
 void
 SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
-  guarantee( 0 <= age_in_group && (size_t) age_in_group < _curr_length,
+  guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
              "pre-condition" );
   guarantee( _surv_rate[age_in_group] <= 0.00001,
              "should only update each slot once" );
@@ -178,15 +188,15 @@
 
 void
 SurvRateGroup::all_surviving_words_recorded(bool propagate) {
-  if (propagate && _curr_length > 0) { // conservative
-    double surv_rate = _surv_rate_pred[_curr_length-1]->last();
+  if (propagate && _region_num > 0) { // conservative
+    double surv_rate = _surv_rate_pred[_region_num-1]->last();
 
 #if 0
     gclog_or_tty->print_cr("propagating %1.2lf from %d to %d",
                   surv_rate, _curr_length, _array_length - 1);
 #endif // 0
 
-    for (size_t i = _curr_length; i < _array_length; ++i) {
+    for (size_t i = _region_num; i < _stats_arrays_length; ++i) {
       guarantee( _surv_rate[i] <= 0.00001,
                  "the slot should not have been updated" );
       _surv_rate_pred[i]->add(surv_rate);
@@ -195,7 +205,7 @@
 
   double accum = 0.0;
   double pred = 0.0;
-  for (size_t i = 0; i < _array_length; ++i) {
+  for (size_t i = 0; i < _stats_arrays_length; ++i) {
     pred = _g1p->get_new_prediction(_surv_rate_pred[i]);
     if (pred > 1.0) pred = 1.0;
     accum += pred;
@@ -209,8 +219,8 @@
 void
 SurvRateGroup::print() {
   gclog_or_tty->print_cr("Surv Rate Group: %s (%d entries, %d scan-only)",
-                _name, _curr_length, _scan_only_prefix);
-  for (size_t i = 0; i < _curr_length; ++i) {
+                _name, _region_num, _scan_only_prefix);
+  for (size_t i = 0; i < _region_num; ++i) {
     gclog_or_tty->print_cr("    age %4d   surv rate %6.2lf %%   pred %6.2lf %%%s",
                   i, _surv_rate[i] * 100.0,
                   _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0,
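
stop_adding_regions above grows three parallel statistics arrays, carrying the existing TruncatedSeq pointers across so per-age history survives the resize, and seeding fresh sequences for the newly covered ages. The growth pattern in miniature (hand-rolled allocation to mirror the NEW_C_HEAP_ARRAY style; Seq is a stand-in for TruncatedSeq):

    #include <cstddef>
    #include <cstdlib>

    struct Seq { /* truncated history of observed survival rates */ };

    Seq** grow_stats(Seq** old_seqs, std::size_t old_len, std::size_t new_len) {
      Seq** seqs = (Seq**) std::malloc(new_len * sizeof(Seq*));
      for (std::size_t i = 0; i < old_len; ++i)
        seqs[i] = old_seqs[i];          // keep accumulated history
      for (std::size_t i = old_len; i < new_len; ++i)
        seqs[i] = new Seq();            // fresh sequence for each new age
      std::free(old_seqs);              // free the old array, not the seqs
      return seqs;
    }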
--- a/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/survRateGroup.hpp	Fri Feb 27 15:12:52 2009 -0800
@@ -29,7 +29,7 @@
   G1CollectorPolicy* _g1p;
   const char* _name;
 
-  size_t  _array_length;
+  size_t  _stats_arrays_length;
   double* _surv_rate;
   double* _accum_surv_rate_pred;
   double  _last_pred;
@@ -40,7 +40,7 @@
   size_t         _summary_surv_rates_max_len;
 
   int _all_regions_allocated;
-  size_t _curr_length;
+  size_t _region_num;
   size_t _scan_only_prefix;
   size_t _setup_seq_num;
 
@@ -48,6 +48,7 @@
   SurvRateGroup(G1CollectorPolicy* g1p,
                 const char* name,
                 size_t summary_surv_rates_len);
+  void reset();
   void start_adding_regions();
   void stop_adding_regions();
   void record_scan_only_prefix(size_t scan_only_prefix);
@@ -55,22 +56,21 @@
   void all_surviving_words_recorded(bool propagate);
   const char* name() { return _name; }
 
-  size_t region_num() { return _curr_length; }
+  size_t region_num() { return _region_num; }
   size_t scan_only_length() { return _scan_only_prefix; }
   double accum_surv_rate_pred(int age) {
     assert(age >= 0, "must be");
-    if ((size_t)age < _array_length)
+    if ((size_t)age < _stats_arrays_length)
       return _accum_surv_rate_pred[age];
     else {
-      double diff = (double) (age - _array_length + 1);
-      return _accum_surv_rate_pred[_array_length-1] + diff * _last_pred;
+      double diff = (double) (age - _stats_arrays_length + 1);
+      return _accum_surv_rate_pred[_stats_arrays_length-1] + diff * _last_pred;
     }
   }
 
   double accum_surv_rate(size_t adjustment);
 
   TruncatedSeq* get_seq(size_t age) {
-    guarantee( 0 <= age, "pre-condition" );
     if (age >= _setup_seq_num) {
       guarantee( _setup_seq_num > 0, "invariant" );
       age = _setup_seq_num-1;
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Fri Feb 27 15:12:52 2009 -0800
@@ -28,6 +28,7 @@
 binaryTreeDictionary.cpp                binaryTreeDictionary.hpp
 binaryTreeDictionary.cpp                globals.hpp
 binaryTreeDictionary.cpp                ostream.hpp
+binaryTreeDictionary.cpp                space.inline.hpp
 binaryTreeDictionary.cpp                spaceDecorator.hpp
 
 binaryTreeDictionary.hpp                freeBlockDictionary.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Fri Feb 27 15:12:52 2009 -0800
@@ -31,9 +31,10 @@
 cardTableRS.cpp				concurrentMark.hpp
 cardTableRS.cpp				g1SATBCardTableModRefBS.hpp
 
-collectionSetChooser.cpp		g1CollectedHeap.hpp
+collectionSetChooser.cpp		g1CollectedHeap.inline.hpp
 collectionSetChooser.cpp		g1CollectorPolicy.hpp
 collectionSetChooser.cpp		collectionSetChooser.hpp
+collectionSetChooser.cpp		space.inline.hpp
 
 collectionSetChooser.hpp		heapRegion.hpp
 collectionSetChooser.hpp                growableArray.hpp
@@ -42,14 +43,16 @@
 concurrentG1Refine.cpp			concurrentG1Refine.hpp
 concurrentG1Refine.cpp			concurrentG1RefineThread.hpp
 concurrentG1Refine.cpp			copy.hpp
-concurrentG1Refine.cpp			g1CollectedHeap.hpp
+concurrentG1Refine.cpp			g1CollectedHeap.inline.hpp
 concurrentG1Refine.cpp			g1RemSet.hpp
+concurrentG1Refine.cpp			space.inline.hpp
 
 concurrentG1Refine.hpp			globalDefinitions.hpp
+concurrentG1Refine.hpp			allocation.hpp
 
 concurrentG1RefineThread.cpp		concurrentG1Refine.hpp
 concurrentG1RefineThread.cpp		concurrentG1RefineThread.hpp
-concurrentG1RefineThread.cpp		g1CollectedHeap.hpp
+concurrentG1RefineThread.cpp		g1CollectedHeap.inline.hpp
 concurrentG1RefineThread.cpp            g1CollectorPolicy.hpp
 concurrentG1RefineThread.cpp		handles.inline.hpp
 concurrentG1RefineThread.cpp		mutexLocker.hpp
@@ -166,10 +169,11 @@
 g1CollectorPolicy.cpp			concurrentMarkThread.inline.hpp
 g1CollectorPolicy.cpp			debug.hpp
 g1CollectorPolicy.cpp			java.hpp
-g1CollectorPolicy.cpp                   g1CollectedHeap.hpp
+g1CollectorPolicy.cpp                   g1CollectedHeap.inline.hpp
 g1CollectorPolicy.cpp                   g1CollectorPolicy.hpp
 g1CollectorPolicy.cpp                   heapRegionRemSet.hpp
 g1CollectorPolicy.cpp			mutexLocker.hpp
+g1CollectorPolicy.cpp			gcPolicyCounters.hpp
 
 g1CollectorPolicy.hpp                   collectorPolicy.hpp
 g1CollectorPolicy.hpp                   collectionSetChooser.hpp
@@ -187,7 +191,7 @@
 g1MarkSweep.cpp                         codeCache.hpp
 g1MarkSweep.cpp                         events.hpp
 g1MarkSweep.cpp                         fprofiler.hpp
-g1MarkSweep.hpp                         g1CollectedHeap.hpp
+g1MarkSweep.hpp                         g1CollectedHeap.inline.hpp
 g1MarkSweep.cpp                         g1MarkSweep.hpp
 g1MarkSweep.cpp                         gcLocker.hpp
 g1MarkSweep.cpp                         genCollectedHeap.hpp
@@ -226,7 +230,7 @@
 g1MMUTracker.cpp			mutexLocker.hpp
 
 g1MMUTracker.hpp			debug.hpp
-
+g1MMUTracker.hpp			allocation.hpp
 g1RemSet.cpp				bufferingOopClosure.hpp
 g1RemSet.cpp				concurrentG1Refine.hpp
 g1RemSet.cpp				concurrentG1RefineThread.hpp
@@ -264,12 +268,13 @@
 heapRegion.cpp                          iterator.hpp
 heapRegion.cpp                          oop.inline.hpp
 
-heapRegion.hpp                          space.hpp
+heapRegion.hpp                          space.inline.hpp
 heapRegion.hpp                          spaceDecorator.hpp
 heapRegion.hpp                          g1BlockOffsetTable.inline.hpp
 heapRegion.hpp                          watermark.hpp
 heapRegion.hpp				g1_specialized_oop_closures.hpp
 heapRegion.hpp				survRateGroup.hpp
+heapRegion.hpp				ageTable.hpp
 
 heapRegionRemSet.hpp			sparsePRT.hpp
 
@@ -283,7 +288,7 @@
 heapRegionRemSet.cpp                    space.inline.hpp
 
 heapRegionSeq.cpp                       allocation.hpp
-heapRegionSeq.cpp                       g1CollectedHeap.hpp
+heapRegionSeq.cpp                       g1CollectedHeap.inline.hpp
 heapRegionSeq.cpp                       heapRegionSeq.hpp
 
 heapRegionSeq.hpp                       growableArray.hpp
@@ -334,18 +339,18 @@
 survRateGroup.hpp			numberSeq.hpp
 
 survRateGroup.cpp			allocation.hpp
-survRateGroup.cpp			g1CollectedHeap.hpp
+survRateGroup.cpp			g1CollectedHeap.inline.hpp
 survRateGroup.cpp			g1CollectorPolicy.hpp
 survRateGroup.cpp			heapRegion.hpp
 survRateGroup.cpp			survRateGroup.hpp
 
 thread.cpp				concurrentMarkThread.inline.hpp
 
-universe.cpp                            g1CollectedHeap.hpp
+universe.cpp                            g1CollectedHeap.inline.hpp
 universe.cpp                            g1CollectorPolicy.hpp
 
 vm_operations_g1.hpp			vmGCOperations.hpp
 
 vm_operations_g1.cpp			vm_operations_g1.hpp
-vm_operations_g1.cpp                    g1CollectedHeap.hpp
+vm_operations_g1.cpp                    g1CollectedHeap.inline.hpp
 vm_operations_g1.cpp                    isGCActiveMark.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parNew	Fri Feb 27 15:12:52 2009 -0800
@@ -29,6 +29,8 @@
 asParNewGeneration.cpp                  cmsAdaptiveSizePolicy.hpp
 asParNewGeneration.cpp                  cmsGCAdaptivePolicyCounters.hpp
 asParNewGeneration.cpp                  defNewGeneration.inline.hpp
+asParNewGeneration.cpp                  markOop.inline.hpp
+asParNewGeneration.cpp                  markSweep.inline.hpp
 asParNewGeneration.cpp                  oop.pcgc.inline.hpp
 asParNewGeneration.cpp                  parNewGeneration.hpp
 asParNewGeneration.cpp                  referencePolicy.hpp
@@ -40,7 +42,7 @@
 parCardTableModRefBS.cpp                java.hpp
 parCardTableModRefBS.cpp                mutexLocker.hpp
 parCardTableModRefBS.cpp                sharedHeap.hpp
-parCardTableModRefBS.cpp                space.hpp
+parCardTableModRefBS.cpp                space.inline.hpp
 parCardTableModRefBS.cpp                universe.hpp
 parCardTableModRefBS.cpp                virtualspace.hpp
 
@@ -77,6 +79,7 @@
 parNewGeneration.cpp                    sharedHeap.hpp
 parNewGeneration.cpp                    space.hpp
 parNewGeneration.cpp                    spaceDecorator.hpp
+parNewGeneration.cpp                    thread.hpp
 parNewGeneration.cpp                    workgroup.hpp
 
 parNewGeneration.hpp                    defNewGeneration.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Fri Feb 27 15:12:52 2009 -0800
@@ -302,6 +302,8 @@
 psOldGen.hpp                            spaceCounters.hpp
 
 psPermGen.cpp                           gcUtil.hpp
+psPermGen.cpp                           markOop.inline.hpp
+psPermGen.cpp                           markSweep.inline.hpp
 psPermGen.cpp                           parallelScavengeHeap.hpp
 psPermGen.cpp                           psMarkSweepDecorator.hpp
 psPermGen.cpp                           psParallelCompact.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared	Fri Feb 27 15:12:52 2009 -0800
@@ -100,4 +100,4 @@
 spaceCounters.hpp                       perfData.hpp
 spaceCounters.hpp                       generationCounters.hpp
 
-vmGCOperations.cpp                      g1CollectedHeap.hpp
+vmGCOperations.cpp                      g1CollectedHeap.inline.hpp
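
All of the includeDB hunks above make one kind of change: a source file that now
calls inlined members switches its declared dependency from the plain header to
the .inline.hpp variant (plus a few files that gain new dependencies outright).
Each line in an includeDB file pairs one source file with one header it depends
on; the HotSpot build generates each file's include list from these entries, so
flipping an entry from foo.hpp to foo.inline.hpp is what lets the .cpp file see
the inlined definitions it now uses. A format sketch (file names here are
hypothetical, not part of this change):

    // Hypothetical includeDB entries, format sketch only:
    fooRefine.cpp                           fooHeap.inline.hpp
    fooRefine.cpp                           fooRemSet.hpp
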
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Sat Jan 31 15:26:34 2009 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Feb 27 15:12:52 2009 -0800
@@ -404,6 +404,8 @@
     if (terminator()->offer_termination()) break;
     par_scan_state()->end_term_time();
   }
+  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
+         "Broken overflow list?");
   // Finish the last termination pause.
   par_scan_state()->end_term_time();
 }
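
The assert added in this hunk states the invariant the rest of the changeset
maintains: by the time the workers pass the termination protocol, every object
pushed onto the shared overflow list has been taken off again, so both the list
and the debug-only push counter must read empty. A minimal stand-in for that
bookkeeping, with hypothetical names (the real counter is the NOT_PRODUCT-only
_num_par_pushes):

    #include <atomic>
    #include <cassert>

    // Debug bookkeeping for a shared overflow list (illustrative names):
    // every push increments the counter, every batch take decrements it.
    std::atomic<long> num_par_pushes{0};

    void note_overflow_push()       { num_par_pushes.fetch_add(1); }
    void note_overflow_take(long n) { num_par_pushes.fetch_sub(n); }

    void check_termination(const void* overflow_list) {
      // Once all workers agree to terminate, nothing may remain unclaimed.
      assert(overflow_list == nullptr && num_par_pushes.load() == 0);
    }
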
@@ -456,6 +458,8 @@
   _is_alive_closure(this),
   _plab_stats(YoungPLABSize, PLABWeight)
 {
+  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
+  NOT_PRODUCT(_num_par_pushes = 0;)
   _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 
@@ -993,12 +997,19 @@
              "push forwarded object");
     }
     // Push it on one of the queues of to-be-scanned objects.
-    if (!par_scan_state->work_queue()->push(obj_to_push)) {
+    bool simulate_overflow = false;
+    NOT_PRODUCT(
+      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
+        // simulate a stack overflow
+        simulate_overflow = true;
+      }
+    )
+    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
       // Add stats for overflow pushes.
       if (Verbose && PrintGCDetails) {
         gclog_or_tty->print("queue overflow!\n");
       }
-      push_on_overflow_list(old);
+      push_on_overflow_list(old, par_scan_state);
       par_scan_state->note_overflow_push();
     }
     par_scan_state->note_push();
@@ -1110,9 +1121,16 @@
              "push forwarded object");
     }
     // Push it on one of the queues of to-be-scanned objects.
-    if (!par_scan_state->work_queue()->push(obj_to_push)) {
+    bool simulate_overflow = false;
+    NOT_PRODUCT(
+      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
+        // simulate a stack overflow
+        simulate_overflow = true;
+      }
+    )
+    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
       // Add stats for overflow pushes.
-      push_on_overflow_list(old);
+      push_on_overflow_list(old, par_scan_state);
       par_scan_state->note_overflow_push();
     }
     par_scan_state->note_push();
@@ -1135,89 +1153,190 @@
   return forward_ptr;
 }
 
-void ParNewGeneration::push_on_overflow_list(oop from_space_obj) {
-  oop cur_overflow_list = _overflow_list;
+#ifndef PRODUCT
+// It's OK to call this multi-threaded;  the worst thing
+// that can happen is that we'll get a bunch of closely
+// spaced simulated overflows, but that's OK, in fact
+// probably good as it would exercise the overflow code
+// under contention.
+bool ParNewGeneration::should_simulate_overflow() {
+  if (_overflow_counter-- <= 0) { // just being defensive
+    _overflow_counter = ParGCWorkQueueOverflowInterval;
+    return true;
+  } else {
+    return false;
+  }
+}
+#endif
+
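
should_simulate_overflow() above is a plain countdown: every
ParGCWorkQueueOverflowInterval-th call reports a pretend overflow, so the
overflow-list path in the two push sites earlier gets exercised in non-product
runs even when the work queues never actually fill. A self-contained sketch of
the same pattern (names are illustrative):

    // Interval-based fault injection, as a standalone sketch.
    struct OverflowSimulator {
      int counter;          // counts down across pushes; may race, harmlessly
      const int interval;   // plays the role of ParGCWorkQueueOverflowInterval
      explicit OverflowSimulator(int n) : counter(n), interval(n) {}
      bool should_simulate() {
        if (counter-- <= 0) {   // <= 0: defensive against concurrent decrements
          counter = interval;   // re-arm for the next interval
          return true;          // pretend the work-queue push failed
        }
        return false;
      }
    };
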
+#define BUSY (oop(0x1aff1aff))
+void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
   // if the object has been forwarded to itself, then we cannot
   // use the klass pointer for the linked list.  Instead we have
   // to allocate an oopDesc in the C-Heap and use that for the linked list.
+  // XXX This is horribly inefficient when a promotion failure occurs
+  // and should be fixed. XXX FIX ME !!!
+#ifndef PRODUCT
+  Atomic::inc_ptr(&_num_par_pushes);
+  assert(_num_par_pushes > 0, "Tautology");
+#endif
   if (from_space_obj->forwardee() == from_space_obj) {
     oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1);
     listhead->forward_to(from_space_obj);
     from_space_obj = listhead;
   }
-  while (true) {
-    from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
-    oop observed_overflow_list =
+  oop observed_overflow_list = _overflow_list;
+  oop cur_overflow_list;
+  do {
+    cur_overflow_list = observed_overflow_list;
+    if (cur_overflow_list != BUSY) {
+      from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
+    } else {
+      from_space_obj->set_klass_to_list_ptr(NULL);
+    }
+    observed_overflow_list =
       (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
-    if (observed_overflow_list == cur_overflow_list) break;
-    // Otherwise...
-    cur_overflow_list = observed_overflow_list;
-  }
+  } while (cur_overflow_list != observed_overflow_list);
 }
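
The loop above is a Treiber-style lock-free push threaded through the objects'
klass words, with BUSY acting as a claim sentinel so that a pusher never links
new objects behind a list some claimant has already taken. A portable stand-in
for the same protocol, using std::atomic and an explicit next field in place of
Atomic::cmpxchg_ptr and the klass-word threading (all names are illustrative):

    #include <atomic>
    #include <cstdint>

    struct Node { Node* next; };
    // Same kind of claim sentinel as the 0x1aff1aff BUSY marker above.
    static Node* const BUSY = reinterpret_cast<Node*>(uintptr_t(0x1aff1aff));
    std::atomic<Node*> overflow_list{nullptr};

    void push_on_overflow_list(Node* n) {
      Node* cur = overflow_list.load();
      do {
        // Never link behind the sentinel: a claimant already owns the old
        // list, so a push onto a BUSY list starts a fresh chain instead.
        n->next = (cur == BUSY) ? nullptr : cur;
        // On failure, compare_exchange_weak reloads 'cur' with the current
        // head, mirroring the re-read of observed_overflow_list above.
      } while (!overflow_list.compare_exchange_weak(cur, n));
    }
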
 
+// *NOTE*: The overflow list manipulation code here and
+// in CMSCollector:: is very similar in shape,
+// except that in the CMS case we thread the objects
+// directly into the list via their mark word, and do
+// not need to deal with special cases below related
+// to chunking of object arrays and promotion failure
+// handling.
+// CR 6797058 has been filed to attempt consolidation of
+// the common code.
+// Because of the common code, if you make any changes in
+// the code below, please check the CMS version to see if
+// similar changes might be needed.
+// See CMSCollector::par_take_from_overflow_list() for
+// more extensive documentation comments.
 bool
 ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
   ObjToScanQueue* work_q = par_scan_state->work_queue();
+  assert(work_q->size() == 0, "Should first empty local work queue");
   // How many to take?
-  int objsFromOverflow = MIN2(work_q->max_elems()/4,
-                              (juint)ParGCDesiredObjsFromOverflowList);
+  size_t objsFromOverflow = MIN2((size_t)work_q->max_elems()/4,
+                                 (size_t)ParGCDesiredObjsFromOverflowList);
 
   if (_overflow_list == NULL) return false;
 
   // Otherwise, there was something there; try claiming the list.
-  oop prefix = (oop)Atomic::xchg_ptr(NULL, &_overflow_list);
-
-  if (prefix == NULL) {
-    return false;
+  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+  // Trim off a prefix of at most objsFromOverflow items
+  Thread* tid = Thread::current();
+  size_t spin_count = (size_t)ParallelGCThreads;
+  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
+  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
+    // someone grabbed it before we did ...
+    // ... we spin for a short while...
+    os::sleep(tid, sleep_time_millis, false);
+    if (_overflow_list == NULL) {
+      // nothing left to take
+      return false;
+    } else if (_overflow_list != BUSY) {
+      // try and grab the prefix
+      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+    }
   }
-  // Trim off a prefix of at most objsFromOverflow items
-  int i = 1;
+  if (prefix == NULL || prefix == BUSY) {
+    // Nothing to take, or we waited long enough.
+    if (prefix == NULL) {
+      // Write the NULL back in case we overwrote it with BUSY
+      // above and it still holds that value.
+      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+    }
+    return false;
+  }
+  assert(prefix != NULL && prefix != BUSY, "Error");
+  size_t i = 1;
   oop cur = prefix;
   while (i < objsFromOverflow && cur->klass_or_null() != NULL) {
     i++; cur = oop(cur->klass());
   }
 
   // Reattach remaining (suffix) to overflow list
-  if (cur->klass_or_null() != NULL) {
-    oop suffix = oop(cur->klass());
-    cur->set_klass_to_list_ptr(NULL);
-
-    // Find last item of suffix list
-    oop last = suffix;
-    while (last->klass_or_null() != NULL) {
-      last = oop(last->klass());
+  if (cur->klass_or_null() == NULL) {
+    // Write back the NULL in lieu of the BUSY we wrote
+    // above, if it is still the same value.
+    if (_overflow_list == BUSY) {
+      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
     }
-    // Atomically prepend suffix to current overflow list
-    oop cur_overflow_list = _overflow_list;
-    while (true) {
-      last->set_klass_to_list_ptr(cur_overflow_list);
-      oop observed_overflow_list =
-        (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
-      if (observed_overflow_list == cur_overflow_list) break;
-      // Otherwise...
-      cur_overflow_list = observed_overflow_list;
+  } else {
+    assert(cur->klass_or_null() != BUSY, "Error");
+    oop suffix = oop(cur->klass());       // suffix will be put back on global list
+    cur->set_klass_to_list_ptr(NULL);     // break off suffix
+    // It's possible that the list is still in the empty (BUSY) state
+    // we left it in a short while ago; in that case we may be
+    // able to put the suffix back directly.
+    oop observed_overflow_list = _overflow_list;
+    oop cur_overflow_list = observed_overflow_list;
+    bool attached = false;
+    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
+      observed_overflow_list =
+        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+      if (cur_overflow_list == observed_overflow_list) {
+        attached = true;
+        break;
+      } else cur_overflow_list = observed_overflow_list;
+    }
+    if (!attached) {
+      // Too bad, someone else got in between; we'll need to do a splice.
+      // Find the last item of suffix list
+      oop last = suffix;
+      while (last->klass_or_null() != NULL) {
+        last = oop(last->klass());
+      }
+      // Atomically prepend suffix to current overflow list
+      observed_overflow_list = _overflow_list;
+      do {
+        cur_overflow_list = observed_overflow_list;
+        if (cur_overflow_list != BUSY) {
+          // Do the splice ...
+          last->set_klass_to_list_ptr(cur_overflow_list);
+        } else { // cur_overflow_list == BUSY
+          last->set_klass_to_list_ptr(NULL);
+        }
+        observed_overflow_list =
+          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+      } while (cur_overflow_list != observed_overflow_list);
     }
   }
 
   // Push objects on prefix list onto this thread's work queue
-  assert(cur != NULL, "program logic");
+  assert(prefix != NULL && prefix != BUSY, "program logic");
   cur = prefix;
-  int n = 0;
+  ssize_t n = 0;
   while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
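
take_from_overflow_list() claims the entire list by swapping BUSY into the
head, spins briefly (sleeping between probes) if another thread already holds
the claim, and is careful to restore NULL if it turned an empty list into a
BUSY one. Continuing the stand-in sketch from the push example above (same
illustrative Node/BUSY, std::atomic in place of Atomic::xchg_ptr/cmpxchg_ptr):

    #include <atomic>
    #include <chrono>
    #include <cstdint>
    #include <thread>

    struct Node { Node* next; };                       // as in the push sketch
    static Node* const BUSY = reinterpret_cast<Node*>(uintptr_t(0x1aff1aff));
    extern std::atomic<Node*> overflow_list;

    // Returns the whole current list (the caller trims a prefix for its local
    // queue and splices the remainder back), or nullptr if nothing was taken.
    Node* claim_overflow_list(int spin_count) {
      Node* prefix = overflow_list.exchange(BUSY);
      for (int spin = 0; prefix == BUSY && spin < spin_count; spin++) {
        // Another thread holds the claim; nap briefly and look again.
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
        Node* head = overflow_list.load();
        if (head == nullptr) return nullptr;           // nothing left to take
        if (head != BUSY)    prefix = overflow_list.exchange(BUSY);
      }
      if (prefix == nullptr || prefix == BUSY) {
        if (prefix == nullptr) {
          // We may have overwritten NULL with BUSY above; put the NULL back
          // so later claimants see an empty list rather than a stale claim.
          Node* expected = BUSY;
          overflow_list.compare_exchange_strong(expected, nullptr);
        }
        return nullptr;   // nothing to take, or we waited long enough
      }
      return prefix;
    }

Splicing the trimmed-off suffix back onto the global list, as the code above
goes on to do, reuses the same CAS loop shape as push_on_overflow_list.
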