changeset 5777:6fa574bfd32a

Merge
author chegar
date Thu, 03 Oct 2013 19:13:12 +0100
parents 9b4ce069642e ebfa5793d349
children 6795fcebbf42
files src/share/vm/classfile/classFileParser.cpp src/share/vm/runtime/os.cpp test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java test/runtime/6878713/Test6878713.sh test/runtime/6878713/testcase.jar test/runtime/7020373/Test7020373.sh test/runtime/7020373/testcase.jar
diffstat 245 files changed, 7191 insertions(+), 2543 deletions(-)
--- a/.hgtags	Sat Sep 14 20:40:34 2013 +0100
+++ b/.hgtags	Thu Oct 03 19:13:12 2013 +0100
@@ -376,3 +376,9 @@
 aed585cafc0d9655726af6d1e1081d1c94cb3b5c jdk8-b106
 50794d8ac11c9579b41dec4de23b808fef9f34a1 hs25-b49
 5b7f90aab3ad25a25b75b7b2bb18d5ae23d8231c jdk8-b107
+a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
+85072013aad46050a362d10ab78e963121c8014c jdk8-b108
+566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
+c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
+58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
+6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
--- a/agent/src/os/linux/ps_core.c	Sat Sep 14 20:40:34 2013 +0100
+++ b/agent/src/os/linux/ps_core.c	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -698,29 +698,58 @@
 
 // read segments of a shared object
 static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
-   int i = 0;
-   ELF_PHDR* phbuf;
-   ELF_PHDR* lib_php = NULL;
+  int i = 0;
+  ELF_PHDR* phbuf;
+  ELF_PHDR* lib_php = NULL;
+
+  int page_size = sysconf(_SC_PAGE_SIZE);
 
-   if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
-      return false;
+  if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
+    return false;
+  }
+
+  // we want to process only PT_LOAD segments that are not writable.
+  // i.e., text segments. The read/write/exec (data) segments would
+  // have been already added from core file segments.
+  for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
+    if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
+
+      uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
+      map_info *existing_map = core_lookup(ph, target_vaddr);
 
-   // we want to process only PT_LOAD segments that are not writable.
-   // i.e., text segments. The read/write/exec (data) segments would
-   // have been already added from core file segments.
-   for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
-      if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
-         if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
-            goto err;
+      if (existing_map == NULL) {
+        if (add_map_info(ph, lib_fd, lib_php->p_offset,
+                          target_vaddr, lib_php->p_filesz) == NULL) {
+          goto err;
+        }
+      } else {
+        if ((existing_map->memsz != page_size) &&
+            (existing_map->fd != lib_fd) &&
+            (existing_map->memsz != lib_php->p_filesz)) {
+
+          print_debug("address conflict @ 0x%lx (size = %ld, flags = %d)\n",
+                        target_vaddr, lib_php->p_filesz, lib_php->p_flags);
+          goto err;
+        }
+
+        /* replace PT_LOAD segment with library segment */
+        print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
+                     existing_map->memsz, lib_php->p_filesz);
+
+        existing_map->fd = lib_fd;
+        existing_map->offset = lib_php->p_offset;
+        existing_map->memsz = lib_php->p_filesz;
       }
-      lib_php++;
-   }
+    }
+
+    lib_php++;
+  }
 
-   free(phbuf);
-   return true;
+  free(phbuf);
+  return true;
 err:
-   free(phbuf);
-   return false;
+  free(phbuf);
+  return false;
 }
 
 // process segments from interpreter (ld.so or ld-linux.so)
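
Note on the hunk above: the new branch replaces a placeholder mapping that came
from the core file with the library's on-disk text segment, rather than adding
a duplicate mapping for the same address. A minimal sketch of that replacement
policy, assuming a simplified map_info struct (hypothetical; the real struct
lives in the SA's libproc sources):

    #include <sys/types.h>  // off_t, size_t

    // Hypothetical, simplified illustration of the replacement done above.
    typedef struct map_info {
      int       fd;      // file descriptor the bytes are read from
      off_t     offset;  // offset of the segment within that file
      size_t    memsz;   // size of the mapping
    } map_info;

    // Redirect an existing core-file mapping to the library's text segment,
    // so later reads at this address come from the .so file, not the core.
    static void redirect_to_library(map_info* m, int lib_fd,
                                    off_t lib_offset, size_t lib_filesz) {
      m->fd     = lib_fd;
      m->offset = lib_offset;
      m->memsz  = lib_filesz;
    }
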
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Sat Sep 14 20:40:34 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu Oct 03 19:13:12 2013 +0100
@@ -1213,6 +1213,7 @@
                 }
                 HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
                 if (t.countTokens() == 1) {
+                    String name = t.nextToken();
                     out.println("intConstant " + name + " " + db.lookupIntConstant(name));
                 } else if (t.countTokens() == 0) {
                     Iterator i = db.getIntConstants();
@@ -1235,6 +1236,7 @@
                 }
                 HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
                 if (t.countTokens() == 1) {
+                    String name = t.nextToken();
                     out.println("longConstant " + name + " " + db.lookupLongConstant(name));
                 } else if (t.countTokens() == 0) {
                     Iterator i = db.getLongConstants();
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Sat Sep 14 20:40:34 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Thu Oct 03 19:13:12 2013 +0100
@@ -81,7 +81,7 @@
 
     public Address getCompKlassAddressAt(long offset)
             throws UnalignedAddressException, UnmappedAddressException {
-        return debugger.readCompOopAddress(addr + offset);
+        return debugger.readCompKlassAddress(addr + offset);
     }
 
     //
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Sat Sep 14 20:40:34 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Oct 03 19:13:12 2013 +0100
@@ -792,7 +792,7 @@
 
   public boolean isCompressedKlassPointersEnabled() {
     if (compressedKlassPointersEnabled == null) {
-        Flag flag = getCommandLineFlag("UseCompressedKlassPointers");
+        Flag flag = getCommandLineFlag("UseCompressedClassPointers");
         compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE:
              (flag.getBool()? Boolean.TRUE: Boolean.FALSE);
     }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Sat Sep 14 20:40:34 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Thu Oct 03 19:13:12 2013 +0100
@@ -66,18 +66,18 @@
       printGCAlgorithm(flagMap);
       System.out.println();
       System.out.println("Heap Configuration:");
-      printValue("MinHeapFreeRatio   = ", getFlagValue("MinHeapFreeRatio", flagMap));
-      printValue("MaxHeapFreeRatio   = ", getFlagValue("MaxHeapFreeRatio", flagMap));
-      printValMB("MaxHeapSize        = ", getFlagValue("MaxHeapSize", flagMap));
-      printValMB("NewSize            = ", getFlagValue("NewSize", flagMap));
-      printValMB("MaxNewSize         = ", getFlagValue("MaxNewSize", flagMap));
-      printValMB("OldSize            = ", getFlagValue("OldSize", flagMap));
-      printValue("NewRatio           = ", getFlagValue("NewRatio", flagMap));
-      printValue("SurvivorRatio      = ", getFlagValue("SurvivorRatio", flagMap));
-      printValMB("MetaspaceSize      = ", getFlagValue("MetaspaceSize", flagMap));
-      printValMB("ClassMetaspaceSize = ", getFlagValue("ClassMetaspaceSize", flagMap));
-      printValMB("MaxMetaspaceSize   = ", getFlagValue("MaxMetaspaceSize", flagMap));
-      printValMB("G1HeapRegionSize   = ", HeapRegion.grainBytes());
+      printValue("MinHeapFreeRatio         = ", getFlagValue("MinHeapFreeRatio", flagMap));
+      printValue("MaxHeapFreeRatio         = ", getFlagValue("MaxHeapFreeRatio", flagMap));
+      printValMB("MaxHeapSize              = ", getFlagValue("MaxHeapSize", flagMap));
+      printValMB("NewSize                  = ", getFlagValue("NewSize", flagMap));
+      printValMB("MaxNewSize               = ", getFlagValue("MaxNewSize", flagMap));
+      printValMB("OldSize                  = ", getFlagValue("OldSize", flagMap));
+      printValue("NewRatio                 = ", getFlagValue("NewRatio", flagMap));
+      printValue("SurvivorRatio            = ", getFlagValue("SurvivorRatio", flagMap));
+      printValMB("MetaspaceSize            = ", getFlagValue("MetaspaceSize", flagMap));
+      printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap));
+      printValMB("MaxMetaspaceSize         = ", getFlagValue("MaxMetaspaceSize", flagMap));
+      printValMB("G1HeapRegionSize         = ", HeapRegion.grainBytes());
 
       System.out.println();
       System.out.println("Heap Usage:");
--- a/make/bsd/makefiles/gcc.make	Sat Sep 14 20:40:34 2013 +0100
+++ b/make/bsd/makefiles/gcc.make	Thu Oct 03 19:13:12 2013 +0100
@@ -80,7 +80,7 @@
     HOSTCC  = $(CC)
   endif
 
-  AS   = $(CC) -c -x assembler-with-cpp
+  AS   = $(CC) -c 
 endif
 
 
@@ -347,6 +347,13 @@
   LDFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN)
 endif
 
+
+#------------------------------------------------------------------------
+# Assembler flags
+
+# Enforce preprocessing of .s files
+ASFLAGS += -x assembler-with-cpp
+
 #------------------------------------------------------------------------
 # Linker flags
 
--- a/make/excludeSrc.make	Sat Sep 14 20:40:34 2013 +0100
+++ b/make/excludeSrc.make	Thu Oct 03 19:13:12 2013 +0100
@@ -88,7 +88,7 @@
 	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
 	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
 	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
-	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
+	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
 	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
 	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
 	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
@@ -99,7 +99,7 @@
 	psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
 	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
 	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
-	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
+	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp hSpaceCounters.cpp
 endif
 
 ifeq ($(INCLUDE_NMT), false)
--- a/make/hotspot_version	Sat Sep 14 20:40:34 2013 +0100
+++ b/make/hotspot_version	Thu Oct 03 19:13:12 2013 +0100
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=49
+HS_BUILD_NUMBER=52
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/jprt.properties	Sat Sep 14 20:40:34 2013 +0100
+++ b/make/jprt.properties	Thu Oct 03 19:13:12 2013 +0100
@@ -120,13 +120,13 @@
 jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
 jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
 
-jprt.my.windows.i586.jdk8=windows_i586_5.1
-jprt.my.windows.i586.jdk7=windows_i586_5.1
+jprt.my.windows.i586.jdk8=windows_i586_6.1
+jprt.my.windows.i586.jdk7=windows_i586_6.1
 jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
-jprt.my.windows.x64.jdk8=windows_x64_5.2
-jprt.my.windows.x64.jdk7=windows_x64_5.2
+jprt.my.windows.x64.jdk8=windows_x64_6.1
+jprt.my.windows.x64.jdk7=windows_x64_6.1
 jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -105,7 +105,7 @@
         if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
       }
 
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
             src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
       }
@@ -963,7 +963,7 @@
       case T_METADATA:  __ ld_ptr(base, offset, to_reg->as_register()); break;
       case T_ADDRESS:
 #ifdef _LP64
-        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) {
+        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
           __ lduw(base, offset, to_reg->as_register());
           __ decode_klass_not_null(to_reg->as_register());
         } else
@@ -2208,7 +2208,7 @@
     // We don't know the array types are compatible
     if (basic_type != T_OBJECT) {
       // Simple test for basic type arrays
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         // We don't need decode because we just need to compare
         __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
         __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
@@ -2342,7 +2342,7 @@
     // but not necessarily exactly of type default_type.
     Label known_ok, halt;
     metadata2reg(op->expected_type()->constant_encoding(), tmp);
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       // tmp holds the default type. It currently comes uncompressed after the
       // load of a constant, so encode it.
       __ encode_klass_not_null(tmp);
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -186,7 +186,7 @@
     set((intx)markOopDesc::prototype(), t1);
   }
   st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Save klass
     mov(klass, t1);
     encode_klass_not_null(t1);
@@ -196,7 +196,7 @@
   }
   if (len->is_valid()) {
     st(len, obj, arrayOopDesc::length_offset_in_bytes());
-  } else if (UseCompressedKlassPointers) {
+  } else if (UseCompressedClassPointers) {
     // otherwise length is in the class gap
     store_klass_gap(G0, obj);
   }
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -57,6 +57,7 @@
 define_pd_global(bool, UseTLAB,                      true);
 define_pd_global(bool, ResizeTLAB,                   true);
 define_pd_global(intx, LoopUnrollLimit,              60); // Design center runs on 1.3.1
+define_pd_global(intx, MinJumpTableSize,             5);
 
 // Peephole and CISC spilling both break the graph, and so makes the
 // scheduler sick.
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -3911,7 +3911,7 @@
   // The number of bytes in this code is used by
   // MachCallDynamicJavaNode::ret_addr_offset()
   // if this changes, change that.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
     decode_klass_not_null(klass);
   } else {
@@ -3920,7 +3920,7 @@
 }
 
 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(dst_oop != klass, "not enough registers");
     encode_klass_not_null(klass);
     st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
@@ -3930,7 +3930,7 @@
 }
 
 void MacroAssembler::store_klass_gap(Register s, Register d) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(s != d, "not enough registers");
     st(s, d, oopDesc::klass_gap_offset_in_bytes());
   }
@@ -4089,7 +4089,7 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert (UseCompressedKlassPointers, "must be compressed");
+  assert (UseCompressedClassPointers, "must be compressed");
   assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   assert(r != G6_heapbase, "bad register choice");
   set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@@ -4105,7 +4105,7 @@
   if (src == dst) {
     encode_klass_not_null(src);
   } else {
-    assert (UseCompressedKlassPointers, "must be compressed");
+    assert (UseCompressedClassPointers, "must be compressed");
     assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
     set((intptr_t)Universe::narrow_klass_base(), dst);
     sub(src, dst, dst);
@@ -4119,7 +4119,7 @@
 // generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
 // the instructions they generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   // set + add + set
   int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
     insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
@@ -4135,7 +4135,7 @@
 void  MacroAssembler::decode_klass_not_null(Register r) {
   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   // pd_code_size_limit.
-  assert (UseCompressedKlassPointers, "must be compressed");
+  assert (UseCompressedClassPointers, "must be compressed");
   assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   assert(r != G6_heapbase, "bad register choice");
   set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@@ -4151,7 +4151,7 @@
   } else {
     // Do not add assert code to this unless you change vtableStubs_sparc.cpp
     // pd_code_size_limit.
-    assert (UseCompressedKlassPointers, "must be compressed");
+    assert (UseCompressedClassPointers, "must be compressed");
     assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
     if (Universe::narrow_klass_shift() != 0) {
       assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
@@ -4167,7 +4167,7 @@
 }
 
 void MacroAssembler::reinit_heapbase() {
-  if (UseCompressedOops || UseCompressedKlassPointers) {
+  if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
       set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
     } else {
--- a/src/cpu/sparc/vm/sparc.ad	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/sparc/vm/sparc.ad	Thu Oct 03 19:13:12 2013 +0100
@@ -557,7 +557,7 @@
     int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
     int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
     int klass_load_size;
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       assert(Universe::heap() != NULL, "java heap should be initialized");
       klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
     } else {
@@ -1657,7 +1657,7 @@
 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   st->print_cr("\nUEP:");
 #ifdef    _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
     st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
@@ -1897,7 +1897,7 @@
 
 bool Matcher::narrow_klass_use_complex_address() {
   NOT_LP64(ShouldNotCallThis());
-  assert(UseCompressedKlassPointers, "only for compressed klass code");
+  assert(UseCompressedClassPointers, "only for compressed klass code");
   return false;
 }
 
@@ -2561,7 +2561,7 @@
       int off = __ offset();
       __ load_klass(O0, G3_scratch);
       int klass_load_size;
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         assert(Universe::heap() != NULL, "java heap should be initialized");
         klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
       } else {
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -2945,7 +2945,7 @@
 
     BLOCK_COMMENT("arraycopy argument klass checks");
     //  get src->klass()
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ delayed()->nop(); // ??? not good
       __ load_klass(src, G3_src_klass);
     } else {
@@ -2980,7 +2980,7 @@
     // Load 32-bits signed value. Use br() instruction with it to check icc.
     __ lduw(G3_src_klass, lh_offset, G5_lh);
 
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ load_klass(dst, G4_dst_klass);
     }
     // Handle objArrays completely differently...
@@ -2988,7 +2988,7 @@
     __ set(objArray_lh, O5_temp);
     __ cmp(G5_lh,       O5_temp);
     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ delayed()->nop();
     } else {
       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
--- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -52,6 +52,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int sparc_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), sparc_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -125,6 +130,11 @@
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   const int sparc_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), sparc_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -218,13 +228,13 @@
       // ld;ld;ld,jmp,nop
       const int basic = 5*BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedKlassPointers ?
+                        (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return basic + slop;
     } else {
       const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedKlassPointers ?
+                        (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return (basic + slop);
     }
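
Note: the same null check is added to every vtable/itable stub generator
touched by this change (sparc here, x86_32 and x86_64 below), because
VtableStub's operator new allocates from the code cache and can fail. A
hedged caller-side sketch (a fragment; the call site shown is illustrative,
not a real HotSpot caller):

    // Illustrative only: with this change create_vtable_stub() can return
    // NULL when the code cache is full, so callers must check before using
    // s->entry_point() and propagate the failure instead of crashing.
    VtableStub* s = VtableStubs::create_vtable_stub(vtable_index);
    if (s == NULL) {
      return NULL;
    }
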
--- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -148,7 +148,7 @@
 
   static int adjust_reg_range(int range) {
     // Reduce the number of available regs (to free r12) in case of compressed oops
-    if (UseCompressedOops || UseCompressedKlassPointers) return range - 1;
+    if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
     return range;
   }
 
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -341,7 +341,7 @@
   Register receiver = FrameMap::receiver_opr->as_register();
   Register ic_klass = IC_Klass;
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-  const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
+  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
   if (!do_post_padding) {
     // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
     while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
@@ -1263,7 +1263,7 @@
       break;
 
     case T_ADDRESS:
-      if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
         __ movl(dest->as_register(), from_addr);
       } else {
         __ movptr(dest->as_register(), from_addr);
@@ -1371,7 +1371,7 @@
     __ verify_oop(dest->as_register());
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ decode_klass_not_null(dest->as_register());
     }
 #endif
@@ -1716,7 +1716,7 @@
   } else if (obj == klass_RInfo) {
     klass_RInfo = dst;
   }
-  if (k->is_loaded() && !UseCompressedKlassPointers) {
+  if (k->is_loaded() && !UseCompressedClassPointers) {
     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
   } else {
     Rtmp1 = op->tmp3()->as_register();
@@ -1724,14 +1724,6 @@
   }
 
   assert_different_registers(obj, k_RInfo, klass_RInfo);
-  if (!k->is_loaded()) {
-    klass2reg_with_patching(k_RInfo, op->info_for_patch());
-  } else {
-#ifdef _LP64
-    __ mov_metadata(k_RInfo, k->constant_encoding());
-#endif // _LP64
-  }
-  assert(obj != k_RInfo, "must be different");
 
   __ cmpptr(obj, (int32_t)NULL_WORD);
   if (op->should_profile()) {
@@ -1748,13 +1740,21 @@
   } else {
     __ jcc(Assembler::equal, *obj_is_null);
   }
+
+  if (!k->is_loaded()) {
+    klass2reg_with_patching(k_RInfo, op->info_for_patch());
+  } else {
+#ifdef _LP64
+    __ mov_metadata(k_RInfo, k->constant_encoding());
+#endif // _LP64
+  }
   __ verify_oop(obj);
 
   if (op->fast_check()) {
     // get object class
     // not a safepoint as obj null check happens earlier
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ load_klass(Rtmp1, obj);
       __ cmpptr(k_RInfo, Rtmp1);
     } else {
@@ -3294,7 +3294,7 @@
     // We don't know the array types are compatible
     if (basic_type != T_OBJECT) {
       // Simple test for basic type arrays
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         __ movl(tmp, src_klass_addr);
         __ cmpl(tmp, dst_klass_addr);
       } else {
@@ -3456,21 +3456,21 @@
     Label known_ok, halt;
     __ mov_metadata(tmp, default_type->constant_encoding());
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ encode_klass_not_null(tmp);
     }
 #endif
 
     if (basic_type != T_OBJECT) {
 
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
       else                   __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::notEqual, halt);
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, src_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, src_klass_addr);
       else                   __ cmpptr(tmp, src_klass_addr);
       __ jcc(Assembler::equal, known_ok);
     } else {
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
       else                   __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::equal, known_ok);
       __ cmpptr(src, dst);
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1239,7 +1239,7 @@
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
     tmp3 = new_register(objectType);
   }
   __ checkcast(reg, obj.result(), x->klass(),
@@ -1261,7 +1261,7 @@
   }
   obj.load_item();
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
     tmp3 = new_register(objectType);
   }
   __ instanceof(reg, obj.result(), x->klass(),
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -157,7 +157,7 @@
     movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
   }
 #ifdef _LP64
-  if (UseCompressedKlassPointers) { // Take care not to kill klass
+  if (UseCompressedClassPointers) { // Take care not to kill klass
     movptr(t1, klass);
     encode_klass_not_null(t1);
     movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
@@ -171,7 +171,7 @@
     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   }
 #ifdef _LP64
-  else if (UseCompressedKlassPointers) {
+  else if (UseCompressedClassPointers) {
     xorptr(t1, t1);
     store_klass_gap(obj, t1);
   }
@@ -334,7 +334,7 @@
   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   int start_offset = offset();
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     load_klass(rscratch1, receiver);
     cmpptr(rscratch1, iCache);
   } else {
@@ -345,7 +345,7 @@
   jump_cc(Assembler::notEqual,
           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-  assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
+  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 }
 
 
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -30,7 +30,6 @@
 
 // Sets the default values for platform dependent flags used by the server compiler.
 // (see c2_globals.hpp).  Alpha-sorted.
-
 define_pd_global(bool, BackgroundCompilation,        true);
 define_pd_global(bool, UseTLAB,                      true);
 define_pd_global(bool, ResizeTLAB,                   true);
@@ -52,6 +51,7 @@
 define_pd_global(intx, ConditionalMoveLimit,         3);
 define_pd_global(intx, FLOATPRESSURE,                6);
 define_pd_global(intx, FreqInlineSize,               325);
+define_pd_global(intx, MinJumpTableSize,             10);
 #ifdef AMD64
 define_pd_global(intx, INTPRESSURE,                  13);
 define_pd_global(intx, InteriorEntryAlignment,       16);
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1635,7 +1635,7 @@
 #ifdef ASSERT
   // TraceBytecodes does not use r12 but saves it over the call, so don't verify
   // r12 is the heapbase.
-  LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
+  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
 #endif // ASSERT
 
   assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
@@ -4802,7 +4802,7 @@
 
 void MacroAssembler::load_klass(Register dst, Register src) {
 #ifdef _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
     decode_klass_not_null(dst);
   } else
@@ -4817,7 +4817,7 @@
 
 void MacroAssembler::store_klass(Register dst, Register src) {
 #ifdef _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     encode_klass_not_null(src);
     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
   } else
@@ -4892,7 +4892,7 @@
 
 #ifdef _LP64
 void MacroAssembler::store_klass_gap(Register dst, Register src) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Store to klass gap in destination
     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
   }
@@ -5075,7 +5075,7 @@
 // when (Universe::heap() != NULL).  Hence, if the instructions they
 // generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
   return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
 }
@@ -5085,7 +5085,7 @@
 void  MacroAssembler::decode_klass_not_null(Register r) {
   // Note: it will change flags
   assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert(r != r12_heapbase, "Decoding a klass in r12");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
@@ -5103,7 +5103,7 @@
 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
   // Note: it will change flags
   assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   if (dst == src) {
     decode_klass_not_null(dst);
   } else {
@@ -5141,7 +5141,7 @@
 }
 
 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5149,7 +5149,7 @@
 }
 
 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5175,7 +5175,7 @@
 }
 
 void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5183,7 +5183,7 @@
 }
 
 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5191,7 +5191,7 @@
 }
 
 void MacroAssembler::reinit_heapbase() {
-  if (UseCompressedOops || UseCompressedKlassPointers) {
+  if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
       if (Universe::narrow_oop_base() == NULL) {
         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
--- a/src/cpu/x86/vm/templateInterpreter_x86.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/templateInterpreter_x86.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -34,9 +34,9 @@
   // Run with +PrintInterpreter to get the VM to print out the size.
   // Max size with JVMTI
 #ifdef AMD64
-  const static int InterpreterCodeSize = 200 * 1024;
+  const static int InterpreterCodeSize = 208 * 1024;
 #else
-  const static int InterpreterCodeSize = 168 * 1024;
+  const static int InterpreterCodeSize = 176 * 1024;
 #endif // AMD64
 
 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
--- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -58,6 +58,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int i486_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), i486_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -132,6 +137,11 @@
   //            add code here, bump the code stub size returned by pd_code_size_limit!
   const int i486_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), i486_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
--- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -49,6 +49,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int amd64_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), amd64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -126,6 +131,11 @@
   // returned by pd_code_size_limit!
   const int amd64_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), amd64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -211,11 +221,11 @@
   if (is_vtable_stub) {
     // Vtable stub size
     return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   } else {
     // Itable stub size
     return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   }
   // In order to tune these parameters, run the JVM with VM options
   // +PrintMiscellaneous and +WizardMode to see information about
--- a/src/cpu/x86/vm/x86_64.ad	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/cpu/x86/vm/x86_64.ad	Thu Oct 03 19:13:12 2013 +0100
@@ -1391,7 +1391,7 @@
 #ifndef PRODUCT
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
     st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     st->print_cr("\tcmpq    rax, rscratch1\t # Inline cache check");
@@ -1408,7 +1408,7 @@
 {
   MacroAssembler masm(&cbuf);
   uint insts_size = cbuf.insts_size();
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     masm.load_klass(rscratch1, j_rarg0);
     masm.cmpptr(rax, rscratch1);
   } else {
@@ -1557,7 +1557,7 @@
 }
 
 bool Matcher::narrow_klass_use_complex_address() {
-  assert(UseCompressedKlassPointers, "only for compressed klass code");
+  assert(UseCompressedClassPointers, "only for compressed klass code");
   return (LogKlassAlignmentInBytes <= 3);
 }
 
--- a/src/os/bsd/vm/os_bsd.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os/bsd/vm/os_bsd.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -3589,8 +3589,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
--- a/src/os/linux/vm/os_linux.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -131,6 +131,7 @@
 bool os::Linux::_supports_fast_thread_cpu_time = false;
 const char * os::Linux::_glibc_version = NULL;
 const char * os::Linux::_libpthread_version = NULL;
+pthread_condattr_t os::Linux::_condattr[1];
 
 static jlong initial_time_count=0;
 
@@ -1399,12 +1400,15 @@
           clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
         // yes, monotonic clock is supported
         _clock_gettime = clock_gettime_func;
+        return;
       } else {
         // close librt if there is no monotonic clock
         dlclose(handle);
       }
     }
   }
+  warning("No monotonic clock was available - timed services may " \
+          "be adversely affected if the time-of-day clock changes");
 }
 
 #ifndef SYS_clock_getres
@@ -2165,23 +2169,49 @@
 }
 
 // Try to identify popular distros.
-// Most Linux distributions have /etc/XXX-release file, which contains
-// the OS version string. Some have more than one /etc/XXX-release file
-// (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
-// so the order is important.
+// Most Linux distributions have a /etc/XXX-release file, which contains
+// the OS version string. Newer Linux distributions have a /etc/lsb-release
+// file that also contains the OS version string. Some have more than one
+// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
+// /etc/redhat-release.), so the order is important.
+// Any Linux distribution based on Redhat (e.g. Oracle, Mandrake, Sun JDS...) has
+// its own specific XXX-release file as well as a redhat-release file.
+// Because of this the XXX-release file needs to be searched for before the
+// redhat-release file.
+// Since Red Hat has an lsb-release file that is not very descriptive, the
+// search for redhat-release needs to be before lsb-release.
+// Since the lsb-release file is the new standard it needs to be searched
+// before the older style release files.
+// Searching system-release (Red Hat) and os-release (other Linuxes) is the
+// next-to-last resort.  The os-release file is a new standard that contains
+// distribution information and the system-release file seems to be an old
+// standard that has been replaced by the lsb-release and os-release files.
+// Searching for the debian_version file is the last resort.  It contains
+// an informative string like "6.0.6" or "wheezy/sid". Because of this
+// "Debian " is printed before the contents of the debian_version file.
 void os::Linux::print_distro_info(outputStream* st) {
-  if (!_print_ascii_file("/etc/mandrake-release", st) &&
-      !_print_ascii_file("/etc/sun-release", st) &&
-      !_print_ascii_file("/etc/redhat-release", st) &&
-      !_print_ascii_file("/etc/SuSE-release", st) &&
-      !_print_ascii_file("/etc/turbolinux-release", st) &&
-      !_print_ascii_file("/etc/gentoo-release", st) &&
-      !_print_ascii_file("/etc/debian_version", st) &&
-      !_print_ascii_file("/etc/ltib-release", st) &&
-      !_print_ascii_file("/etc/angstrom-version", st)) {
-      st->print("Linux");
-  }
-  st->cr();
+   if (!_print_ascii_file("/etc/oracle-release", st) &&
+       !_print_ascii_file("/etc/mandriva-release", st) &&
+       !_print_ascii_file("/etc/mandrake-release", st) &&
+       !_print_ascii_file("/etc/sun-release", st) &&
+       !_print_ascii_file("/etc/redhat-release", st) &&
+       !_print_ascii_file("/etc/lsb-release", st) &&
+       !_print_ascii_file("/etc/SuSE-release", st) &&
+       !_print_ascii_file("/etc/turbolinux-release", st) &&
+       !_print_ascii_file("/etc/gentoo-release", st) &&
+       !_print_ascii_file("/etc/ltib-release", st) &&
+       !_print_ascii_file("/etc/angstrom-version", st) &&
+       !_print_ascii_file("/etc/system-release", st) &&
+       !_print_ascii_file("/etc/os-release", st)) {
+
+       if (file_exists("/etc/debian_version")) {
+         st->print("Debian ");
+         _print_ascii_file("/etc/debian_version", st);
+       } else {
+         st->print("Linux");
+       }
+   }
+   st->cr();
 }
 
 void os::Linux::print_libversion_info(outputStream* st) {
@@ -4709,6 +4739,26 @@
 
   Linux::clock_init();
   initial_time_count = os::elapsed_counter();
+
+  // pthread_condattr initialization for monotonic clock
+  int status;
+  pthread_condattr_t* _condattr = os::Linux::condAttr();
+  if ((status = pthread_condattr_init(_condattr)) != 0) {
+    fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
+  }
+  // Only set the clock if CLOCK_MONOTONIC is available
+  if (Linux::supports_monotonic_clock()) {
+    if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
+      if (status == EINVAL) {
+        warning("Unable to use monotonic clock with relative timed-waits" \
+                " - changes to the time-of-day clock may have adverse effects");
+      } else {
+        fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
+      }
+    }
+  }
+  // else it defaults to CLOCK_REALTIME
+
   pthread_mutex_init(&dl_mutex, NULL);
 
   // If the pagesize of the VM is greater than 8K determine the appropriate
@@ -4755,8 +4805,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
@@ -5519,21 +5567,36 @@
 
 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
   if (millis < 0)  millis = 0;
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
+
   jlong seconds = millis / 1000;
   millis %= 1000;
   if (seconds > 50000000) { // see man cond_timedwait(3T)
     seconds = 50000000;
   }
-  abstime->tv_sec = now.tv_sec  + seconds;
-  long       usec = now.tv_usec + millis * 1000;
-  if (usec >= 1000000) {
-    abstime->tv_sec += 1;
-    usec -= 1000000;
-  }
-  abstime->tv_nsec = usec * 1000;
+
+  if (os::Linux::supports_monotonic_clock()) {
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
+    if (nanos >= NANOSECS_PER_SEC) {
+      abstime->tv_sec += 1;
+      nanos -= NANOSECS_PER_SEC;
+    }
+    abstime->tv_nsec = nanos;
+  } else {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long usec = now.tv_usec + millis * 1000;
+    if (usec >= 1000000) {
+      abstime->tv_sec += 1;
+      usec -= 1000000;
+    }
+    abstime->tv_nsec = usec * 1000;
+  }
   return abstime;
 }
 
@@ -5625,7 +5688,7 @@
     status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       pthread_cond_destroy (_cond);
-      pthread_cond_init (_cond, NULL) ;
+      pthread_cond_init (_cond, os::Linux::condAttr()) ;
     }
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
@@ -5726,32 +5789,50 @@
 
 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
   assert (time > 0, "convertTime");
-
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
-
-  time_t max_secs = now.tv_sec + MAX_SECS;
-
-  if (isAbsolute) {
-    jlong secs = time / 1000;
-    if (secs > max_secs) {
-      absTime->tv_sec = max_secs;
+  time_t max_secs = 0;
+
+  if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+
+    max_secs = now.tv_sec + MAX_SECS;
+
+    if (isAbsolute) {
+      jlong secs = time / 1000;
+      if (secs > max_secs) {
+        absTime->tv_sec = max_secs;
+      } else {
+        absTime->tv_sec = secs;
+      }
+      absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
+    } else {
+      jlong secs = time / NANOSECS_PER_SEC;
+      if (secs >= MAX_SECS) {
+        absTime->tv_sec = max_secs;
+        absTime->tv_nsec = 0;
+      } else {
+        absTime->tv_sec = now.tv_sec + secs;
+        absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+        if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
+          absTime->tv_nsec -= NANOSECS_PER_SEC;
+          ++absTime->tv_sec; // note: this must be <= max_secs
+        }
+      }
     }
-    else {
-      absTime->tv_sec = secs;
-    }
-    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
-  }
-  else {
+  } else {
+    // must be relative using monotonic clock
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    max_secs = now.tv_sec + MAX_SECS;
     jlong secs = time / NANOSECS_PER_SEC;
     if (secs >= MAX_SECS) {
       absTime->tv_sec = max_secs;
       absTime->tv_nsec = 0;
-    }
-    else {
+    } else {
       absTime->tv_sec = now.tv_sec + secs;
-      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
         absTime->tv_nsec -= NANOSECS_PER_SEC;
         ++absTime->tv_sec; // note: this must be <= max_secs
@@ -5831,15 +5912,19 @@
   jt->set_suspend_equivalent();
   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
 
+  assert(_cur_index == -1, "invariant");
   if (time == 0) {
-    status = pthread_cond_wait (_cond, _mutex) ;
+    _cur_index = REL_INDEX; // arbitrary choice when not timed
+    status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
   } else {
-    status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
+    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
+    status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
-      pthread_cond_destroy (_cond) ;
-      pthread_cond_init    (_cond, NULL);
+      pthread_cond_destroy (&_cond[_cur_index]) ;
+      pthread_cond_init    (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
     }
   }
+  _cur_index = -1;
   assert_status(status == 0 || status == EINTR ||
                 status == ETIME || status == ETIMEDOUT,
                 status, "cond_timedwait");
@@ -5868,17 +5953,24 @@
   s = _counter;
   _counter = 1;
   if (s < 1) {
-     if (WorkAroundNPTLTimedWaitHang) {
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
+    // thread might be parked
+    if (_cur_index != -1) {
+      // thread is definitely parked
+      if (WorkAroundNPTLTimedWaitHang) {
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-     } else {
+        assert (status == 0, "invariant");
+      } else {
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
-     }
+        assert (status == 0, "invariant");
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
+      }
+    } else {
+      status = pthread_mutex_unlock(_mutex);
+      assert (status == 0, "invariant") ;
+    }
   } else {
     pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
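
Note on the os_linux.cpp changes above: the condAttr() attribute, the
CLOCK_MONOTONIC path in compute_abstime(), and the reworked unpackTime() all
implement one idea — relative timed waits should be measured on the monotonic
clock so a time-of-day clock step cannot shorten or lengthen them. A
self-contained sketch of the pattern, with error handling elided and no
HotSpot types (assumes pthread_condattr_setclock() succeeds; the diff above
falls back to CLOCK_REALTIME, with a warning, when it does not):

    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond;

    // Bind the condvar's timed waits to CLOCK_MONOTONIC at init time.
    static void init_monotonic_cond() {
      pthread_condattr_t attr;
      pthread_condattr_init(&attr);
      pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
      pthread_cond_init(&cond, &attr);
      pthread_condattr_destroy(&attr);
    }

    // Wait up to 'millis' relative to now; caller must hold 'mutex'.
    // The deadline is computed on the same clock the condvar is bound to.
    static int timed_wait(long millis) {
      struct timespec abstime;
      clock_gettime(CLOCK_MONOTONIC, &abstime);
      abstime.tv_sec  += millis / 1000;
      abstime.tv_nsec += (millis % 1000) * 1000000L;
      if (abstime.tv_nsec >= 1000000000L) {
        abstime.tv_sec  += 1;
        abstime.tv_nsec -= 1000000000L;
      }
      return pthread_cond_timedwait(&cond, &mutex, &abstime);
    }
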
--- a/src/os/linux/vm/os_linux.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os/linux/vm/os_linux.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -221,6 +221,13 @@
 
   static jlong fast_thread_cpu_time(clockid_t clockid);
 
+  // pthread_cond clock support
+  private:
+  static pthread_condattr_t _condattr[1];
+
+  public:
+  static pthread_condattr_t* condAttr() { return _condattr; }
+
   // Stack repair handling
 
   // none present
@@ -295,7 +302,7 @@
   public:
     PlatformEvent() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
+      status = pthread_cond_init (_cond, os::Linux::condAttr());
       assert_status(status == 0, status, "cond_init");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
@@ -310,14 +317,19 @@
     void park () ;
     void unpark () ;
     int  TryPark () ;
-    int  park (jlong millis) ;
+    int  park (jlong millis) ; // relative timed-wait only
     void SetAssociation (Thread * a) { _Assoc = a ; }
 } ;
 
 class PlatformParker : public CHeapObj<mtInternal> {
   protected:
+    enum {
+        REL_INDEX = 0,
+        ABS_INDEX = 1
+    };
+    int _cur_index;  // which cond is in use: -1, 0, 1
     pthread_mutex_t _mutex [1] ;
-    pthread_cond_t  _cond  [1] ;
+    pthread_cond_t  _cond  [2] ; // one for relative times and one for abs.
 
   public:       // TODO-FIXME: make dtor private
     ~PlatformParker() { guarantee (0, "invariant") ; }
@@ -325,10 +337,13 @@
   public:
     PlatformParker() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
-      assert_status(status == 0, status, "cond_init");
+      status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
+      assert_status(status == 0, status, "cond_init rel");
+      status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
+      assert_status(status == 0, status, "cond_init abs");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
+      _cur_index = -1; // mark as unused
     }
 };
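
Note: PlatformParker grows a second condition variable because a pthread
condvar is bound to a single clock when it is initialized. Relative park()
timeouts want the CLOCK_MONOTONIC attribute (see the sketch after the
os_linux.cpp diff above), while absolute deadlines from java.util.concurrent
are epoch-based and must stay on the default CLOCK_REALTIME. A compressed,
hypothetical sketch of the selection, mirroring the enum above:

    #include <pthread.h>
    #include <time.h>

    enum { REL_INDEX = 0, ABS_INDEX = 1 };

    // cond[REL_INDEX] was initialized with CLOCK_MONOTONIC, cond[ABS_INDEX]
    // with the default CLOCK_REALTIME; pick the condvar whose clock matches
    // how 'abstime' was computed.
    static int park_wait(bool is_absolute, pthread_mutex_t* mutex,
                         pthread_cond_t cond[2],
                         const struct timespec* abstime) {
      int idx = is_absolute ? ABS_INDEX : REL_INDEX;
      return pthread_cond_timedwait(&cond[idx], mutex, abstime);
    }
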
 
--- a/src/os/solaris/vm/os_solaris.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -5178,9 +5178,7 @@
     if(Verbose && PrintMiscellaneous)
       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
 #endif
-}
-
-  os::large_page_init();
+  }
 
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
--- a/src/os/windows/vm/decoder_windows.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os/windows/vm/decoder_windows.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -32,7 +32,11 @@
   _can_decode_in_vm = false;
   _pfnSymGetSymFromAddr64 = NULL;
   _pfnUndecorateSymbolName = NULL;
-
+#ifdef AMD64
+  _pfnStackWalk64 = NULL;
+  _pfnSymFunctionTableAccess64 = NULL;
+  _pfnSymGetModuleBase64 = NULL;
+#endif
   _decoder_status = no_error;
   initialize();
 }
@@ -53,14 +57,24 @@
     _pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName");
 
     if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) {
-      _pfnSymGetSymFromAddr64 = NULL;
-      _pfnUndecorateSymbolName = NULL;
-      ::FreeLibrary(handle);
-      _dbghelp_handle = NULL;
+      uninitialize();
       _decoder_status = helper_func_error;
       return;
     }
 
+#ifdef AMD64
+    _pfnStackWalk64 = (pfn_StackWalk64)::GetProcAddress(handle, "StackWalk64");
+    _pfnSymFunctionTableAccess64 = (pfn_SymFunctionTableAccess64)::GetProcAddress(handle, "SymFunctionTableAccess64");
+    _pfnSymGetModuleBase64 = (pfn_SymGetModuleBase64)::GetProcAddress(handle, "SymGetModuleBase64");
+    if (_pfnStackWalk64 == NULL || _pfnSymFunctionTableAccess64 == NULL || _pfnSymGetModuleBase64 == NULL) {
+      // We can't call StackWalk64 to walk the stack, but we are still
+      // able to decode the symbols. Let's limp on.
+      _pfnStackWalk64 = NULL;
+      _pfnSymFunctionTableAccess64 = NULL;
+      _pfnSymGetModuleBase64 = NULL;
+    }
+#endif
+
     HANDLE hProcess = ::GetCurrentProcess();
     _pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
     if (!_pfnSymInitialize(hProcess, NULL, TRUE)) {
@@ -156,6 +170,11 @@
 void WindowsDecoder::uninitialize() {
   _pfnSymGetSymFromAddr64 = NULL;
   _pfnUndecorateSymbolName = NULL;
+#ifdef AMD64
+  _pfnStackWalk64 = NULL;
+  _pfnSymFunctionTableAccess64 = NULL;
+  _pfnSymGetModuleBase64 = NULL;
+#endif
   if (_dbghelp_handle != NULL) {
     ::FreeLibrary(_dbghelp_handle);
   }
@@ -195,3 +214,65 @@
          _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
 }
 
+#ifdef AMD64
+BOOL WindowsDbgHelp::StackWalk64(DWORD MachineType,
+                                 HANDLE hProcess,
+                                 HANDLE hThread,
+                                 LPSTACKFRAME64 StackFrame,
+                                 PVOID ContextRecord,
+                                 PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                                 PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                                 PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                                 PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress) {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error() && wd->_pfnStackWalk64) {
+    return wd->_pfnStackWalk64(MachineType,
+                               hProcess,
+                               hThread,
+                               StackFrame,
+                               ContextRecord,
+                               ReadMemoryRoutine,
+                               FunctionTableAccessRoutine,
+                               GetModuleBaseRoutine,
+                               TranslateAddress);
+  } else {
+    return false;
+  }
+}
+
+PVOID WindowsDbgHelp::SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error() && wd->_pfnSymFunctionTableAccess64) {
+    return wd->_pfnSymFunctionTableAccess64(hProcess, AddrBase);
+  } else {
+    return NULL;
+  }
+}
+
+pfn_SymFunctionTableAccess64 WindowsDbgHelp::pfnSymFunctionTableAccess64() {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error()) {
+    return wd->_pfnSymFunctionTableAccess64;
+  } else {
+    return NULL;
+  }
+}
+
+pfn_SymGetModuleBase64 WindowsDbgHelp::pfnSymGetModuleBase64() {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error()) {
+    return wd->_pfnSymGetModuleBase64;
+  } else {
+    return NULL;
+  }
+}
+
+#endif // AMD64
--- a/src/os/windows/vm/decoder_windows.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os/windows/vm/decoder_windows.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -38,6 +38,20 @@
 typedef BOOL  (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR);
 typedef BOOL  (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int);
 
+#ifdef AMD64
+typedef BOOL  (WINAPI *pfn_StackWalk64)(DWORD MachineType,
+                                        HANDLE hProcess,
+                                        HANDLE hThread,
+                                        LPSTACKFRAME64 StackFrame,
+                                        PVOID ContextRecord,
+                                        PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                                        PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                                        PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                                        PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef PVOID (WINAPI *pfn_SymFunctionTableAccess64)(HANDLE hProcess, DWORD64 AddrBase);
+typedef DWORD64 (WINAPI *pfn_SymGetModuleBase64)(HANDLE hProcess, DWORD64 dwAddr);
+#endif
+
 class WindowsDecoder : public AbstractDecoder {
 
 public:
@@ -61,7 +75,34 @@
   bool                      _can_decode_in_vm;
   pfn_SymGetSymFromAddr64   _pfnSymGetSymFromAddr64;
   pfn_UndecorateSymbolName  _pfnUndecorateSymbolName;
+#ifdef AMD64
+  pfn_StackWalk64              _pfnStackWalk64;
+  pfn_SymFunctionTableAccess64 _pfnSymFunctionTableAccess64;
+  pfn_SymGetModuleBase64       _pfnSymGetModuleBase64;
+
+  friend class WindowsDbgHelp;
+#endif
 };
 
+#ifdef AMD64
+// TODO: refactor and move the handling of dbghelp.dll outside of Decoder
+class WindowsDbgHelp : public Decoder {
+public:
+  static BOOL StackWalk64(DWORD MachineType,
+                          HANDLE hProcess,
+                          HANDLE hThread,
+                          LPSTACKFRAME64 StackFrame,
+                          PVOID ContextRecord,
+                          PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                          PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                          PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                          PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+  static PVOID SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase);
+
+  static pfn_SymFunctionTableAccess64 pfnSymFunctionTableAccess64();
+  static pfn_SymGetModuleBase64       pfnSymGetModuleBase64();
+};
+#endif
+
 #endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
 
--- a/src/os/windows/vm/os_windows.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os/windows/vm/os_windows.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -3189,9 +3189,12 @@
     return p_buf;
 
   } else {
+    if (TracePageSizes && Verbose) {
+       tty->print_cr("Reserving large pages in a single large chunk.");
+    }
     // normal policy just allocate it all at once
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
-    char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
+    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
     if (res != NULL) {
       address pc = CALLER_PC;
       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
@@ -3917,8 +3920,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // Setup Windows Exceptions
 
   // for debugging float code generation bugs
@@ -5429,7 +5430,7 @@
       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
         lib_name = ++start;
       } else {
-        // Need to check for C:
+        // Need to check for drive prefix
         if ((start = strchr(lib_name, ':')) != NULL) {
           lib_name = ++start;
         }
@@ -5714,7 +5715,66 @@
 #endif
 
 #ifndef PRODUCT
+
+// Test the code path in reserve_memory_special() that tries to allocate memory in a single
+// contiguous memory block at a particular address.
+// The test first finds a good approximate address to allocate at, by using the same
+// method to allocate some memory at any address. It then tries to allocate memory in
+// the vicinity (not directly after it, to avoid a by-chance reuse of that exact location).
+// This is, of course, only a heuristic: there is no guarantee that the vicinity of the
+// previously allocated memory is actually available for allocation. The only failure
+// that is reported is when the test tries to allocate at a particular location but gets a
+// different valid one. A NULL return value at this point is not considered an error and may
+// be legitimate.
+// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
 void TestReserveMemorySpecial_test() {
-  // No tests available for this platform
-}
-#endif
+  if (!UseLargePages) {
+    if (VerboseInternalVMTests) {
+      gclog_or_tty->print("Skipping test because large pages are disabled");
+    }
+    return;
+  }
+  // save current value of globals
+  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
+  bool old_use_numa_interleaving = UseNUMAInterleaving;
+
+  // set globals to make sure we hit the correct code path
+  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
+
+  // do an allocation at an address selected by the OS to get a good one.
+  const size_t large_allocation_size = os::large_page_size() * 4;
+  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
+  if (result == NULL) {
+    if (VerboseInternalVMTests) {
+      gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
+        large_allocation_size);
+    }
+  } else {
+    os::release_memory_special(result, large_allocation_size);
+
+    // Allocate another page within the recently allocated memory area; this seems to be a
+    // good location, since at least we managed to get an allocation there once.
+    const size_t expected_allocation_size = os::large_page_size();
+    char* expected_location = result + os::large_page_size();
+    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
+    if (actual_location == NULL) {
+      if (VerboseInternalVMTests) {
+        gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
+          expected_location, expected_allocation_size);
+      }
+    } else {
+      // release memory
+      os::release_memory_special(actual_location, expected_allocation_size);
+      // Check only now, after releasing the memory, so that a failing assert cannot leak it.
+      assert(actual_location == expected_location,
+        err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
+          expected_location, expected_allocation_size, actual_location));
+    }
+  }
+
+  // restore globals
+  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
+  UseNUMAInterleaving = old_use_numa_interleaving;
+}
+#endif // PRODUCT
+
--- a/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -35,7 +35,9 @@
 
 // Used on 64 bit platforms for UseCompressedOops base address
 #ifdef _LP64
-define_pd_global(uintx, HeapBaseMinAddress,      CONST64(4)*G);
+// Use 6G as the default base address because, by default, the OS maps the application
+// at 4G on Solaris-SPARC. This leaves at least 2G of address space for the native heap.
+define_pd_global(uintx, HeapBaseMinAddress,      CONST64(6)*G);
 #else
 define_pd_global(uintx, HeapBaseMinAddress,      2*G);
 #endif
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -29,6 +29,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
+#include "decoder_windows.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_windows.h"
 #include "memory/allocation.inline.hpp"
@@ -327,6 +328,94 @@
 
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 
+#ifdef AMD64
+/*
+ * Windows/x64 does not use stack frames the way expected by Java:
+ * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
+ * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
+ *     not be RBP.
+ * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
+ *
+ * So it's not possible to print the native stack using the
+ *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
+ * loop in vmError.cpp. We need to roll our own loop.
+ */
+bool os::platform_print_native_stack(outputStream* st, void* context,
+                                     char *buf, int buf_size)
+{
+  CONTEXT ctx;
+  if (context != NULL) {
+    memcpy(&ctx, context, sizeof(ctx));
+  } else {
+    RtlCaptureContext(&ctx);
+  }
+
+  st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
+
+  STACKFRAME stk;
+  memset(&stk, 0, sizeof(stk));
+  stk.AddrStack.Offset    = ctx.Rsp;
+  stk.AddrStack.Mode      = AddrModeFlat;
+  stk.AddrFrame.Offset    = ctx.Rbp;
+  stk.AddrFrame.Mode      = AddrModeFlat;
+  stk.AddrPC.Offset       = ctx.Rip;
+  stk.AddrPC.Mode         = AddrModeFlat;
+
+  int count = 0;
+  address lastpc = 0;
+  while (count++ < StackPrintLimit) {
+    intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
+    intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
+    address pc = (address)stk.AddrPC.Offset;
+
+    if (pc != NULL && sp != NULL && fp != NULL) {
+      if (count == 2 && lastpc == pc) {
+        // Skip it -- StackWalk64() may return the same PC
+        // (but different SP) on the first try.
+      } else {
+        // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
+        // may not contain what Java expects, and may cause the frame() constructor
+        // to crash. Let's just print out the symbolic address.
+        frame::print_C_frame(st, buf, buf_size, pc);
+        st->cr();
+      }
+      lastpc = pc;
+    } else {
+      break;
+    }
+
+    PVOID p = WindowsDbgHelp::SymFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
+    if (!p) {
+      // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause crash.
+      break;
+    }
+
+    BOOL result = WindowsDbgHelp::StackWalk64(
+        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
+        GetCurrentProcess(),       // __in      HANDLE hProcess,
+        GetCurrentThread(),        // __in      HANDLE hThread,
+        &stk,                      // __inout   LPSTACKFRAME64 StackFrame,
+        &ctx,                      // __inout   PVOID ContextRecord,
+        NULL,                      // __in_opt  PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+        WindowsDbgHelp::pfnSymFunctionTableAccess64(),
+                                   // __in_opt  PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+        WindowsDbgHelp::pfnSymGetModuleBase64(),
+                                   // __in_opt  PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+        NULL);                     // __in_opt  PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress
+
+    if (!result) {
+      break;
+    }
+  }
+  if (count > StackPrintLimit) {
+    st->print_cr("...<more frames>...");
+  }
+  st->cr();
+
+  return true;
+}
+#endif // AMD64
+
 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -401,6 +490,9 @@
                                      StubRoutines::x86::get_previous_fp_entry());
   if (func == NULL) return frame();
   intptr_t* fp = (*func)();
+  if (fp == NULL) {
+    return frame();
+  }
 #else
   intptr_t* fp = _get_previous_fp();
 #endif // AMD64
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -62,4 +62,10 @@
 
   static bool      register_code_area(char *low, char *high);
 
+#ifdef AMD64
+#define PLATFORM_PRINT_NATIVE_STACK 1
+static bool platform_print_native_stack(outputStream* st, void* context,
+                                        char *buf, int buf_size);
+#endif
+
 #endif // OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_HPP
--- a/src/share/tools/LogCompilation/README	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/tools/LogCompilation/README	Thu Oct 03 19:13:12 2013 +0100
@@ -4,14 +4,14 @@
 requires a 1.5 JDK to build and simply typing make should build it.
 
 It produces a jar file, logc.jar, that can be run on the
-hotspot.log from LogCompilation output like this:
+HotSpot log produced by LogCompilation (by default, hotspot_pid{pid}.log) like this:
 
-  java -jar logc.jar hotspot.log
+  java -jar logc.jar hotspot_pid1234.log
 
 This will produce something like the normal PrintCompilation output.
 Adding the -i option will also report inlining, like PrintInlining.
 
-More information about the LogCompilation output can be found at 
+More information about the LogCompilation output can be found at
 
 https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+overview
 https://wikis.oracle.com/display/HotSpotInternals/PrintCompilation
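For example, assuming the -i flag is passed after the jar name like any other argument, an invocation that also reports inlining might look like:

  java -jar logc.jar -i hotspot_pid1234.log
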
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -4219,7 +4219,9 @@
     }
   }
 
-  if (!PrintInlining)  return;
+  if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
+    return;
+  }
   CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
   if (success && CIPrintMethodCodes) {
     callee->print_codes();
--- a/src/share/vm/c1/c1_Runtime1.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -709,10 +709,10 @@
   Bytecodes::Code code       = field_access.code();
 
   // We must load the class, initialize the class and resolve the field
-  FieldAccessInfo result; // initialize class if needed
+  fieldDescriptor result; // initialize class if needed
   constantPoolHandle constants(THREAD, caller->constants());
-  LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
-  return result.klass()();
+  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
+  return result.field_holder();
 }
 
 
@@ -826,11 +826,11 @@
   if (stub_id == Runtime1::access_field_patching_id) {
 
     Bytecode_field field_access(caller_method, bci);
-    FieldAccessInfo result; // initialize class if needed
+    fieldDescriptor result; // initialize class if needed
     Bytecodes::Code code = field_access.code();
     constantPoolHandle constants(THREAD, caller_method->constants());
-    LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
-    patch_field_offset = result.field_offset();
+    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
+    patch_field_offset = result.offset();
 
     // If we're patching a field which is volatile then at compile time it
     // must not have been known to be volatile, so the generated code
--- a/src/share/vm/ci/ciArray.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciArray.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -24,13 +24,92 @@
 
 #include "precompiled.hpp"
 #include "ci/ciArray.hpp"
+#include "ci/ciArrayKlass.hpp"
+#include "ci/ciConstant.hpp"
 #include "ci/ciKlass.hpp"
 #include "ci/ciUtilities.hpp"
+#include "oops/objArrayOop.hpp"
+#include "oops/typeArrayOop.hpp"
 
 // ciArray
 //
 // This class represents an arrayOop in the HotSpot virtual
 // machine.
+static BasicType fixup_element_type(BasicType bt) {
+  if (bt == T_ARRAY)    return T_OBJECT;
+  if (bt == T_BOOLEAN)  return T_BYTE;
+  return bt;
+}
+
+ciConstant ciArray::element_value_impl(BasicType elembt,
+                                       arrayOop ary,
+                                       int index) {
+  if (ary == NULL)
+    return ciConstant();
+  assert(ary->is_array(), "");
+  if (index < 0 || index >= ary->length())
+    return ciConstant();
+  ArrayKlass* ak = (ArrayKlass*) ary->klass();
+  BasicType abt = ak->element_type();
+  if (fixup_element_type(elembt) !=
+      fixup_element_type(abt))
+    return ciConstant();
+  switch (elembt) {
+  case T_ARRAY:
+  case T_OBJECT:
+    {
+      assert(ary->is_objArray(), "");
+      objArrayOop objary = (objArrayOop) ary;
+      oop elem = objary->obj_at(index);
+      ciEnv* env = CURRENT_ENV;
+      ciObject* box = env->get_object(elem);
+      return ciConstant(T_OBJECT, box);
+    }
+  }
+  assert(ary->is_typeArray(), "");
+  typeArrayOop tary = (typeArrayOop) ary;
+  jint value = 0;
+  switch (elembt) {
+  case T_LONG:          return ciConstant(tary->long_at(index));
+  case T_FLOAT:         return ciConstant(tary->float_at(index));
+  case T_DOUBLE:        return ciConstant(tary->double_at(index));
+  default:              return ciConstant();
+  case T_BYTE:          value = tary->byte_at(index);           break;
+  case T_BOOLEAN:       value = tary->byte_at(index) & 1;       break;
+  case T_SHORT:         value = tary->short_at(index);          break;
+  case T_CHAR:          value = tary->char_at(index);           break;
+  case T_INT:           value = tary->int_at(index);            break;
+  }
+  return ciConstant(elembt, value);
+}
+
+// ------------------------------------------------------------------
+// ciArray::element_value
+//
+// Current value of an element.
+// Returns T_ILLEGAL if there is no element at the given index.
+ciConstant ciArray::element_value(int index) {
+  BasicType elembt = element_basic_type();
+  GUARDED_VM_ENTRY(
+    return element_value_impl(elembt, get_arrayOop(), index);
+  )
+}
+
+// ------------------------------------------------------------------
+// ciArray::element_value_by_offset
+//
+// Current value of an element at the specified offset.
+// Returns T_ILLEGAL if there is no element at the given offset.
+ciConstant ciArray::element_value_by_offset(intptr_t element_offset) {
+  BasicType elembt = element_basic_type();
+  intptr_t shift  = exact_log2(type2aelembytes(elembt));
+  intptr_t header = arrayOopDesc::base_offset_in_bytes(elembt);
+  intptr_t index = (element_offset - header) >> shift;
+  intptr_t offset = header + ((intptr_t)index << shift);
+  if (offset != element_offset || index != (jint)index)
+    return ciConstant();
+  return element_value((jint) index);
+}
 
 // ------------------------------------------------------------------
 // ciArray::print_impl
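As a side note, the offset-to-index round trip in element_value_by_offset() can be illustrated with a small standalone sketch. The header size (16 bytes) and element type (T_INT, 4 bytes) below are illustrative assumptions, not necessarily the values HotSpot uses on a given platform:

  #include <stdint.h>
  #include <stdio.h>

  // Recompute the offset from the derived index and reject anything that is
  // misaligned or out of range; this mirrors the "offset != element_offset"
  // check in the hunk above.
  static intptr_t offset_to_index(intptr_t element_offset) {
    const intptr_t header = 16;               // assumed array header size
    const intptr_t shift  = 2;                // log2(4), i.e. T_INT elements
    intptr_t index  = (element_offset - header) >> shift;
    intptr_t offset = header + (index << shift);
    if (index < 0 || offset != element_offset) return -1;  // no such element
    return index;
  }

  int main(void) {
    printf("%ld\n", (long)offset_to_index(24));  // prints 2
    printf("%ld\n", (long)offset_to_index(25));  // prints -1 (misaligned)
    return 0;
  }
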
--- a/src/share/vm/ci/ciArray.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciArray.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_CI_CIARRAY_HPP
 #define SHARE_VM_CI_CIARRAY_HPP
 
+#include "ci/ciArrayKlass.hpp"
+#include "ci/ciConstant.hpp"
 #include "ci/ciObject.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/objArrayOop.hpp"
@@ -45,15 +47,30 @@
 
   ciArray(ciKlass* klass, int len) : ciObject(klass), _length(len) {}
 
-  arrayOop get_arrayOop() { return (arrayOop)get_oop(); }
+  arrayOop get_arrayOop() const { return (arrayOop)get_oop(); }
 
   const char* type_string() { return "ciArray"; }
 
   void print_impl(outputStream* st);
 
+  ciConstant element_value_impl(BasicType elembt, arrayOop ary, int index);
+
 public:
   int length() { return _length; }
 
+  // Convenience routines.
+  ciArrayKlass* array_type()         { return klass()->as_array_klass(); }
+  ciType*       element_type()       { return array_type()->element_type(); }
+  BasicType     element_basic_type() { return element_type()->basic_type(); }
+
+  // Current value of an element.
+  // Returns T_ILLEGAL if there is no element at the given index.
+  ciConstant element_value(int index);
+
+  // Current value of an element at the specified offset.
+  // Returns T_ILLEGAL if there is no element at the given offset.
+  ciConstant element_value_by_offset(intptr_t element_offset);
+
   // What kind of ciObject is this?
   bool is_array()        { return true; }
   bool is_java_object()  { return true; }
--- a/src/share/vm/ci/ciConstant.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciConstant.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -41,7 +41,6 @@
   union {
     jint      _int;
     jlong     _long;
-    jint      _long_half[2];
     jfloat    _float;
     jdouble   _double;
     ciObject* _object;
@@ -111,6 +110,20 @@
     return _value._object;
   }
 
+  bool      is_null_or_zero() const {
+    if (!is_java_primitive(basic_type())) {
+      return as_object()->is_null_object();
+    } else if (type2size[basic_type()] == 1) {
+      // treat float bits as int, to avoid comparison with -0 and NaN
+      return (_value._int == 0);
+    } else if (type2size[basic_type()] == 2) {
+      // treat double bits as long, to avoid comparison with -0 and NaN
+      return (_value._long == 0);
+    } else {
+      return false;
+    }
+  }
+
   // Debugging output
   void print();
 };
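The "treat float bits as int" trick above merits a brief illustration: comparing bit patterns instead of floating-point values means -0.0 (and NaN) count as real, non-default contents. A minimal standalone demonstration, assuming IEEE-754 floats:

  #include <stdio.h>
  #include <string.h>

  int main(void) {
    float f = -0.0f;
    unsigned bits;
    memcpy(&bits, &f, sizeof bits);             // well-defined type pun
    printf("-0.0f == 0.0f : %d\n", f == 0.0f);  // 1: equal as floats
    printf("bit pattern   : 0x%08x\n", bits);   // 0x80000000: nonzero as int
    return 0;
  }
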
--- a/src/share/vm/ci/ciField.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciField.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@
 
   assert(klass->get_instanceKlass()->is_linked(), "must be linked before using its constant pool");
 
-  _cp_index = index;
   constantPoolHandle cpool(thread, klass->get_instanceKlass()->constants());
 
   // Get the field's name, signature, and type.
@@ -116,7 +115,7 @@
   // The declared holder of this field may not have been loaded.
   // Bail out with partial field information.
   if (!holder_is_accessible) {
-    // _cp_index and _type have already been set.
+    // _type has already been set.
     // The default values for _flags and _constant_value will suffice.
     // We need values for _holder, _offset,  and _is_constant,
     _holder = declared_holder;
@@ -146,8 +145,6 @@
 ciField::ciField(fieldDescriptor *fd): _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) {
   ASSERT_IN_VM;
 
-  _cp_index = -1;
-
   // Get the field's name, signature, and type.
   ciEnv* env = CURRENT_ENV;
   _name = env->get_symbol(fd->name());
@@ -189,12 +186,14 @@
   _holder = CURRENT_ENV->get_instance_klass(fd->field_holder());
 
   // Check to see if the field is constant.
-  if (_holder->is_initialized() && this->is_final()) {
+  bool is_final = this->is_final();
+  bool is_stable = FoldStableValues && this->is_stable();
+  if (_holder->is_initialized() && (is_final || is_stable)) {
     if (!this->is_static()) {
       // A field can be constant if it's a final static field or if
       // it's a final non-static field of a trusted class (classes in
       // java.lang.invoke and sun.invoke packages and subpackages).
-      if (trust_final_non_static_fields(_holder)) {
+      if (is_stable || trust_final_non_static_fields(_holder)) {
         _is_constant = true;
         return;
       }
@@ -227,7 +226,6 @@
 
     Handle mirror = k->java_mirror();
 
-    _is_constant = true;
     switch(type()->basic_type()) {
     case T_BYTE:
       _constant_value = ciConstant(type()->basic_type(), mirror->byte_field(_offset));
@@ -273,6 +271,12 @@
         }
       }
     }
+    if (is_stable && _constant_value.is_null_or_zero()) {
+      // It is not a constant after all; treat it as uninitialized.
+      _is_constant = false;
+    } else {
+      _is_constant = true;
+    }
   } else {
     _is_constant = false;
   }
@@ -344,12 +348,11 @@
     }
   }
 
-  FieldAccessInfo result;
-  constantPoolHandle c_pool(THREAD,
-                         accessing_klass->get_instanceKlass()->constants());
-  LinkResolver::resolve_field(result, c_pool, _cp_index,
-                              Bytecodes::java_code(bc),
-                              true, false, KILL_COMPILE_ON_FATAL_(false));
+  fieldDescriptor result;
+  LinkResolver::resolve_field(result, _holder->get_instanceKlass(),
+                              _name->get_symbol(), _signature->get_symbol(),
+                              accessing_klass->get_Klass(), bc, true, false,
+                              KILL_COMPILE_ON_FATAL_(false));
 
   // update the hit-cache, unless there is a problem with memory scoping:
   if (accessing_klass->is_shared() || !is_shared()) {
@@ -373,8 +376,11 @@
   tty->print(" signature=");
   _signature->print_symbol();
   tty->print(" offset=%d type=", _offset);
-  if (_type != NULL) _type->print_name();
-  else               tty->print("(reference)");
+  if (_type != NULL)
+    _type->print_name();
+  else
+    tty->print("(reference)");
+  tty->print(" flags=%04x", flags().as_int());
   tty->print(" is_constant=%s", bool_to_str(_is_constant));
   if (_is_constant && is_static()) {
     tty->print(" constant_value=");
--- a/src/share/vm/ci/ciField.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciField.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,9 +53,6 @@
   ciInstanceKlass* _known_to_link_with_get;
   ciConstant       _constant_value;
 
-  // Used for will_link
-  int              _cp_index;
-
   ciType* compute_type();
   ciType* compute_type_impl();
 
@@ -139,7 +136,10 @@
   //      non-constant fields.  These are java.lang.System.in
   //      and java.lang.System.out.  Abomination.
   //
-  // Note: the check for case 4 is not yet implemented.
+  // A field is also considered constant if it is marked @Stable
+  // and is non-null (or non-zero, if a primitive).
+  // For non-static fields, the null/zero check must be
+  // arranged by the user, as constant_value().is_null_or_zero().
   bool is_constant() { return _is_constant; }
 
   // Get the constant value of this field.
@@ -173,6 +173,7 @@
   bool is_protected   () { return flags().is_protected(); }
   bool is_static      () { return flags().is_static(); }
   bool is_final       () { return flags().is_final(); }
+  bool is_stable      () { return flags().is_stable(); }
   bool is_volatile    () { return flags().is_volatile(); }
   bool is_transient   () { return flags().is_transient(); }
 
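Taken together with the ciField.cpp hunk above, the folding decision can be summarized in a simplified sketch (illustrative names; the real code also reads the actual value from the holder's mirror and consults trust_final_non_static_fields()):

  // Returns whether a field's current value may be treated as a compile-time
  // constant, mirroring the is_final/is_stable logic in this change.
  static bool may_fold(bool holder_initialized, bool is_static, bool is_final,
                       bool is_stable, bool trusted_holder,
                       bool value_is_null_or_zero) {
    if (!holder_initialized || !(is_final || is_stable)) return false;
    if (!is_static && !(is_stable || trusted_holder))    return false;
    if (is_stable && value_is_null_or_zero)              return false; // still default
    return true;
  }
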
--- a/src/share/vm/ci/ciFlags.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciFlags.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -59,6 +59,7 @@
   bool is_interface   () const         { return (_flags & JVM_ACC_INTERFACE   ) != 0; }
   bool is_abstract    () const         { return (_flags & JVM_ACC_ABSTRACT    ) != 0; }
   bool is_strict      () const         { return (_flags & JVM_ACC_STRICT      ) != 0; }
+  bool is_stable      () const         { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
 
   // Conversion
   jint   as_int()                      { return _flags; }
--- a/src/share/vm/ci/ciInstance.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciInstance.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -127,6 +127,8 @@
 ciConstant ciInstance::field_value_by_offset(int field_offset) {
   ciInstanceKlass* ik = klass()->as_instance_klass();
   ciField* field = ik->get_field_by_offset(field_offset, false);
+  if (field == NULL)
+    return ciConstant();  // T_ILLEGAL
   return field_value(field);
 }
 
--- a/src/share/vm/ci/ciInstanceKlass.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciInstanceKlass.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -522,8 +522,7 @@
 
   for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static())  continue;
-    fieldDescriptor fd;
-    fd.initialize(k, fs.index());
+    fieldDescriptor& fd = fs.field_descriptor();
     ciField* field = new (arena) ciField(&fd);
     fields->append(field);
   }
--- a/src/share/vm/ci/ciMethod.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciMethod.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -286,7 +286,10 @@
   check_is_loaded();
   assert(holder()->is_linked(), "must be linked");
   VM_ENTRY_MARK;
-  return klassItable::compute_itable_index(get_Method());
+  Method* m = get_Method();
+  if (!m->has_itable_index())
+    return Method::nonvirtual_vtable_index;
+  return m->itable_index();
 }
 #endif // SHARK
 
@@ -1137,6 +1140,10 @@
 // ------------------------------------------------------------------
 // ciMethod::check_call
 bool ciMethod::check_call(int refinfo_index, bool is_static) const {
+  // This method is used only in C2 from InlineTree::ok_to_inline,
+  // and is only used under -Xcomp or -XX:CompileTheWorld.
+  // It appears to fail when applied to an invokeinterface call site.
+  // FIXME: Remove this method and resolve_method_statically; refactor to use the other LinkResolver entry points.
   VM_ENTRY_MARK;
   {
     EXCEPTION_MARK;
--- a/src/share/vm/ci/ciSymbol.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciSymbol.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
   friend class ciInstanceKlass;
   friend class ciSignature;
   friend class ciMethod;
+  friend class ciField;
   friend class ciObjArrayKlass;
 
 private:
--- a/src/share/vm/ci/ciTypeArray.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/ci/ciTypeArray.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -39,5 +39,10 @@
 jchar ciTypeArray::char_at(int index) {
   VM_ENTRY_MARK;
   assert(index >= 0 && index < length(), "out of range");
-  return get_typeArrayOop()->char_at(index);
+  jchar c = get_typeArrayOop()->char_at(index);
+#ifdef ASSERT
+  jchar d = element_value(index).as_char();
+  assert(c == d, "");
+#endif //ASSERT
+  return c;
 }
--- a/src/share/vm/classfile/classFileParser.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -888,6 +888,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   while (attributes_count--) {
     cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
     u2 attribute_name_index = cfs->get_u2_fast();
@@ -946,15 +947,27 @@
         assert(runtime_invisible_annotations != NULL, "null invisible annotations");
         cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
       } else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes for field in class file %s", CHECK);
+        }
         runtime_visible_type_annotations_length = attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+      } else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes for field in class file %s", CHECK);
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(attribute_length, CHECK);
       } else {
         cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
       }
@@ -1774,6 +1787,10 @@
     if (_location != _in_method)  break;  // only allow for methods
     if (!privileged)              break;  // only allow in privileged code
     return _method_LambdaForm_Hidden;
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_invoke_Stable_signature):
+    if (_location != _in_field)   break;  // only allow for fields
+    if (!privileged)              break;  // only allow in privileged code
+    return _field_Stable;
   case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Contended_signature):
     if (_location != _in_field && _location != _in_class)          break;  // only allow for fields and classes
     if (!EnableContended || (RestrictContended && !privileged))    break;  // honor privileges
@@ -1786,6 +1803,8 @@
 void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) {
   if (is_contended())
     f->set_contended_group(contended_group());
+  if (is_stable())
+    f->set_stable(true);
 }
 
 ClassFileParser::FieldAnnotationCollector::~FieldAnnotationCollector() {
@@ -2060,6 +2079,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   u1* annotation_default = NULL;
   int annotation_default_length = 0;
 
@@ -2316,16 +2336,30 @@
         assert(annotation_default != NULL, "null annotation default");
         cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
       } else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s",
+            CHECK_(nullHandle));
+        }
         runtime_visible_type_annotations_length = method_attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         // No need for the VM to parse Type annotations
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle));
-      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = method_attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK_(nullHandle));
+      } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s",
+            CHECK_(nullHandle));
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = method_attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
       } else {
         // Skip unknown attributes
         cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
@@ -2818,6 +2852,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   u1* inner_classes_attribute_start = NULL;
   u4  inner_classes_attribute_length = 0;
   u2  enclosing_method_class_index = 0;
@@ -2921,16 +2956,28 @@
         parsed_bootstrap_methods_attribute = true;
         parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK);
       } else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes in class file %s", CHECK);
+        }
         runtime_visible_type_annotations_length = attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         // No need for the VM to parse Type annotations
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+      } else if (tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes in class file %s", CHECK);
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(attribute_length, CHECK);
       } else {
         // Unknown attribute
         cfs->skip_u1(attribute_length, CHECK);
@@ -3948,9 +3995,8 @@
       this_klass->set_has_final_method();
     }
     this_klass->copy_method_ordering(method_ordering, CHECK_NULL);
-    // The InstanceKlass::_methods_jmethod_ids cache and the
-    // InstanceKlass::_methods_cached_itable_indices cache are
-    // both managed on the assumption that the initial cache
+    // The InstanceKlass::_methods_jmethod_ids cache
+    // is managed on the assumption that the initial cache
     // size is equal to the number of methods in the class. If
     // that changes, then InstanceKlass::idnum_can_increment()
     // has to be changed accordingly.
--- a/src/share/vm/classfile/classFileParser.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/classfile/classFileParser.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -125,6 +125,7 @@
       _method_LambdaForm_Compiled,
       _method_LambdaForm_Hidden,
       _sun_misc_Contended,
+      _field_Stable,
       _annotation_LIMIT
     };
     const Location _location;
@@ -143,14 +144,23 @@
       assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
       _annotations_present |= nth_bit((int)id);
     }
+
+    void remove_annotation(ID id) {
+      assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
+      _annotations_present &= ~nth_bit((int)id);
+    }
+
     // Report if the annotation is present.
-    bool has_any_annotations() { return _annotations_present != 0; }
-    bool has_annotation(ID id) { return (nth_bit((int)id) & _annotations_present) != 0; }
+    bool has_any_annotations() const { return _annotations_present != 0; }
+    bool has_annotation(ID id) const { return (nth_bit((int)id) & _annotations_present) != 0; }
 
     void set_contended_group(u2 group) { _contended_group = group; }
-    u2 contended_group() { return _contended_group; }
+    u2 contended_group() const { return _contended_group; }
 
-    bool is_contended() { return has_annotation(_sun_misc_Contended); }
+    bool is_contended() const { return has_annotation(_sun_misc_Contended); }
+
+    void set_stable(bool stable) { set_annotation(_field_Stable); }
+    bool is_stable() const { return has_annotation(_field_Stable); }
   };
 
   // This class also doubles as a holder for metadata cleanup.
--- a/src/share/vm/classfile/classLoader.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/classfile/classLoader.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1319,6 +1319,25 @@
   // The CHECK at the caller will propagate the exception out
 }
 
+/**
+ * Returns whether the given method should be compiled when doing compile-the-world.
+ *
+ * TODO:  This should be a private method in a CompileTheWorld class.
+ */
+static bool can_be_compiled(methodHandle m, int comp_level) {
+  assert(CompileTheWorld, "must be");
+
+  // It's not valid to compile a native wrapper for MethodHandle methods
+  // that take a MemberName appendix since the bytecode signature is not
+  // correct.
+  vmIntrinsics::ID iid = m->intrinsic_id();
+  if (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::has_member_arg(iid)) {
+    return false;
+  }
+
+  return CompilationPolicy::can_be_compiled(m, comp_level);
+}
+
 void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
   int len = (int)strlen(name);
   if (len > 6 && strcmp(".class", name + len - 6) == 0) {
@@ -1362,8 +1381,7 @@
           int comp_level = CompilationPolicy::policy()->initial_compile_level();
           for (int n = 0; n < k->methods()->length(); n++) {
             methodHandle m (THREAD, k->methods()->at(n));
-            if (CompilationPolicy::can_be_compiled(m, comp_level)) {
-
+            if (can_be_compiled(m, comp_level)) {
               if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
                 // Give sweeper a chance to keep up with CTW
                 VM_ForceSafepoint op;
@@ -1375,7 +1393,7 @@
                                             methodHandle(), 0, "CTW", THREAD);
               if (HAS_PENDING_EXCEPTION) {
                 clear_pending_exception_if_not_oom(CHECK);
-                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
               } else {
                 _compile_the_world_method_counter++;
               }
@@ -1391,11 +1409,13 @@
                                               methodHandle(), 0, "CTW", THREAD);
                 if (HAS_PENDING_EXCEPTION) {
                   clear_pending_exception_if_not_oom(CHECK);
-                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
                 } else {
                   _compile_the_world_method_counter++;
                 }
               }
+            } else {
+              tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
             }
 
             nmethod* nm = m->code();
--- a/src/share/vm/classfile/defaultMethods.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/classfile/defaultMethods.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -450,6 +450,10 @@
     streamIndentor si(str, indent * 2);
     str->indent().print("Selected method: ");
     print_method(str, _selected_target);
+    Klass* method_holder = _selected_target->method_holder();
+    if (!method_holder->is_interface()) {
+      tty->print(" : in superclass");
+    }
     str->print_cr("");
   }
 
@@ -1141,19 +1145,23 @@
 #endif // ndef PRODUCT
       if (method->has_target()) {
         Method* selected = method->get_selected_target();
-        max_stack = assemble_redirect(
+        if (selected->method_holder()->is_interface()) {
+          max_stack = assemble_redirect(
             &bpool, &buffer, slot->signature(), selected, CHECK);
+        }
       } else if (method->throws_exception()) {
         max_stack = assemble_abstract_method_error(
             &bpool, &buffer, method->get_exception_message(), CHECK);
       }
-      AccessFlags flags = accessFlags_from(
+      if (max_stack != 0) {
+        AccessFlags flags = accessFlags_from(
           JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
-      Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
+        Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
           flags, max_stack, slot->size_of_parameters(),
           ConstMethod::OVERPASS, CHECK);
-      if (m != NULL) {
-        overpasses.push(m);
+        if (m != NULL) {
+          overpasses.push(m);
+        }
       }
     }
   }
--- a/src/share/vm/classfile/javaClasses.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -438,6 +438,29 @@
   return true;
 }
 
+bool java_lang_String::equals(oop str1, oop str2) {
+  assert(str1->klass() == SystemDictionary::String_klass(),
+         "must be java String");
+  assert(str2->klass() == SystemDictionary::String_klass(),
+         "must be java String");
+  typeArrayOop value1  = java_lang_String::value(str1);
+  int          offset1 = java_lang_String::offset(str1);
+  int          length1 = java_lang_String::length(str1);
+  typeArrayOop value2  = java_lang_String::value(str2);
+  int          offset2 = java_lang_String::offset(str2);
+  int          length2 = java_lang_String::length(str2);
+
+  if (length1 != length2) {
+    return false;
+  }
+  for (int i = 0; i < length1; i++) {
+    if (value1->char_at(i + offset1) != value2->char_at(i + offset2)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 void java_lang_String::print(Handle java_string, outputStream* st) {
   oop          obj    = java_string();
   assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
--- a/src/share/vm/classfile/javaClasses.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/classfile/javaClasses.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -182,6 +182,7 @@
   static unsigned int hash_string(oop java_string);
 
   static bool equals(oop java_string, jchar* chars, int len);
+  static bool equals(oop str1, oop str2);
 
   // Conversion between '.' and '/' formats
   static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
--- a/src/share/vm/classfile/symbolTable.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/classfile/symbolTable.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -341,7 +341,7 @@
 
 Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
                                unsigned int hashValue_arg, bool c_heap, TRAPS) {
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+  assert(!Universe::heap()->is_in_reserved(name),
          "proposed name of symbol must be stable");
 
   // Don't allow symbols to be created which cannot fit in a Symbol*.
@@ -685,7 +685,7 @@
   if (found_string != NULL) return found_string;
 
   debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+  assert(!Universe::heap()->is_in_reserved(name),
          "proposed name of symbol must be stable");
 
   Handle string;
@@ -807,6 +807,8 @@
   }
 }
 
+// This verification is part of Universe::verify() and needs to be quick.
+// See StringTable::verify_and_compare() below for exhaustive verification.
 void StringTable::verify() {
   for (int i = 0; i < the_table()->table_size(); ++i) {
     HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
@@ -825,6 +827,162 @@
   the_table()->dump_table(st, "StringTable");
 }
 
+StringTable::VerifyRetTypes StringTable::compare_entries(
+                                      int bkt1, int e_cnt1,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr1,
+                                      int bkt2, int e_cnt2,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr2) {
+  // These entries are sanity checked by verify_and_compare_entries()
+  // before this function is called.
+  oop str1 = e_ptr1->literal();
+  oop str2 = e_ptr2->literal();
+
+  if (str1 == str2) {
+    tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
+                  "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
+                  str1, bkt1, e_cnt1, bkt2, e_cnt2);
+    return _verify_fail_continue;
+  }
+
+  if (java_lang_String::equals(str1, str2)) {
+    tty->print_cr("ERROR: identical String values in entry @ "
+                  "bucket[%d][%d] and entry @ bucket[%d][%d]",
+                  bkt1, e_cnt1, bkt2, e_cnt2);
+    return _verify_fail_continue;
+  }
+
+  return _verify_pass;
+}
+
+StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr,
+                                      StringTable::VerifyMesgModes mesg_mode) {
+
+  VerifyRetTypes ret = _verify_pass;  // be optimistic
+
+  oop str = e_ptr->literal();
+  if (str == NULL) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt,
+                    e_cnt);
+    }
+    // NULL oop means no more verifications are possible
+    return _verify_fail_done;
+  }
+
+  if (str->klass() != SystemDictionary::String_klass()) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]",
+                    bkt, e_cnt);
+    }
+    // not a String means no more verifications are possible
+    return _verify_fail_done;
+  }
+
+  unsigned int h = java_lang_String::hash_string(str);
+  if (e_ptr->hash() != h) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "
+                    "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h);
+    }
+    ret = _verify_fail_continue;
+  }
+
+  if (the_table()->hash_to_index(h) != bkt) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], "
+                    "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h,
+                    the_table()->hash_to_index(h));
+    }
+    ret = _verify_fail_continue;
+  }
+
+  return ret;
+}
+
+// See StringTable::verify() above for the quick verification that is
+// part of Universe::verify(). This verification is exhaustive and
+// reports on every issue that is found. StringTable::verify() only
+// reports on the first issue that is found.
+//
+// StringTable::verify_entry() checks:
+// - oop value != NULL (same as verify())
+// - oop value is a String
+// - hash(String) == hash in entry (same as verify())
+// - index for hash == index of entry (same as verify())
+//
+// StringTable::compare_entries() checks:
+// - oops are unique across all entries
+// - String values are unique across all entries
+//
+int StringTable::verify_and_compare_entries() {
+  assert(StringTable_lock->is_locked(), "sanity check");
+
+  int  fail_cnt = 0;
+
+  // first, verify all the entries individually:
+  for (int bkt = 0; bkt < the_table()->table_size(); bkt++) {
+    HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt);
+    for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) {
+      VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs);
+      if (ret != _verify_pass) {
+        fail_cnt++;
+      }
+    }
+  }
+
+  // Optimization: if the above check did not find any failures, then
+  // the comparison loop below does not need to call verify_entry()
+  // before calling compare_entries(). If there were failures, then we
+  // have to call verify_entry() to see if the entry can be passed to
+  // compare_entries() safely. When we call verify_entry() in the loop
+// below, we do so quietly to avoid duplicate messages and we don't
+  // increment fail_cnt because the failures have already been counted.
+  bool need_entry_verify = (fail_cnt != 0);
+
+  // second, verify all entries relative to each other:
+  for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) {
+    HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1);
+    for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) {
+      if (need_entry_verify) {
+        VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1,
+                                          _verify_quietly);
+        if (ret == _verify_fail_done) {
+          // cannot use the current entry to compare against other entries
+          continue;
+        }
+      }
+
+      for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) {
+        HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2);
+        int e_cnt2;
+        for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) {
+          if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) {
+            // skip the entries up to and including the one that
+            // we're comparing against
+            continue;
+          }
+
+          if (need_entry_verify) {
+            VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2,
+                                              _verify_quietly);
+            if (ret == _verify_fail_done) {
+              // cannot compare against this entry
+              continue;
+            }
+          }
+
+          // compare two entries, report and count any failures:
+          if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2)
+              != _verify_pass) {
+            fail_cnt++;
+          }
+        }
+      }
+    }
+  }
+  return fail_cnt;
+}
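
For illustration, the pairwise pass in verify_and_compare_entries() above is a
triangular scan that visits each unordered pair of entries exactly once. A
minimal standalone sketch of the same traversal pattern, using plain C++
containers and illustrative names instead of VM types:

#include <iostream>
#include <string>
#include <vector>

// Count duplicate values across a bucketed table, visiting each unordered
// pair of entries exactly once, as verify_and_compare_entries() does.
static int compare_all_pairs(const std::vector<std::vector<std::string> >& table) {
  int fail_cnt = 0;
  for (size_t bkt1 = 0; bkt1 < table.size(); bkt1++) {
    for (size_t e1 = 0; e1 < table[bkt1].size(); e1++) {
      for (size_t bkt2 = bkt1; bkt2 < table.size(); bkt2++) {
        for (size_t e2 = 0; e2 < table[bkt2].size(); e2++) {
          if (bkt2 == bkt1 && e2 <= e1) {
            continue;  // skip entries up to and including the current one
          }
          if (table[bkt1][e1] == table[bkt2][e2]) {
            std::cout << "duplicate @ [" << bkt1 << "][" << e1 << "] and ["
                      << bkt2 << "][" << e2 << "]\n";
            fail_cnt++;
          }
        }
      }
    }
  }
  return fail_cnt;
}

int main() {
  std::vector<std::vector<std::string> > table(2);
  table[0].push_back("a");
  table[0].push_back("b");
  table[1].push_back("a");              // duplicate of table[0][0]
  return compare_all_pairs(table);      // reports exactly one duplicate
}
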
 
 // Create a new table and, using the alternate hash code, populate the new
 // table with the existing strings.  Set the flag to use the alternate hash
 // code afterwards.
--- a/src/share/vm/classfile/symbolTable.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/classfile/symbolTable.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -311,6 +311,26 @@
   static void verify();
   static void dump(outputStream* st);
 
+  enum VerifyMesgModes {
+    _verify_quietly    = 0,
+    _verify_with_mesgs = 1
+  };
+
+  enum VerifyRetTypes {
+    _verify_pass          = 0,
+    _verify_fail_continue = 1,
+    _verify_fail_done     = 2
+  };
+
+  static VerifyRetTypes compare_entries(int bkt1, int e_cnt1,
+                                        HashtableEntry<oop, mtSymbol>* e_ptr1,
+                                        int bkt2, int e_cnt2,
+                                        HashtableEntry<oop, mtSymbol>* e_ptr2);
+  static VerifyRetTypes verify_entry(int bkt, int e_cnt,
+                                     HashtableEntry<oop, mtSymbol>* e_ptr,
+                                     VerifyMesgModes mesg_mode);
+  static int verify_and_compare_entries();
+
   // Sharing
   static void copy_buckets(char** top, char*end) {
     the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
--- a/src/share/vm/classfile/vmSymbols.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/classfile/vmSymbols.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -270,6 +270,7 @@
   template(java_lang_invoke_LambdaForm,               "java/lang/invoke/LambdaForm")              \
   template(java_lang_invoke_ForceInline_signature,    "Ljava/lang/invoke/ForceInline;")           \
   template(java_lang_invoke_DontInline_signature,     "Ljava/lang/invoke/DontInline;")            \
+  template(sun_invoke_Stable_signature,               "Lsun/invoke/Stable;")                      \
   template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
   template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;")  \
   template(java_lang_invoke_MagicLambdaImpl,          "java/lang/invoke/MagicLambdaImpl")         \
--- a/src/share/vm/code/compiledIC.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/code/compiledIC.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -160,32 +160,42 @@
 // High-level access to an inline cache. Guaranteed to be MT-safe.
 
 
-void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
-  methodHandle method = call_info->selected_method();
-  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
+bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
   assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
   assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
 
   address entry;
-  if (is_invoke_interface) {
-    int index = klassItable::compute_itable_index(call_info->resolved_method()());
-    entry = VtableStubs::create_stub(false, index, method());
-    assert(entry != NULL, "entry not computed");
+  if (call_info->call_kind() == CallInfo::itable_call) {
+    assert(bytecode == Bytecodes::_invokeinterface, "");
+    int itable_index = call_info->itable_index();
+    entry = VtableStubs::find_itable_stub(itable_index);
+    if (entry == NULL) {
+      return false;
+    }
+#ifdef ASSERT
+    int index = call_info->resolved_method()->itable_index();
+    assert(index == itable_index, "CallInfo pre-computes this");
+#endif //ASSERT
     InstanceKlass* k = call_info->resolved_method()->method_holder();
-    assert(k->is_interface(), "sanity check");
+    assert(k->verify_itable_index(itable_index), "sanity check");
     InlineCacheBuffer::create_transition_stub(this, k, entry);
   } else {
-    // Can be different than method->vtable_index(), due to package-private etc.
+    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
+    // Can be different than selected_method->vtable_index(), due to package-private etc.
     int vtable_index = call_info->vtable_index();
-    entry = VtableStubs::create_stub(true, vtable_index, method());
-    InlineCacheBuffer::create_transition_stub(this, method(), entry);
+    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
+    entry = VtableStubs::find_vtable_stub(vtable_index);
+    if (entry == NULL) {
+      return false;
+    }
+    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
   }
 
   if (TraceICs) {
     ResourceMark rm;
     tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
-                   instruction_address(), method->print_value_string(), entry);
+                   instruction_address(), call_info->selected_method()->print_value_string(), entry);
   }
 
   // We can't check this anymore. With lazy deopt we could have already
@@ -195,6 +205,7 @@
   // race because the IC entry was complete when we safepointed so
   // cleaning it immediately is harmless.
   // assert(is_megamorphic(), "sanity check");
+  return true;
 }
 
 
--- a/src/share/vm/code/compiledIC.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/code/compiledIC.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -226,7 +226,10 @@
   //
   void set_to_clean();  // Can only be called during a safepoint operation
   void set_to_monomorphic(CompiledICInfo& info);
-  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+
+  // Returns true if successful and false otherwise. The call can fail if memory
+  // allocation in the code cache fails.
+  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
 
   static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                         bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
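
As a hedged sketch of the new contract (illustrative names only; the actual
HotSpot caller is not shown in this hunk): code-cache exhaustion now surfaces
as a boolean, so the caller can leave the inline cache untouched and let a
later miss retry the transition instead of the VM exiting.

#include <cstdio>

struct CodeCache {
  bool has_room;
  void* allocate() { return has_room ? (void*)this : (void*)0; }  // 0 models "full"
};

static bool set_to_megamorphic(CodeCache& cache) {
  void* entry = cache.allocate();
  if (entry == 0) {
    return false;  // report failure; caller keeps the current IC state
  }
  // ... install the transition stub through 'entry' ...
  return true;
}

int main() {
  CodeCache full_cache = { false };
  if (!set_to_megamorphic(full_cache)) {
    std::printf("code cache full: call site unchanged, retried on a later miss\n");
  }
  return 0;
}
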
--- a/src/share/vm/code/vtableStubs.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/code/vtableStubs.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -46,12 +46,9 @@
 address VtableStub::_chunk_end         = NULL;
 VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();
 
-static int num_vtable_chunks = 0;
-
 
 void* VtableStub::operator new(size_t size, int code_size) throw() {
   assert(size == sizeof(VtableStub), "mismatched size");
-  num_vtable_chunks++;
   // compute real VtableStub size (rounded to nearest word)
   const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
   // malloc them in chunks to minimize header overhead
@@ -60,7 +57,7 @@
     const int bytes = chunk_factor * real_size + pd_code_alignment();
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
-      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
+      return NULL;
     }
     _chunk = blob->content_begin();
     _chunk_end = _chunk + bytes;
@@ -111,7 +108,7 @@
 }
 
 
-address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method* method) {
+address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
   assert(vtable_index >= 0, "must be positive");
 
   VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
@@ -121,6 +118,12 @@
     } else {
       s = create_itable_stub(vtable_index);
     }
+
+    // Creation of a vtable or itable stub can fail if there is not enough free space in the code cache.
+    if (s == NULL) {
+      return NULL;
+    }
+
     enter(is_vtable_stub, vtable_index, s);
     if (PrintAdapterHandlers) {
       tty->print_cr("Decoding VtableStub %s[%d]@%d",
--- a/src/share/vm/code/vtableStubs.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/code/vtableStubs.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -121,9 +121,11 @@
   static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
   static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
   static inline uint hash              (bool is_vtable_stub, int vtable_index);
+  static address     find_stub         (bool is_vtable_stub, int vtable_index);
 
  public:
-  static address     create_stub(bool is_vtable_stub, int vtable_index, Method* method); // return the entry point of a stub for this call
+  static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
+  static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
   static bool        is_entry_point(address pc);                     // is pc a vtable stub entry point?
   static bool        contains(address pc);                           // is pc within any stub?
   static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
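
find_stub() is a lookup-or-create accessor: it consults the shared stub table
first and only creates (and registers) a stub on a miss, now returning NULL
when creation fails. A standalone sketch of that shape, using a std::map in
place of the VM's fixed-size hash table (all names illustrative):

#include <cstddef>
#include <map>
#include <new>
#include <utility>

struct Stub { int index; };

static std::map<std::pair<bool, int>, Stub*> stub_table;

// Models create_vtable_stub()/create_itable_stub(), which after this change
// may return NULL instead of exiting the VM when the code cache is full.
static Stub* create(int index) {
  Stub* s = new (std::nothrow) Stub();
  if (s != NULL) s->index = index;
  return s;
}

static Stub* find_stub(bool is_vtable_stub, int index) {
  std::pair<bool, int> key(is_vtable_stub, index);
  std::map<std::pair<bool, int>, Stub*>::iterator it = stub_table.find(key);
  if (it != stub_table.end()) {
    return it->second;        // lookup(): a shared stub already exists
  }
  Stub* s = create(index);
  if (s == NULL) {
    return NULL;              // propagate the allocation failure to the caller
  }
  stub_table[key] = s;        // enter(): publish for future lookups
  return s;
}
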
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -230,7 +230,7 @@
   // depends on this property.
   debug_only(
     FreeChunk* junk = NULL;
-    assert(UseCompressedKlassPointers ||
+    assert(UseCompressedClassPointers ||
            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
            "Offset of FreeChunk::_prev within FreeChunk must match"
            "  that of OopDesc::_klass within OopDesc");
@@ -1407,7 +1407,7 @@
   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   OrderAccess::storestore();
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Copy gap missed by (aligned) header size calculation below
     obj->set_klass_gap(old->klass_gap());
   }
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -481,9 +481,8 @@
 
 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   _g1h(g1h),
-  _markBitMap1(MinObjAlignment - 1),
-  _markBitMap2(MinObjAlignment - 1),
-
+  _markBitMap1(log2_intptr(MinObjAlignment)),
+  _markBitMap2(log2_intptr(MinObjAlignment)),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
+#ifndef PRODUCT
+void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
+  guarantee(_base != NULL, "Array not initialized");
+  guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
+  guarantee(_biased_base != NULL, "Array not initialized");
+  guarantee(biased_index >= bias() && biased_index < (bias() + length()),
+    err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
+  guarantee(_biased_base != NULL, "Array not initialized");
+  guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
+    err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+class TestMappedArray : public G1BiasedMappedArray<int> {
+protected:
+  virtual int default_value() const { return 0xBAADBABE; }
+public:
+  static void test_biasedarray() {
+    const size_t REGION_SIZE_IN_WORDS = 512;
+    const size_t NUM_REGIONS = 20;
+    HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
+
+    TestMappedArray array;
+    array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
+            REGION_SIZE_IN_WORDS * HeapWordSize);
+    // Check address calculation (bounds)
+    assert(array.bottom_address_mapped() == fake_heap,
+      err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
+    assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
+
+    int* bottom = array.address_mapped_to(fake_heap);
+    assert((void*)bottom == (void*) array.base(), "must be");
+    int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
+    assert((void*)end == (void*)(array.base() + array.length()), "must be");
+    // The entire array should contain default value elements
+    for (int* current = bottom; current < end; current++) {
+      assert(*current == array.default_value(), "must be");
+    }
+
+    // Test setting values in the table
+
+    HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
+    HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
+
+    // Set/get by address tests: invert some value; first retrieve one
+    int actual_value = array.get_by_index(NUM_REGIONS / 2);
+    array.set_by_index(NUM_REGIONS / 2, ~actual_value);
+    // Get the same value by address, should correspond to the start of the "region"
+    int value = array.get_by_address(region_start_address);
+    assert(value == ~actual_value, "must be");
+    // Get the same value by address, at one HeapWord before the start
+    value = array.get_by_address(region_start_address - 1);
+    assert(value == array.default_value(), "must be");
+    // Get the same value by address, at the end of the "region"
+    value = array.get_by_address(region_end_address);
+    assert(value == ~actual_value, "must be");
+    // Make sure the next value maps to another index
+    value = array.get_by_address(region_end_address + 1);
+    assert(value == array.default_value(), "must be");
+
+    // Reset the value in the array
+    array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
+
+    // The entire array should have the default value again
+    for (int* current = bottom; current < end; current++) {
+      assert(*current == array.default_value(), "must be");
+    }
+
+    // Set/get by index tests: invert some value
+    idx_t index = NUM_REGIONS / 2;
+    actual_value = array.get_by_index(index);
+    array.set_by_index(index, ~actual_value);
+
+    value = array.get_by_index(index);
+    assert(value == ~actual_value, "must be");
+
+    value = array.get_by_index(index - 1);
+    assert(value == array.default_value(), "must be");
+
+    value = array.get_by_index(index + 1);
+    assert(value == array.default_value(), "must be");
+
+    array.set_by_index(0, 0);
+    value = array.get_by_index(0);
+    assert(value == 0, "must be");
+
+    array.set_by_index(array.length() - 1, 0);
+    value = array.get_by_index(array.length() - 1);
+    assert(value == 0, "must be");
+
+    array.set_by_index(index, 0);
+
+    // The array should have three zeros, and default values otherwise
+    size_t num_zeros = 0;
+    for (int* current = bottom; current < end; current++) {
+      assert(*current == array.default_value() || *current == 0, "must be");
+      if (*current == 0) {
+        num_zeros++;
+      }
+    }
+    assert(num_zeros == 3, "must be");
+  }
+};
+
+void TestG1BiasedArray_test() {
+  TestMappedArray::test_biasedarray();
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+
+#include "utilities/debug.hpp"
+#include "memory/allocation.inline.hpp"
+
+// Implements the common base functionality for arrays that contain provisions
+// for accessing their elements using a biased index.
+// The element type is defined by instantiating the template.
+class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+public:
+  typedef size_t idx_t;
+protected:
+  address _base;          // the real base address
+  size_t _length;         // the length of the array
+  address _biased_base;   // base address biased by "bias" elements
+  size_t _bias;           // the bias, i.e. the number of elements the real base is offset to the right of the biased base
+  uint _shift_by;         // the number of bits to shift an address right when mapping it to an index of the array.
+
+protected:
+
+  G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
+    _bias(0), _shift_by(0) { }
+
+  // Allocate a new array, generic version.
+  static address create_new_base_array(size_t length, size_t elem_size) {
+    assert(length > 0, "just checking");
+    assert(elem_size > 0, "just checking");
+    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
+  }
+
+  // Initialize the members of this class. The biased base address is the real
+  // base address minus the bias (in elements) times the element size.
+  void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
+    assert(base != NULL, "just checking");
+    assert(length > 0, "just checking");
+    assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
+    _base = base;
+    _length = length;
+    _biased_base = base - (bias * elem_size);
+    _bias = bias;
+    _shift_by = shift_by;
+  }
+
+  // Allocate and initialize this array to cover the heap addresses in the range
+  // of [bottom, end).
+  void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
+    assert(mapping_granularity_in_bytes > 0, "just checking");
+    assert(is_power_of_2(mapping_granularity_in_bytes),
+      err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
+    assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
+      err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+        mapping_granularity_in_bytes, bottom));
+    assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
+      err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+        mapping_granularity_in_bytes, end));
+    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
+    idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
+    address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
+    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
+  }
+
+  size_t bias() const { return _bias; }
+  uint shift_by() const { return _shift_by; }
+
+  void verify_index(idx_t index) const PRODUCT_RETURN;
+  void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
+  void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
+
+public:
+   // Return the length of the array in elements.
+   size_t length() const { return _length; }
+};
+
+// Array that provides biased access and mapping from (valid) addresses in the
+// heap into this array.
+template<class T>
+class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
+public:
+  typedef G1BiasedMappedArrayBase::idx_t idx_t;
+
+  T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
+  // Return the element of the array at the given index. The index must be
+  // valid; this convenience method sanity-checks it in debug builds.
+  T get_by_index(idx_t index) const {
+    verify_index(index);
+    return this->base()[index];
+  }
+
+  // Set the element of the array at the given index to the given value. The
+  // index must be valid; this convenience method sanity-checks it in debug
+  // builds.
+  void set_by_index(idx_t index, T value) {
+    verify_index(index);
+    this->base()[index] = value;
+  }
+
+  // The raw biased base pointer.
+  T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
+
+  // Return the element of this array that covers the given word in the
+  // heap. The derived index must be valid.
+  T get_by_address(HeapWord* value) const {
+    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
+    this->verify_biased_index(biased_index);
+    return biased_base()[biased_index];
+  }
+
+  // Set the value of the array entry that corresponds to the given address.
+  void set_by_address(HeapWord* address, T value) {
+    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+    this->verify_biased_index(biased_index);
+    biased_base()[biased_index] = value;
+  }
+
+protected:
+  // Returns the address of the element the given address maps to
+  T* address_mapped_to(HeapWord* address) {
+    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+    this->verify_biased_index_inclusive_end(biased_index);
+    return biased_base() + biased_index;
+  }
+
+public:
+  // Return the smallest address (inclusive) in the heap that this array covers.
+  HeapWord* bottom_address_mapped() const {
+    return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
+  }
+
+  // Return the highest address (exclusive) in the heap that this array covers.
+  HeapWord* end_address_mapped() const {
+    return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
+  }
+
+protected:
+  virtual T default_value() const = 0;
+  // Set all elements of the array to the default value.
+  void clear() {
+    T value = default_value();
+    for (idx_t i = 0; i < length(); i++) {
+      set_by_index(i, value);
+    }
+  }
+public:
+  G1BiasedMappedArray() {}
+
+  // Allocate and initialize this array to cover the heap addresses in the range
+  // of [bottom, end).
+  void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
+    G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
+    this->clear();
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
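
The point of the bias is that an in-range heap address maps to its slot with a
single shift of the raw address, with no per-access subtraction of the heap
bottom. A standalone check of that arithmetic (the concrete addresses and the
4096-byte granularity are assumptions of the example, not VM constants):

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const uintptr_t heap_bottom = 0x100000;  // granularity-aligned, illustrative
  const size_t    granularity = 4096;      // bytes of heap covered per element
  const unsigned  shift_by    = 12;        // log2(granularity)
  const size_t    num_elems   = 16;

  std::vector<int> storage(num_elems, -1);
  const size_t bias = heap_bottom / granularity;  // index of the first mapped slot
  // Pre-offsetting the base by the bias lets any covered address be mapped
  // with one shift. (Formally out-of-range pointer arithmetic; shown only to
  // mirror the VM's trick.)
  int* biased_base = &storage[0] - bias;

  uintptr_t addr = heap_bottom + 5 * granularity + 123;  // inside "slot" 5
  assert(biased_base + (addr >> shift_by) == &storage[5]);
  return 0;
}
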
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -33,8 +33,8 @@
 
 void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
   if (has_count_table()) {
-    check_card_num(from_card_num,
-                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
+    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
+           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
     assert(from_card_num < to_card_num,
            err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                    from_card_num, to_card_num));
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -72,25 +72,21 @@
     return has_reserved_count_table() && _committed_max_card_num > 0;
   }
 
-  void check_card_num(size_t card_num, const char* msg) {
-    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
-  }
-
   size_t ptr_2_card_num(const jbyte* card_ptr) {
     assert(card_ptr >= _ct_bot,
-           err_msg("Inavalied card pointer: "
+           err_msg("Invalid card pointer: "
                    "card_ptr: " PTR_FORMAT ", "
                    "_ct_bot: " PTR_FORMAT,
                    card_ptr, _ct_bot));
     size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
-    check_card_num(card_num,
-                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
+    assert(card_num >= 0 && card_num < _committed_max_card_num,
+           err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
     return card_num;
   }
 
   jbyte* card_num_2_ptr(size_t card_num) {
-    check_card_num(card_num,
-                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
+    assert(card_num >= 0 && card_num < _committed_max_card_num,
+           err_msg("card num out of range: "SIZE_FORMAT, card_num));
     return (jbyte*) (_ct_bot + card_num);
   }
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -2069,8 +2069,10 @@
   _g1_storage.initialize(g1_rs, 0);
   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
   _hrs.initialize((HeapWord*) _g1_reserved.start(),
-                  (HeapWord*) _g1_reserved.end(),
-                  _expansion_regions);
+                  (HeapWord*) _g1_reserved.end());
+  assert(_hrs.max_length() == _expansion_regions,
+         err_msg("max length: %u expansion regions: %u",
+                 _hrs.max_length(), _expansion_regions));
 
   // Do later initialization work for concurrent refinement.
   _cg1r->init();
@@ -2191,6 +2193,10 @@
   return JNI_OK;
 }
 
+size_t G1CollectedHeap::conservative_max_heap_alignment() {
+  return HeapRegion::max_region_size();
+}
+
 void G1CollectedHeap::ref_processing_init() {
   // Reference processing in G1 currently works as follows:
   //
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1092,6 +1092,9 @@
   // specified by the policy object.
   jint initialize();
 
+  // Return the (conservative) maximum heap alignment for any G1 heap
+  static size_t conservative_max_heap_alignment();
+
   // Initialize weak reference processing.
   virtual void ref_processing_init();
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -149,6 +149,10 @@
 // many regions in the heap (based on the min heap size).
 #define TARGET_REGION_NUMBER          2048
 
+size_t HeapRegion::max_region_size() {
+  return (size_t)MAX_REGION_SIZE;
+}
+
 void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   uintx region_size = G1HeapRegionSize;
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -355,6 +355,8 @@
                                       ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   }
 
+  static size_t max_region_size();
+
   // It sets up the heap region size (GrainBytes / GrainWords), as
   // well as other related fields that are based on the heap region
   // size (LogOfHRGrainBytes / LogOfHRGrainWords /
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -71,27 +71,16 @@
 
 // Public
 
-void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
-                               uint max_length) {
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
   assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
          "bottom should be heap region aligned");
   assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
          "end should be heap region aligned");
 
-  _length = 0;
-  _heap_bottom = bottom;
-  _heap_end = end;
-  _region_shift = HeapRegion::LogOfHRGrainBytes;
   _next_search_index = 0;
   _allocated_length = 0;
-  _max_length = max_length;
 
-  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
-  memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
-  _regions_biased = _regions - ((uintx) bottom >> _region_shift);
-
-  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
-         "bottom should be included in the region with index 0");
+  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
 }
 
 MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
@@ -101,15 +90,15 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   HeapWord* next_bottom = old_end;
-  assert(_heap_bottom <= next_bottom, "invariant");
+  assert(heap_bottom() <= next_bottom, "invariant");
   while (next_bottom < new_end) {
-    assert(next_bottom < _heap_end, "invariant");
+    assert(next_bottom < heap_end(), "invariant");
     uint index = length();
 
-    assert(index < _max_length, "otherwise we cannot expand further");
+    assert(index < max_length(), "otherwise we cannot expand further");
     if (index == 0) {
       // We have not allocated any regions so far
-      assert(next_bottom == _heap_bottom, "invariant");
+      assert(next_bottom == heap_bottom(), "invariant");
     } else {
       // next_bottom should match the end of the last/previous region
       assert(next_bottom == at(index - 1)->end(), "invariant");
@@ -122,8 +111,8 @@
         // allocation failed, we bail out and return what we have done so far
         return MemRegion(old_end, next_bottom);
       }
-      assert(_regions[index] == NULL, "invariant");
-      _regions[index] = new_hr;
+      assert(_regions.get_by_index(index) == NULL, "invariant");
+      _regions.set_by_index(index, new_hr);
       increment_allocated_length();
     }
     // Have to increment the length first, otherwise we will get an
@@ -228,26 +217,26 @@
 
 #ifndef PRODUCT
 void HeapRegionSeq::verify_optional() {
-  guarantee(_length <= _allocated_length,
+  guarantee(length() <= _allocated_length,
             err_msg("invariant: _length: %u _allocated_length: %u",
-                    _length, _allocated_length));
-  guarantee(_allocated_length <= _max_length,
+                    length(), _allocated_length));
+  guarantee(_allocated_length <= max_length(),
             err_msg("invariant: _allocated_length: %u _max_length: %u",
-                    _allocated_length, _max_length));
-  guarantee(_next_search_index <= _length,
+                    _allocated_length, max_length()));
+  guarantee(_next_search_index <= length(),
             err_msg("invariant: _next_search_index: %u _length: %u",
-                    _next_search_index, _length));
+                    _next_search_index, length()));
 
-  HeapWord* prev_end = _heap_bottom;
+  HeapWord* prev_end = heap_bottom();
   for (uint i = 0; i < _allocated_length; i += 1) {
-    HeapRegion* hr = _regions[i];
+    HeapRegion* hr = _regions.get_by_index(i);
     guarantee(hr != NULL, err_msg("invariant: i: %u", i));
     guarantee(hr->bottom() == prev_end,
               err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
                       i, HR_FORMAT_PARAMS(hr), prev_end));
     guarantee(hr->hrs_index() == i,
               err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
-    if (i < _length) {
+    if (i < length()) {
       // Asserts will fire if i is >= _length
       HeapWord* addr = hr->bottom();
       guarantee(addr_to_region(addr) == hr, "sanity");
@@ -265,8 +254,8 @@
       prev_end = hr->end();
     }
   }
-  for (uint i = _allocated_length; i < _max_length; i += 1) {
-    guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
+  for (uint i = _allocated_length; i < max_length(); i += 1) {
+    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
   }
 }
 #endif // PRODUCT
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -25,10 +25,17 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
 
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
 class HeapRegion;
 class HeapRegionClosure;
 class FreeRegionList;
 
+class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
+ protected:
+   virtual HeapRegion* default_value() const { return NULL; }
+};
+
 // This class keeps track of the region metadata (i.e., HeapRegion
 // instances). They are kept in the _regions array in address
 // order. A region's index in the array corresponds to its index in
@@ -44,35 +51,21 @@
 //
 // We keep track of three lengths:
 //
-// * _length (returned by length()) is the number of currently
+// * _committed_length (returned by length()) is the number of currently
 //   committed regions.
 // * _allocated_length (not exposed outside this class) is the
 //   number of regions for which we have HeapRegions.
-// * _max_length (returned by max_length()) is the maximum number of
-//   regions the heap can have.
+// * max_length() returns the maximum number of regions the heap can have.
 //
-// and maintain that: _length <= _allocated_length <= _max_length
+// and maintain that: _committed_length <= _allocated_length <= max_length()
 
 class HeapRegionSeq: public CHeapObj<mtGC> {
   friend class VMStructs;
 
-  // The array that holds the HeapRegions.
-  HeapRegion** _regions;
-
-  // Version of _regions biased to address 0
-  HeapRegion** _regions_biased;
+  G1HeapRegionTable _regions;
 
   // The number of regions committed in the heap.
-  uint _length;
-
-  // The address of the first reserved word in the heap.
-  HeapWord* _heap_bottom;
-
-  // The address of the last reserved word in the heap - 1.
-  HeapWord* _heap_end;
-
-  // The log of the region byte size.
-  uint _region_shift;
+  uint _committed_length;
 
   // A hint for which index to start searching from for humongous
   // allocations.
@@ -81,37 +74,33 @@
   // The number of regions for which we have allocated HeapRegions for.
   uint _allocated_length;
 
-  // The maximum number of regions in the heap.
-  uint _max_length;
-
   // Find a contiguous set of empty regions of length num, starting
   // from the given index.
   uint find_contiguous_from(uint from, uint num);
 
-  // Map a heap address to a biased region index. Assume that the
-  // address is valid.
-  inline uintx addr_to_index_biased(HeapWord* addr) const;
-
   void increment_allocated_length() {
-    assert(_allocated_length < _max_length, "pre-condition");
+    assert(_allocated_length < max_length(), "pre-condition");
     _allocated_length++;
   }
 
   void increment_length() {
-    assert(_length < _max_length, "pre-condition");
-    _length++;
+    assert(length() < max_length(), "pre-condition");
+    _committed_length++;
   }
 
   void decrement_length() {
-    assert(_length > 0, "pre-condition");
-    _length--;
+    assert(length() > 0, "pre-condition");
+    _committed_length--;
   }
 
+  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+  HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
  public:
   // Empty constructor; we'll initialize it with the initialize() method.
-  HeapRegionSeq() { }
+  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
 
-  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
+  void initialize(HeapWord* bottom, HeapWord* end);
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
@@ -126,10 +115,10 @@
   inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
 
   // Return the number of regions that have been committed in the heap.
-  uint length() const { return _length; }
+  uint length() const { return _committed_length; }
 
   // Return the maximum number of regions in the heap.
-  uint max_length() const { return _max_length; }
+  uint max_length() const { return (uint)_regions.length(); }
 
   // Expand the sequence to reflect that the heap has grown from
   // old_end to new_end. Either create new HeapRegions, or re-use
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -28,28 +28,16 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 
-inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
-  assert(_heap_bottom <= addr && addr < _heap_end,
-         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
-                 addr, _heap_bottom, _heap_end));
-  uintx index = (uintx) addr >> _region_shift;
-  return index;
-}
-
 inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
-  assert(_heap_bottom <= addr && addr < _heap_end,
-         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
-                 addr, _heap_bottom, _heap_end));
-  uintx index_biased = addr_to_index_biased(addr);
-  HeapRegion* hr = _regions_biased[index_biased];
+  HeapRegion* hr = _regions.get_by_address(addr);
   assert(hr != NULL, "invariant");
   return hr;
 }
 
 inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
-  if (addr != NULL && addr < _heap_end) {
-    assert(addr >= _heap_bottom,
-          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
+  if (addr != NULL && addr < heap_end()) {
+    assert(addr >= heap_bottom(),
+          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
     return addr_to_region_unsafe(addr);
   }
   return NULL;
@@ -57,7 +45,7 @@
 
 inline HeapRegion* HeapRegionSeq::at(uint index) const {
   assert(index < length(), "pre-condition");
-  HeapRegion* hr = _regions[index];
+  HeapRegion* hr = _regions.get_by_index(index);
   assert(hr != NULL, "sanity");
   assert(hr->hrs_index() == index, "sanity");
   return hr;
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -38,6 +38,7 @@
 
 class PtrQueueSet;
 class PtrQueue VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
 
 protected:
   // The ptr queue set to which this queue belongs.
--- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -31,10 +31,17 @@
 
 #define VM_STRUCTS_G1(nonstatic_field, static_field)                          \
                                                                               \
-  static_field(HeapRegion, GrainBytes, size_t)                                \
+  static_field(HeapRegion, GrainBytes,        size_t)                         \
+  static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
                                                                               \
-  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
-  nonstatic_field(HeapRegionSeq,   _length,  uint)                            \
+  nonstatic_field(G1HeapRegionTable, _base,             address)              \
+  nonstatic_field(G1HeapRegionTable, _length,           size_t)               \
+  nonstatic_field(G1HeapRegionTable, _biased_base,      address)              \
+  nonstatic_field(G1HeapRegionTable, _bias,             size_t)               \
+  nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
+                                                                              \
+  nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
+  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
                                                                               \
   nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
   nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
@@ -57,6 +64,8 @@
 
 #define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
                                                                               \
+  declare_toplevel_type(G1HeapRegionTable)                                    \
+                                                                              \
   declare_type(G1CollectedHeap, SharedHeap)                                   \
                                                                               \
   declare_type(HeapRegion, ContiguousSpace)                                   \
--- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,9 +68,6 @@
   size_t min_old_gen_size()   { return _min_gen1_size; }
   size_t old_gen_size()       { return _initial_gen1_size; }
   size_t max_old_gen_size()   { return _max_gen1_size; }
-
-  size_t metaspace_size()      { return MetaspaceSize; }
-  size_t max_metaspace_size()  { return MaxMetaspaceSize; }
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -86,6 +86,11 @@
     set_alignment(_old_gen_alignment, intra_heap_alignment());
   }
 
+  // Return the (conservative) maximum heap alignment
+  static size_t conservative_max_heap_alignment() {
+    return intra_heap_alignment();
+  }
+
   // For use by VM operations
   enum CollectionType {
     Scavenge,
@@ -122,7 +127,7 @@
 
   // The alignment used for eden and survivors within the young gen
   // and for boundary between young gen and old gen.
-  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
+  static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
 
   size_t capacity() const;
   size_t used() const;
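
For a sense of scale: with HeapWordSize == 8 on an LP64 build (an assumption
of this example), the alignment above works out to 512 KiB. A trivial
standalone check of the arithmetic:

#include <cassert>
#include <cstddef>

int main() {
  const size_t K = 1024;
  const size_t HeapWordSize = 8;  // LP64 assumption
  const size_t intra_heap_alignment = 64 * K * HeapWordSize;
  assert(intra_heap_alignment == 512 * 1024);  // 512 KiB
  return 0;
}
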
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,9 @@
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_ALLOCATIONSTATS_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/shared/gcUtil.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/globalDefinitions.hpp"
-#endif // INCLUDE_ALL_GCS
+#include "gc_implementation/shared/gcUtil.hpp"
 
 class AllocationStats VALUE_OBJ_CLASS_SPEC {
   // A duration threshold (in ms) used to filter
--- a/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,11 +26,9 @@
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
 
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
 #include "gc_implementation/shared/generationCounters.hpp"
 #include "memory/generation.hpp"
 #include "runtime/perfData.hpp"
-#endif // INCLUDE_ALL_GCS
 
 // An HSpaceCounter is a holder class for performance counters
 // that track collections (logical spaces) in a heap;
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -87,15 +87,15 @@
   const MetaspaceSizes meta_space(
       MetaspaceAux::allocated_capacity_bytes(),
       MetaspaceAux::allocated_used_bytes(),
-      MetaspaceAux::reserved_in_bytes());
+      MetaspaceAux::reserved_bytes());
   const MetaspaceSizes data_space(
       MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
       MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
-      MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
+      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
   const MetaspaceSizes class_space(
       MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
       MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
-      MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));
+      MetaspaceAux::reserved_bytes(Metaspace::ClassType));
 
   return MetaspaceSummary(meta_space, data_space, class_space);
 }
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -496,15 +496,15 @@
 
 IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode))
   // resolve field
-  FieldAccessInfo info;
+  fieldDescriptor info;
   constantPoolHandle pool(thread, method(thread)->constants());
   bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_putstatic);
   bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
 
   {
     JvmtiHideSingleStepping jhss(thread);
-    LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
-                                bytecode, false, CHECK);
+    LinkResolver::resolve_field_access(info, pool, get_index_u2_cpcache(thread, bytecode),
+                                       bytecode, CHECK);
   } // end JvmtiHideSingleStepping
 
   // check if link resolution caused cpCache to be updated
@@ -524,7 +524,7 @@
   // class is initialized.  This is required so that access to the static
   // field will call the initialization function every time until the class
   // is completely initialized, per section 2.17.5 of the JVM Specification.
-  InstanceKlass *klass = InstanceKlass::cast(info.klass()());
+  InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
   bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
                                !klass->is_initialized());
   Bytecodes::Code get_code = (Bytecodes::Code)0;
@@ -539,9 +539,9 @@
   cache_entry(thread)->set_field(
     get_code,
     put_code,
-    info.klass(),
-    info.field_index(),
-    info.field_offset(),
+    info.field_holder(),
+    info.index(),
+    info.offset(),
     state,
     info.access_flags().is_final(),
     info.access_flags().is_volatile(),
@@ -686,29 +686,55 @@
   if (already_resolved(thread)) return;
 
   if (bytecode == Bytecodes::_invokeinterface) {
-
     if (TraceItables && Verbose) {
       ResourceMark rm(thread);
       tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
     }
+  }
+#ifdef ASSERT
+  if (bytecode == Bytecodes::_invokeinterface) {
     if (info.resolved_method()->method_holder() ==
                                             SystemDictionary::Object_klass()) {
       // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
-      // (see also cpCacheOop.cpp for details)
+      // (see also CallInfo::set_interface for details)
+      assert(info.call_kind() == CallInfo::vtable_call ||
+             info.call_kind() == CallInfo::direct_call, "");
       methodHandle rm = info.resolved_method();
       assert(rm->is_final() || info.has_vtable_index(),
              "should have been set already");
-      cache_entry(thread)->set_method(bytecode, rm, info.vtable_index());
+    } else if (!info.resolved_method()->has_itable_index()) {
+      // Resolved something like CharSequence.toString.  Use vtable not itable.
+      assert(info.call_kind() != CallInfo::itable_call, "");
     } else {
       // Setup itable entry
-      int index = klassItable::compute_itable_index(info.resolved_method()());
-      cache_entry(thread)->set_interface_call(info.resolved_method(), index);
+      assert(info.call_kind() == CallInfo::itable_call, "");
+      int index = info.resolved_method()->itable_index();
+      assert(info.itable_index() == index, "");
     }
   } else {
-    cache_entry(thread)->set_method(
+    assert(info.call_kind() == CallInfo::direct_call ||
+           info.call_kind() == CallInfo::vtable_call, "");
+  }
+#endif
+  switch (info.call_kind()) {
+  case CallInfo::direct_call:
+    cache_entry(thread)->set_direct_call(
+      bytecode,
+      info.resolved_method());
+    break;
+  case CallInfo::vtable_call:
+    cache_entry(thread)->set_vtable_call(
       bytecode,
       info.resolved_method(),
       info.vtable_index());
+    break;
+  case CallInfo::itable_call:
+    cache_entry(thread)->set_itable_call(
+      bytecode,
+      info.resolved_method(),
+      info.itable_index());
+    break;
+  default:  ShouldNotReachHere();
   }
 }
 IRT_END
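
Reduced to a standalone sketch, the new logic classifies the resolution result
once and then selects the matching cache-entry setter in a single switch. The
CacheEntry type below is a hypothetical stand-in for the constant pool cache
entry whose set_direct_call/set_vtable_call/set_itable_call members are
invoked above; the bodies are illustrative.

#include <cassert>

enum CallKind { direct_call, vtable_call, itable_call };

struct CacheEntry {
  void set_direct_call()                 { /* bind the resolved method */ }
  void set_vtable_call(int vtable_index) { (void)vtable_index; /* ... */ }
  void set_itable_call(int itable_index) { (void)itable_index; /* ... */ }
};

static void update_entry(CacheEntry& e, CallKind kind, int index) {
  switch (kind) {
  case direct_call: e.set_direct_call();      break;
  case vtable_call: e.set_vtable_call(index); break;
  case itable_call: e.set_itable_call(index); break;
  default:          assert(false && "unknown call kind");
  }
}
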
--- a/src/share/vm/interpreter/linkResolver.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/interpreter/linkResolver.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -46,19 +46,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 
-//------------------------------------------------------------------------------------------------------------------------
-// Implementation of FieldAccessInfo
-
-void FieldAccessInfo::set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
-BasicType field_type, AccessFlags access_flags) {
-  _klass        = klass;
-  _name         = name;
-  _field_index  = field_index;
-  _field_offset = field_offset;
-  _field_type   = field_type;
-  _access_flags = access_flags;
-}
-
 
 //------------------------------------------------------------------------------------------------------------------------
 // Implementation of CallInfo
@@ -66,26 +53,25 @@
 
 void CallInfo::set_static(KlassHandle resolved_klass, methodHandle resolved_method, TRAPS) {
   int vtable_index = Method::nonvirtual_vtable_index;
-  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
+  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
 }
 
 
-void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, TRAPS) {
+void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index, TRAPS) {
   // This is only called for interface methods. If the resolved_method
   // comes from java/lang/Object, it can be the subject of a virtual call, so
   // we should pick the vtable index from the resolved method.
-  // Other than that case, there is no valid vtable index to specify.
-  int vtable_index = Method::invalid_vtable_index;
-  if (resolved_method->method_holder() == SystemDictionary::Object_klass()) {
-    assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check");
-    vtable_index = resolved_method->vtable_index();
-  }
-  set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
+  // In that case, the caller must call set_virtual instead of set_interface.
+  assert(resolved_method->method_holder()->is_interface(), "");
+  assert(itable_index == resolved_method()->itable_index(), "");
+  set_common(resolved_klass, selected_klass, resolved_method, selected_method, CallInfo::itable_call, itable_index, CHECK);
 }
 
 void CallInfo::set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
   assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, "valid index");
-  set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
+  assert(vtable_index < 0 || !resolved_method->has_vtable_index() || vtable_index == resolved_method->vtable_index(), "");
+  CallKind kind = (vtable_index >= 0 && !resolved_method->can_be_statically_bound() ? CallInfo::vtable_call : CallInfo::direct_call);
+  set_common(resolved_klass, selected_klass, resolved_method, selected_method, kind, vtable_index, CHECK);
   assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call");
 }
 
@@ -98,20 +84,29 @@
          resolved_method->is_compiled_lambda_form(),
          "linkMethod must return one of these");
   int vtable_index = Method::nonvirtual_vtable_index;
-  assert(resolved_method->vtable_index() == vtable_index, "");
-  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
+  assert(!resolved_method->has_vtable_index(), "");
+  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
   _resolved_appendix    = resolved_appendix;
   _resolved_method_type = resolved_method_type;
 }
 
-void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
+void CallInfo::set_common(KlassHandle resolved_klass,
+                          KlassHandle selected_klass,
+                          methodHandle resolved_method,
+                          methodHandle selected_method,
+                          CallKind kind,
+                          int index,
+                          TRAPS) {
   assert(resolved_method->signature() == selected_method->signature(), "signatures must correspond");
   _resolved_klass  = resolved_klass;
   _selected_klass  = selected_klass;
   _resolved_method = resolved_method;
   _selected_method = selected_method;
-  _vtable_index    = vtable_index;
+  _call_kind       = kind;
+  _call_index      = index;
   _resolved_appendix = Handle();
+  DEBUG_ONLY(verify());  // verify before making side effects
+
   if (CompilationPolicy::must_be_compiled(selected_method)) {
     // This path is unusual, mostly used by the '-Xcomp' stress test mode.
 
@@ -138,6 +133,65 @@
   }
 }
 
+// utility query for unreflecting a method
+CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) {
+  Klass* resolved_method_holder = resolved_method->method_holder();
+  if (resolved_klass == NULL) { // 2nd argument defaults to holder of 1st
+    resolved_klass = resolved_method_holder;
+  }
+  _resolved_klass  = resolved_klass;
+  _selected_klass  = resolved_klass;
+  _resolved_method = resolved_method;
+  _selected_method = resolved_method;
+  // classify:
+  CallKind kind = CallInfo::unknown_kind;
+  int index = resolved_method->vtable_index();
+  if (resolved_method->can_be_statically_bound()) {
+    kind = CallInfo::direct_call;
+  } else if (!resolved_method_holder->is_interface()) {
+    // Could be an Object method inherited into an interface, but still a vtable call.
+    kind = CallInfo::vtable_call;
+  } else if (!resolved_klass->is_interface()) {
+    // A miranda method.  Compute the vtable index.
+    ResourceMark rm;
+    klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable();
+    index = vt->index_of_miranda(resolved_method->name(),
+                                 resolved_method->signature());
+    kind = CallInfo::vtable_call;
+  } else {
+    // A regular interface call.
+    kind = CallInfo::itable_call;
+    index = resolved_method->itable_index();
+  }
+  assert(index == Method::nonvirtual_vtable_index || index >= 0, err_msg("bad index %d", index));
+  _call_kind  = kind;
+  _call_index = index;
+  _resolved_appendix = Handle();
+  DEBUG_ONLY(verify());
+}
+
+#ifdef ASSERT
+void CallInfo::verify() {
+  switch (call_kind()) {  // the meaning and allowed value of index depends on kind
+  case CallInfo::direct_call:
+    if (_call_index == Method::nonvirtual_vtable_index)  break;
+    // else fall through to check vtable index:
+  case CallInfo::vtable_call:
+    assert(resolved_klass()->verify_vtable_index(_call_index), "");
+    break;
+  case CallInfo::itable_call:
+    assert(resolved_method()->method_holder()->verify_itable_index(_call_index), "");
+    break;
+  case CallInfo::unknown_kind:
+    assert(call_kind() != CallInfo::unknown_kind, "CallInfo must be set");
+    break;
+  default:
+    fatal(err_msg_res("Unexpected call kind %d", call_kind()));
+  }
+}
+#endif //ASSERT
+
+
 
 //------------------------------------------------------------------------------------------------------------------------
 // Klass resolution
@@ -163,13 +217,6 @@
   result = KlassHandle(THREAD, result_oop);
 }
 
-void LinkResolver::resolve_klass_no_update(KlassHandle& result, constantPoolHandle pool, int index, TRAPS) {
-  Klass* result_oop =
-         ConstantPool::klass_ref_at_if_loaded_check(pool, index, CHECK);
-  result = KlassHandle(THREAD, result_oop);
-}
-
-
 //------------------------------------------------------------------------------------------------------------------------
 // Method resolution
 //
@@ -360,7 +407,12 @@
 
 void LinkResolver::resolve_method_statically(methodHandle& resolved_method, KlassHandle& resolved_klass,
                                              Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS) {
-
+  // This method is used only
+  // (1) in C2 from InlineTree::ok_to_inline (via ciMethod::check_call),
+  // and
+  // (2) in Bytecode_invoke::static_target
+  // It appears to fail when applied to an invokeinterface call site.
+  // FIXME: Remove this method and ciMethod::check_call; refactor to use the other LinkResolver entry points.
   // resolve klass
   if (code == Bytecodes::_invokedynamic) {
     resolved_klass = SystemDictionary::MethodHandle_klass();
@@ -580,45 +632,49 @@
   }
 }
 
-void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS) {
-  resolve_field(result, pool, index, byte, check_only, true, CHECK);
+void LinkResolver::resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS) {
+  // Load these early in case the resolve of the containing klass fails
+  Symbol* field = pool->name_ref_at(index);
+  Symbol* sig   = pool->signature_ref_at(index);
+
+  // resolve specified klass
+  KlassHandle resolved_klass;
+  resolve_klass(resolved_klass, pool, index, CHECK);
+
+  KlassHandle  current_klass(THREAD, pool->pool_holder());
+  resolve_field(result, resolved_klass, field, sig, current_klass, byte, true, true, CHECK);
 }
 
-void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS) {
+void LinkResolver::resolve_field(fieldDescriptor& fd, KlassHandle resolved_klass, Symbol* field, Symbol* sig,
+                                 KlassHandle current_klass, Bytecodes::Code byte, bool check_access, bool initialize_class,
+                                 TRAPS) {
   assert(byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic ||
-         byte == Bytecodes::_getfield  || byte == Bytecodes::_putfield, "bad bytecode");
+         byte == Bytecodes::_getfield  || byte == Bytecodes::_putfield  ||
+         (byte == Bytecodes::_nop && !check_access), "bad field access bytecode");
 
   bool is_static = (byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic);
   bool is_put    = (byte == Bytecodes::_putfield  || byte == Bytecodes::_putstatic);
 
-  // resolve specified klass
-  KlassHandle resolved_klass;
-  if (update_pool) {
-    resolve_klass(resolved_klass, pool, index, CHECK);
-  } else {
-    resolve_klass_no_update(resolved_klass, pool, index, CHECK);
-  }
-  // Load these early in case the resolve of the containing klass fails
-  Symbol* field = pool->name_ref_at(index);
-  Symbol* sig   = pool->signature_ref_at(index);
   // Check if there's a resolved klass containing the field
-  if( resolved_klass.is_null() ) {
+  if (resolved_klass.is_null()) {
     ResourceMark rm(THREAD);
     THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
   }
 
   // Resolve instance field
-  fieldDescriptor fd; // find_field initializes fd if found
   KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
   // check if field exists; i.e., if a klass containing the field def has been selected
-  if (sel_klass.is_null()){
+  if (sel_klass.is_null()) {
     ResourceMark rm(THREAD);
     THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
   }
 
+  if (!check_access)
+    // Access checking may be turned off when calling from within the VM.
+    return;
+
   // check access
-  KlassHandle ref_klass(THREAD, pool->pool_holder());
-  check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK);
+  check_field_accessability(current_klass, resolved_klass, sel_klass, fd, CHECK);
 
   // check for errors
   if (is_static != fd.is_static()) {
@@ -629,7 +685,7 @@
   }
 
   // Final fields can only be accessed from its own class.
-  if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) {
+  if (is_put && fd.access_flags().is_final() && sel_klass() != current_klass()) {
     THROW(vmSymbols::java_lang_IllegalAccessError());
   }
 
@@ -639,19 +695,18 @@
   //
   // note 2: we don't want to force initialization if we are just checking
   //         if the field access is legal; e.g., during compilation
-  if (is_static && !check_only) {
+  if (is_static && initialize_class) {
     sel_klass->initialize(CHECK);
   }
 
-  {
+  if (sel_klass() != current_klass()) {
     HandleMark hm(THREAD);
-    Handle ref_loader (THREAD, InstanceKlass::cast(ref_klass())->class_loader());
+    Handle ref_loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
     Handle sel_loader (THREAD, InstanceKlass::cast(sel_klass())->class_loader());
-    Symbol*  signature_ref  = pool->signature_ref_at(index);
     {
       ResourceMark rm(THREAD);
       Symbol* failed_type_symbol =
-        SystemDictionary::check_signature_loaders(signature_ref,
+        SystemDictionary::check_signature_loaders(sig,
                                                   ref_loader, sel_loader,
                                                   false,
                                                   CHECK);
@@ -677,9 +732,6 @@
 
   // Return information: the fieldDescriptor already holds the actual klass
   // containing the field, otherwise access of static fields in superclasses
   // would not work.
-  KlassHandle holder (THREAD, fd.field_holder());
-  Symbol*  name   = fd.name();
-  result.set(holder, name, fd.index(), fd.offset(), fd.field_type(), fd.access_flags());
 }
 
 
@@ -907,10 +959,6 @@
   }
 
   // Virtual methods cannot be resolved before their klass has been linked; otherwise the Method*s
-  // has not been rewritten, and the vtable initialized.
-  assert(resolved_method->method_holder()->is_linked(), "must be linked");
-
-  // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
   // have not been rewritten, and the vtable not initialized. Make sure to do this after the nullcheck, since
   // a missing receiver might result in a bogus lookup.
   assert(resolved_method->method_holder()->is_linked(), "must be linked");
@@ -920,6 +968,7 @@
     vtable_index = vtable_index_of_miranda_method(resolved_klass,
                            resolved_method->name(),
                            resolved_method->signature(), CHECK);
+
     assert(vtable_index >= 0 , "we should have valid vtable index at this point");
 
     InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@@ -927,6 +976,7 @@
   } else {
     // at this point we are sure that resolved_method is virtual and not
     // a miranda method; therefore, it must have a valid vtable index.
+    assert(!resolved_method->has_itable_index(), "");
     vtable_index = resolved_method->vtable_index();
     // We could get a negative vtable_index for final methods,
     // because as an optimization they are never put in the vtable,
@@ -1006,6 +1056,12 @@
   lookup_instance_method_in_klasses(sel_method, recv_klass,
             resolved_method->name(),
             resolved_method->signature(), CHECK);
+  if (sel_method.is_null() && !check_null_and_abstract) {
+    // In theory this is a harmless placeholder value, but
+    // in practice leaving it null affects the nsk default method tests.
+    // This needs further study.
+    sel_method = resolved_method;
+  }
   // check if method exists
   if (sel_method.is_null()) {
     ResourceMark rm(THREAD);
@@ -1046,7 +1102,14 @@
                                                       sel_method->signature()));
   }
   // setup result
-  result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, CHECK);
+  if (!resolved_method->has_itable_index()) {
+    int vtable_index = resolved_method->vtable_index();
+    assert(vtable_index == sel_method->vtable_index(), "sanity check");
+    result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
+    return;
+  }
+  int itable_index = resolved_method()->itable_index();
+  result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
 }
 
 
@@ -1293,7 +1356,8 @@
   }
 
   if (TraceMethodHandles) {
-    tty->print_cr("resolve_invokedynamic #%d %s %s",
+    ResourceMark rm(THREAD);
+    tty->print_cr("resolve_invokedynamic #%d %s %s",
                   ConstantPool::decode_invokedynamic_index(index),
                   method_name->as_C_string(), method_signature->as_C_string());
     tty->print("  BSM info: "); bootstrap_specifier->print();
@@ -1342,9 +1406,16 @@
 //------------------------------------------------------------------------------------------------------------------------
 #ifndef PRODUCT
 
-void FieldAccessInfo::print() {
+void CallInfo::print() {
   ResourceMark rm;
-  tty->print_cr("Field %s@%d", name()->as_C_string(), field_offset());
+  const char* kindstr = "unknown";
+  switch (_call_kind) {
+  case direct_call: kindstr = "direct"; break;
+  case vtable_call: kindstr = "vtable"; break;
+  case itable_call: kindstr = "itable"; break;
+  }
+  tty->print_cr("Call %s@%d %s", kindstr, _call_index,
+                _resolved_method.is_null() ? "(none)" : _resolved_method->name_and_sig_as_C_string());
 }
 
 #endif
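
A compilable sketch of the classification order used by the new CallInfo(Method*, Klass*) constructor above (types simplified and hypothetical, not HotSpot code): statically bound methods become direct calls, methods held by a class dispatch through the vtable, mirandas get a computed vtable slot, and genuine interface calls go through the itable.

    enum CallKind { direct_call, vtable_call, itable_call };

    struct MethodDesc {
      bool statically_bound;     // final, private, static, <init>, ...
      bool holder_is_interface;
      int  vtable_index;         // meaningful for vtable dispatch
      int  itable_index;         // meaningful for itable dispatch
    };

    // Returns the call kind and, through 'index', the dispatch-table slot.
    static CallKind classify(const MethodDesc& m,
                             bool resolved_klass_is_interface, int& index) {
      if (m.statically_bound)     { index = m.vtable_index; return direct_call; }
      if (!m.holder_is_interface) { index = m.vtable_index; return vtable_call; }
      if (!resolved_klass_is_interface) {
        // Miranda case: HotSpot computes this slot via index_of_miranda().
        index = m.vtable_index;   return vtable_call;
      }
      index = m.itable_index;     return itable_call;
    }

    int main() {
      MethodDesc m = { false, true, -1, 2 };
      int index;
      return classify(m, true, index) == itable_call ? 0 : 1;
    }
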
--- a/src/share/vm/interpreter/linkResolver.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/interpreter/linkResolver.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,63 +30,54 @@
 
 // All the necessary definitions for run-time link resolution.
 
-// LinkInfo & its subclasses provide all the information gathered
-// for a particular link after resolving it. A link is any reference
+// CallInfo provides all the information gathered for a particular
+// linked call site after resolving it. A link is any reference
 // made from within the bytecodes of a method to an object outside of
 // that method. If the info is invalid, the link has not been resolved
 // successfully.
 
-class LinkInfo VALUE_OBJ_CLASS_SPEC {
-};
-
-
-// Link information for getfield/putfield & getstatic/putstatic bytecodes.
-
-class FieldAccessInfo: public LinkInfo {
- protected:
-  KlassHandle  _klass;
-  Symbol*      _name;
-  AccessFlags  _access_flags;
-  int          _field_index;  // original index in the klass
-  int          _field_offset;
-  BasicType    _field_type;
-
+class CallInfo VALUE_OBJ_CLASS_SPEC {
  public:
-  void         set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
-                 BasicType field_type, AccessFlags access_flags);
-  KlassHandle  klass() const                     { return _klass; }
-  Symbol* name() const                           { return _name; }
-  int          field_index() const               { return _field_index; }
-  int          field_offset() const              { return _field_offset; }
-  BasicType    field_type() const                { return _field_type; }
-  AccessFlags  access_flags() const              { return _access_flags; }
-
-  // debugging
-  void print()  PRODUCT_RETURN;
-};
-
-
-// Link information for all calls.
-
-class CallInfo: public LinkInfo {
+  // Ways that a method call might be selected (or not) based on receiver type.
+  // Note that an invokevirtual instruction might be linked as a direct_call (no dispatch),
+  // and an invokeinterface instruction might be linked with any of the three options.
+  enum CallKind {
+    direct_call,                        // jump into resolved_method (must be concrete)
+    vtable_call,                        // select recv.klass.method_at_vtable(index)
+    itable_call,                        // select recv.klass.method_at_itable(resolved_method.holder, index)
+    unknown_kind = -1
+  };
  private:
-  KlassHandle  _resolved_klass;         // static receiver klass
+  KlassHandle  _resolved_klass;         // static receiver klass, resolved from a symbolic reference
   KlassHandle  _selected_klass;         // dynamic receiver class (same as static, or subklass)
   methodHandle _resolved_method;        // static target method
   methodHandle _selected_method;        // dynamic (actual) target method
-  int          _vtable_index;           // vtable index of selected method
+  CallKind     _call_kind;              // kind of call (direct: static/special bytecodes
+                                        //               and other statically bound calls; vtable; itable)
+  int          _call_index;             // vtable or itable index of selected class method (if any)
   Handle       _resolved_appendix;      // extra argument in constant pool (if CPCE::has_appendix)
   Handle       _resolved_method_type;   // MethodType (for invokedynamic and invokehandle call sites)
 
   void         set_static(   KlassHandle resolved_klass,                             methodHandle resolved_method                                                       , TRAPS);
-  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method                         , TRAPS);
+  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index       , TRAPS);
   void         set_virtual(  KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
   void         set_handle(                                                           methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS);
-  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
+  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, CallKind kind, int index, TRAPS);
 
   friend class LinkResolver;
 
  public:
+  CallInfo() {
+#ifndef PRODUCT
+    _call_kind  = CallInfo::unknown_kind;
+    _call_index = Method::garbage_vtable_index;
+#endif //PRODUCT
+  }
+
+  // utility to extract an effective CallInfo from a method and an optional receiver limit
+  // does not queue the method for compilation
+  CallInfo(Method* resolved_method, Klass* resolved_klass = NULL);
+
   KlassHandle  resolved_klass() const            { return _resolved_klass; }
   KlassHandle  selected_klass() const            { return _selected_klass; }
   methodHandle resolved_method() const           { return _resolved_method; }
@@ -95,21 +86,43 @@
   Handle       resolved_method_type() const      { return _resolved_method_type; }
 
   BasicType    result_type() const               { return selected_method()->result_type(); }
-  bool         has_vtable_index() const          { return _vtable_index >= 0; }
-  bool         is_statically_bound() const       { return _vtable_index == Method::nonvirtual_vtable_index; }
+  CallKind     call_kind() const                 { return _call_kind; }
+  int          call_index() const                { return _call_index; }
   int          vtable_index() const {
     // Even for interface calls the vtable index could be non-negative.
     // See CallInfo::set_interface.
     assert(has_vtable_index() || is_statically_bound(), "");
-    return _vtable_index;
+    assert(call_kind() == vtable_call || call_kind() == direct_call, "");
+    // The returned value is < 0 if the call is statically bound.
+    // But, the returned value may be >= 0 even if the kind is direct_call.
+    // It is up to the caller to decide which way to go.
+    return _call_index;
   }
+  int          itable_index() const {
+    assert(call_kind() == itable_call, "");
+    // The returned value is always >= 0, a valid itable index.
+    return _call_index;
+  }
+
+  // debugging
+#ifdef ASSERT
+  bool         has_vtable_index() const          { return _call_index >= 0 && _call_kind != CallInfo::itable_call; }
+  bool         is_statically_bound() const       { return _call_index == Method::nonvirtual_vtable_index; }
+#endif //ASSERT
+  void         verify() PRODUCT_RETURN;
+  void         print()  PRODUCT_RETURN;
 };
 
+// Link information for getfield/putfield & getstatic/putstatic bytecodes
+// is represented using a fieldDescriptor.
 
 // The LinkResolver is used to resolve constant-pool references at run-time.
 // It does all necessary link-time checks & throws exceptions if necessary.
 
 class LinkResolver: AllStatic {
+  friend class klassVtable;
+  friend class klassItable;
+
  private:
   static void lookup_method_in_klasses          (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
   static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
@@ -120,7 +133,6 @@
   static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
 
   static void resolve_klass           (KlassHandle& result, constantPoolHandle  pool, int index, TRAPS);
-  static void resolve_klass_no_update (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); // no update of constantPool entry
 
   static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
 
@@ -148,9 +160,16 @@
                                         Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS);
 
   // runtime/static resolving for fields
-  static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS);
-  // takes an extra bool argument "update_pool" to decide whether to update the constantPool during klass resolution.
-  static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS);
+  static void resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS);
+  static void resolve_field(fieldDescriptor& result, KlassHandle resolved_klass, Symbol* field_name, Symbol* field_signature,
+                            KlassHandle current_klass, Bytecodes::Code access_kind, bool check_access, bool initialize_class, TRAPS);
+
+  // source of access_kind codes:
+  static Bytecodes::Code field_access_kind(bool is_static, bool is_put) {
+    return (is_static
+            ? (is_put ? Bytecodes::_putstatic : Bytecodes::_getstatic)
+            : (is_put ? Bytecodes::_putfield  : Bytecodes::_getfield ));
+  }
 
   // runtime resolving:
   //   resolved_klass = specified class (i.e., static receiver class)
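
The new field_access_kind() helper is small enough to check exhaustively; a standalone sketch of the same mapping (the enum values below are illustrative stand-ins for the real Bytecodes):

    #include <cstdio>

    enum Bytecode { getfield, putfield, getstatic, putstatic };

    static Bytecode field_access_kind(bool is_static, bool is_put) {
      return is_static ? (is_put ? putstatic : getstatic)
                       : (is_put ? putfield  : getfield);
    }

    int main() {
      std::printf("%d %d %d %d\n",
                  field_access_kind(false, false),   // getfield
                  field_access_kind(false, true),    // putfield
                  field_access_kind(true,  false),   // getstatic
                  field_access_kind(true,  true));   // putstatic
      return 0;
    }
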
--- a/src/share/vm/memory/binaryTreeDictionary.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,10 +33,10 @@
 #include "runtime/globals.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/macros.hpp"
+#include "gc_implementation/shared/spaceDecorator.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
-#include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // INCLUDE_ALL_GCS
 
--- a/src/share/vm/memory/collectorPolicy.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/collectorPolicy.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -47,6 +47,11 @@
 
 // CollectorPolicy methods.
 
+// Align down. If the result of aligning down is 0, return 'alignment' instead.
+static size_t restricted_align_down(size_t size, size_t alignment) {
+  return MAX2(alignment, align_size_down_(size, alignment));
+}
+
 void CollectorPolicy::initialize_flags() {
   assert(max_alignment() >= min_alignment(),
       err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
@@ -59,18 +64,24 @@
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
 
-  if (MetaspaceSize > MaxMetaspaceSize) {
-    MaxMetaspaceSize = MetaspaceSize;
+  if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
+    FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
+        restricted_align_down(MaxMetaspaceSize, max_alignment()));
   }
-  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
-  // Don't increase Metaspace size limit above specified.
-  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
+
   if (MetaspaceSize > MaxMetaspaceSize) {
-    MetaspaceSize = MaxMetaspaceSize;
+    FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
   }
 
-  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
-  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));
+  if (!is_size_aligned(MetaspaceSize, min_alignment())) {
+    FLAG_SET_ERGO(uintx, MetaspaceSize,
+        restricted_align_down(MetaspaceSize, min_alignment()));
+  }
+
+  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
+
+  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
+  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
 
   MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
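
A standalone sketch of restricted_align_down() with concrete numbers (a power-of-two alignment is assumed, as for align_size_down_ in HotSpot):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static size_t align_size_down(size_t size, size_t alignment) {
      return size & ~(alignment - 1);   // power-of-two alignment assumed
    }

    static size_t restricted_align_down(size_t size, size_t alignment) {
      return std::max(alignment, align_size_down(size, alignment));
    }

    int main() {
      // With 4K alignment: 10000 aligns down to 8192, while 1000 would align
      // down to 0 and is therefore clamped to the alignment itself, 4096.
      std::printf("%zu %zu\n", restricted_align_down(10000, 4096),
                               restricted_align_down(1000, 4096));
      return 0;
    }
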
 
@@ -145,6 +156,30 @@
   _all_soft_refs_clear = true;
 }
 
+size_t CollectorPolicy::compute_max_alignment() {
+  // The card marking array and the offset arrays for old generations are
+  // committed in os pages as well. Make sure they are entirely full (to
+  // avoid partial page problems); e.g. if 512 bytes of heap correspond to 1
+  // byte of entry and the os page size is 4096, the maximum heap size should
+  // be 512*4096 = 2MB aligned.
+
+  // HotSpot has only the GenRemSet, and only its GenRemSet::CardTable
+  // variant is supported.
+  // Requirements of any new remembered set implementations must be added here.
+  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+
+  // Parallel GC does its own alignment of the generations to avoid requiring a
+  // large page (256M on some platforms) for the permanent generation.  The
+  // other collectors should also be updated to do their own alignment and then
+  // this use of lcm() should be removed.
+  if (UseLargePages && !UseParallelGC) {
+      // in the presence of large pages we have to make sure that our
+      // alignment is large-page aware
+      alignment = lcm(os::large_page_size(), alignment);
+  }
+
+  return alignment;
+}
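
A worked example of the lcm step above (illustrative values; std::lcm stands in for HotSpot's lcm()): the card-table constraint alone gives 512 * 4096 = 2M, and enabling 256M large pages raises the result to 256M, since 2M divides 256M.

    #include <cstddef>
    #include <cstdio>
    #include <numeric>   // std::lcm (C++17)

    int main() {
      const size_t card_alignment = 512ull * 4096;          // 2M
      const size_t large_page     = 256ull * 1024 * 1024;   // 256M
      std::printf("%zu\n", std::lcm(large_page, card_alignment));  // 268435456
      return 0;
    }
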
 
 // GenCollectorPolicy methods.
 
@@ -175,29 +210,6 @@
                                         GCTimeRatio);
 }
 
-size_t GenCollectorPolicy::compute_max_alignment() {
-  // The card marking array and the offset arrays for old generations are
-  // committed in os pages as well. Make sure they are entirely full (to
-  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
-  // byte entry and the os page size is 4096, the maximum heap size should
-  // be 512*4096 = 2MB aligned.
-  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
-
-  // Parallel GC does its own alignment of the generations to avoid requiring a
-  // large page (256M on some platforms) for the permanent generation.  The
-  // other collectors should also be updated to do their own alignment and then
-  // this use of lcm() should be removed.
-  if (UseLargePages && !UseParallelGC) {
-      // in presence of large pages we have to make sure that our
-      // alignment is large page aware
-      alignment = lcm(os::large_page_size(), alignment);
-  }
-
-  assert(alignment >= min_alignment(), "Must be");
-
-  return alignment;
-}
-
 void GenCollectorPolicy::initialize_flags() {
   // All sizes must be multiples of the generation granularity.
   set_min_alignment((uintx) Generation::GenGrain);
--- a/src/share/vm/memory/collectorPolicy.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/collectorPolicy.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -98,6 +98,9 @@
   {}
 
  public:
+  // Return maximum heap alignment that may be imposed by the policy
+  static size_t compute_max_alignment();
+
   void set_min_alignment(size_t align)         { _min_alignment = align; }
   size_t min_alignment()                       { return _min_alignment; }
   void set_max_alignment(size_t align)         { _max_alignment = align; }
@@ -234,9 +237,6 @@
   // Try to allocate space by expanding the heap.
   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 
-  // compute max heap alignment
-  size_t compute_max_alignment();
-
  // Scale the base_size by NewRatio according to
  //     result = base_size / (NewRatio + 1)
  // and align by min_alignment()
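
Worked example (illustrative values): with NewRatio=2 and base_size=96M, the scaled size is 96M / (2 + 1) = 32M, which is then aligned down to a multiple of min_alignment().
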
--- a/src/share/vm/memory/gcLocker.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/gcLocker.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -122,7 +122,7 @@
     // strictly needed. It's added here to make it clear that
     // the GC will NOT be performed if any other caller
     // of GC_locker::lock() still needs GC locked.
-    if (!is_active()) {
+    if (!is_active_internal()) {
       _doing_gc = true;
       {
         // Must give up the lock while at a safepoint
--- a/src/share/vm/memory/gcLocker.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/gcLocker.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -88,7 +88,7 @@
  public:
   // Accessors
   static bool is_active() {
-    assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
     return is_active_internal();
   }
   static bool needs_gc()       { return _needs_gc;                        }
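
The tightened assert holds because VM-internal callers, such as the jni_lock() path changed in gcLocker.cpp above, now query is_active_internal() directly. A simplified sketch of the pattern (not HotSpot code): the public accessor asserts its safepoint precondition, while the _internal variant serves callers that already hold the protecting lock.

    #include <cassert>

    struct GCLockerSketch {
      int  _jni_active_count = 0;
      bool _at_safepoint     = false;   // stand-in for SafepointSynchronize

      bool is_active_internal() const { return _jni_active_count > 0; }
      bool is_active() const {
        assert(_at_safepoint && "only read at safepoint");
        return is_active_internal();
      }
    };

    int main() {
      GCLockerSketch l;
      l._jni_active_count = 1;
      // A lock-holding internal path skips the safepoint assert.
      return l.is_active_internal() ? 0 : 1;
    }
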
--- a/src/share/vm/memory/genCollectedHeap.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -148,6 +148,11 @@
     return gen_policy()->size_policy();
   }
 
+  // Return the (conservative) maximum heap alignment
+  static size_t conservative_max_heap_alignment() {
+    return Generation::GenGrain;
+  }
+
   size_t capacity() const;
   size_t used() const;
 
--- a/src/share/vm/memory/metablock.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/metablock.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -50,13 +50,6 @@
 // Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
 size_t Metablock::_min_block_byte_size = sizeof(Metablock);
 
-#ifdef ASSERT
-size_t Metablock::_overhead =
-  Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
-#else
-size_t Metablock::_overhead = 0;
-#endif
-
 // New blocks returned by the Metaspace are zero initialized.
 // We should instead fix the constructors to not assume this.
 Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
--- a/src/share/vm/memory/metablock.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/metablock.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -48,7 +48,6 @@
     } _header;
   } _block;
   static size_t _min_block_byte_size;
-  static size_t _overhead;
 
   typedef union block_t Block;
   typedef struct header_t Header;
@@ -73,7 +72,6 @@
   void set_prev(Metablock* v) { _block._header._prev = v; }
 
   static size_t min_block_byte_size() { return _min_block_byte_size; }
-  static size_t overhead() { return _overhead; }
 
   bool is_free()                 { return header()->_word_size != 0; }
   void clear_next()              { set_next(NULL); }
--- a/src/share/vm/memory/metaspace.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/metaspace.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -23,6 +23,7 @@
  */
 #include "precompiled.hpp"
 #include "gc_interface/collectedHeap.hpp"
+#include "memory/allocation.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/freeList.hpp"
 #include "memory/collectorPolicy.hpp"
@@ -51,7 +52,7 @@
 // Parameters for stress mode testing
 const uint metadata_deallocate_a_lot_block = 10;
 const uint metadata_deallocate_a_lock_chunk = 3;
-size_t const allocation_from_dictionary_limit = 64 * K;
+size_t const allocation_from_dictionary_limit = 4 * K;
 
 MetaWord* last_allocated = 0;
 
@@ -111,7 +112,7 @@
 // Has three lists of free chunks, and a total size and
 // count that includes all three
 
-class ChunkManager VALUE_OBJ_CLASS_SPEC {
+class ChunkManager : public CHeapObj<mtInternal> {
 
   // Free list of chunks of different sizes.
   //   SpecializedChunk
@@ -158,7 +159,12 @@
 
  public:
 
-  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
+  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
+      : _free_chunks_total(0), _free_chunks_count(0) {
+    _free_chunks[SpecializedIndex].set_size(specialized_size);
+    _free_chunks[SmallIndex].set_size(small_size);
+    _free_chunks[MediumIndex].set_size(medium_size);
+  }
 
   // add or delete (return) a chunk to the global freelist.
   Metachunk* chunk_freelist_allocate(size_t word_size);
@@ -177,8 +183,8 @@
   void return_chunks(ChunkIndex index, Metachunk* chunks);
 
   // Total of the space in the free chunks list
-  size_t free_chunks_total();
-  size_t free_chunks_total_in_bytes();
+  size_t free_chunks_total_words();
+  size_t free_chunks_total_bytes();
 
   // Number of chunks in the free chunks list
   size_t free_chunks_count();
@@ -219,7 +225,7 @@
   void locked_print_free_chunks(outputStream* st);
   void locked_print_sum_free_chunks(outputStream* st);
 
-  void print_on(outputStream* st);
+  void print_on(outputStream* st) const;
 };
 
 // Used to manage the free list of Metablocks (a block corresponds
@@ -228,6 +234,10 @@
   BlockTreeDictionary* _dictionary;
   static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
 
+  // Only allocate and split from freelist if the size of the allocation
+  // is at least 1/4th the size of the available block.
+  const static int WasteMultiplier = 4;
+
   // Accessors
   BlockTreeDictionary* dictionary() const { return _dictionary; }
 
@@ -272,11 +282,6 @@
   // VirtualSpace
   Metachunk* first_chunk() { return (Metachunk*) bottom(); }
 
-  void inc_container_count();
-#ifdef ASSERT
-  uint container_count_slow();
-#endif
-
  public:
 
   VirtualSpaceNode(size_t byte_size);
@@ -287,6 +292,10 @@
   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 
+  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
+  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
+  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
+
   // address of next available space in _virtual_space;
   // Accessors
   VirtualSpaceNode* next() { return _next; }
@@ -306,8 +315,10 @@
   void inc_top(size_t word_size) { _top += word_size; }
 
   uintx container_count() { return _container_count; }
+  void inc_container_count();
   void dec_container_count();
 #ifdef ASSERT
+  uint container_count_slow();
   void verify_container_count();
 #endif
 
@@ -323,12 +334,10 @@
 
   // Allocate a chunk from the virtual space and return it.
   Metachunk* get_chunk_vs(size_t chunk_word_size);
-  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
 
   // Expands/shrinks the committed space in a virtual space.  Delegates
   // to Virtualspace
   bool expand_by(size_t words, bool pre_touch = false);
-  bool shrink_by(size_t words);
 
   // In preparation for deleting this node, remove all the chunks
   // in the node from any freelist.
@@ -336,8 +345,6 @@
 
 #ifdef ASSERT
   // Debug support
-  static void verify_virtual_space_total();
-  static void verify_virtual_space_count();
   void mangle();
 #endif
 
@@ -417,16 +424,17 @@
   VirtualSpaceNode* _virtual_space_list;
   // virtual space currently being used for allocations
   VirtualSpaceNode* _current_virtual_space;
-  // Free chunk list for all other metadata
-  ChunkManager      _chunk_manager;
 
   // Can this virtual list allocate more than one space?  Also, used to determine
   // whether to allocate unlimited small chunks in this virtual space
   bool _is_class;
-  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
-
-  // Sum of space in all virtual spaces and number of virtual spaces
-  size_t _virtual_space_total;
+  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
+
+  // Sum of reserved and committed memory in the virtual spaces
+  size_t _reserved_words;
+  size_t _committed_words;
+
+  // Number of virtual spaces
   size_t _virtual_space_count;
 
   ~VirtualSpaceList();
@@ -440,7 +448,7 @@
     _current_virtual_space = v;
   }
 
-  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
+  void link_vs(VirtualSpaceNode* new_entry);
 
   // Get another virtual space and add it to the list.  This
   // is typically prompted by a failed attempt to allocate a chunk
@@ -457,6 +465,8 @@
                            size_t grow_chunks_by_words,
                            size_t medium_chunk_bunch);
 
+  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
+
   // Get the first chunk for a Metaspace.  Used for
   // special cases such as the boot class loader, reflection
   // class loader and anonymous class loader.
@@ -466,28 +476,25 @@
     return _current_virtual_space;
   }
 
-  ChunkManager* chunk_manager() { return &_chunk_manager; }
   bool is_class() const { return _is_class; }
 
   // Allocate the first virtualspace.
   void initialize(size_t word_size);
 
-  size_t virtual_space_total() { return _virtual_space_total; }
-
-  void inc_virtual_space_total(size_t v);
-  void dec_virtual_space_total(size_t v);
+  size_t reserved_words()  { return _reserved_words; }
+  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
+  size_t committed_words() { return _committed_words; }
+  size_t committed_bytes() { return committed_words() * BytesPerWord; }
+
+  void inc_reserved_words(size_t v);
+  void dec_reserved_words(size_t v);
+  void inc_committed_words(size_t v);
+  void dec_committed_words(size_t v);
   void inc_virtual_space_count();
   void dec_virtual_space_count();
 
   // Unlink empty VirtualSpaceNodes and free them.
-  void purge();
-
-  // Used and capacity in the entire list of virtual spaces.
-  // These are global values shared by all Metaspaces
-  size_t capacity_words_sum();
-  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
-  size_t used_words_sum();
-  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
+  void purge(ChunkManager* chunk_manager);
 
   bool contains(const void *ptr);
 
@@ -568,18 +575,12 @@
   // Type of metadata allocated.
   Metaspace::MetadataType _mdtype;
 
-  // Chunk related size
-  size_t _medium_chunk_bunch;
-
   // List of chunks in use by this SpaceManager.  Allocations
   // are done from the current chunk.  The list is used for deallocating
   // chunks when the SpaceManager is freed.
   Metachunk* _chunks_in_use[NumberOfInUseLists];
   Metachunk* _current_chunk;
 
-  // Virtual space where allocation comes from.
-  VirtualSpaceList* _vs_list;
-
   // Number of small chunks to allocate to a manager
   // If class space manager, small chunks are unlimited
   static uint const _small_chunk_limit;
@@ -612,7 +613,9 @@
   }
 
   Metaspace::MetadataType mdtype() { return _mdtype; }
-  VirtualSpaceList* vs_list() const    { return _vs_list; }
+
+  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
+  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
 
   Metachunk* current_chunk() const { return _current_chunk; }
   void set_current_chunk(Metachunk* v) {
@@ -623,6 +626,7 @@
 
   // Add chunk to the list of chunks in use
   void add_chunk(Metachunk* v, bool make_current);
+  void retire_current_chunk();
 
   Mutex* lock() const { return _lock; }
 
@@ -633,18 +637,19 @@
 
  public:
   SpaceManager(Metaspace::MetadataType mdtype,
-               Mutex* lock,
-               VirtualSpaceList* vs_list);
+               Mutex* lock);
   ~SpaceManager();
 
   enum ChunkMultiples {
     MediumChunkMultiple = 4
   };
 
+  bool is_class() { return _mdtype == Metaspace::ClassType; }
+
   // Accessors
   size_t specialized_chunk_size() { return SpecializedChunk; }
-  size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
-  size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
+  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
+  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
   size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
 
   size_t allocated_blocks_words() const { return _allocated_blocks_words; }
@@ -722,9 +727,7 @@
     // MinChunkSize is a placeholder for the real minimum size JJJ
     size_t byte_size = word_size * BytesPerWord;
 
-    size_t byte_size_with_overhead = byte_size + Metablock::overhead();
-
-    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
+    size_t raw_bytes_size = MAX2(byte_size,
                                  Metablock::min_block_byte_size());
     raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
     size_t raw_word_size = raw_bytes_size / BytesPerWord;
@@ -749,7 +752,7 @@
   _container_count++;
   assert(_container_count == container_count_slow(),
          err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
-                 "container_count_slow() " SIZE_FORMAT,
+                 " container_count_slow() " SIZE_FORMAT,
                  _container_count, container_count_slow()));
 }
 
@@ -762,7 +765,7 @@
 void VirtualSpaceNode::verify_container_count() {
   assert(_container_count == container_count_slow(),
     err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
-            "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
+            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
 }
 #endif
 
@@ -807,12 +810,25 @@
   }
 
   Metablock* free_block =
-    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
+    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
   if (free_block == NULL) {
     return NULL;
   }
 
-  return (MetaWord*) free_block;
+  const size_t block_size = free_block->size();
+  if (block_size > WasteMultiplier * word_size) {
+    return_block((MetaWord*)free_block, block_size);
+    return NULL;
+  }
+
+  MetaWord* new_block = (MetaWord*)free_block;
+  assert(block_size >= word_size, "Incorrect size of block from freelist");
+  const size_t unused = block_size - word_size;
+  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
+    return_block(new_block + word_size, unused);
+  }
+
+  return new_block;
 }
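
A standalone sketch of the allocation policy introduced above (a multiset of block sizes stands in for the BlockTreeDictionary; not HotSpot code): take the smallest block of at least the requested size, reject it when it exceeds WasteMultiplier times the request, and split any usable remainder back onto the list.

    #include <cstddef>
    #include <set>

    static const size_t min_split_size  = 4;  // stand-in for TreeChunk min_size()
    static const size_t WasteMultiplier = 4;

    static std::multiset<size_t> freelist;    // block sizes; lower_bound ~ atLeast

    static bool get_block(size_t word_size, size_t& got) {
      std::multiset<size_t>::iterator it = freelist.lower_bound(word_size);
      if (it == freelist.end()) return false;               // nothing big enough
      const size_t block_size = *it;
      if (block_size > WasteMultiplier * word_size) return false;  // too wasteful
      freelist.erase(it);
      const size_t unused = block_size - word_size;
      if (unused >= min_split_size) freelist.insert(unused);  // return the tail
      got = word_size;
      return true;
    }

    int main() {
      freelist.insert(100);
      size_t got = 0;
      bool small_ok = get_block(10, got);  // false: 100 > 4 * 10, left in place
      bool big_ok   = get_block(30, got);  // true: 100 taken, 70-word tail kept
      return (!small_ok && big_ok) ? 0 : 1;
    }
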
 
 void BlockFreelist::print_on(outputStream* st) const {
@@ -855,9 +871,9 @@
 
   if (!is_available(chunk_word_size)) {
     if (TraceMetadataChunkAllocation) {
-      tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
+      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
       // Dump some information about the virtual space that is nearly full
-      print_on(tty);
+      print_on(gclog_or_tty);
     }
     return NULL;
   }
@@ -878,20 +894,11 @@
   if (TraceMetavirtualspaceAllocation && !result) {
     gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
                            "for byte size " SIZE_FORMAT, bytes);
-    virtual_space()->print();
+    virtual_space()->print_on(gclog_or_tty);
   }
   return result;
 }
 
-// Shrink the virtual space (commit more of the reserved space)
-bool VirtualSpaceNode::shrink_by(size_t words) {
-  size_t bytes = words * BytesPerWord;
-  virtual_space()->shrink_by(bytes);
-  return true;
-}
-
-// Add another chunk to the chunk list.
-
 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
   Metachunk* result = take_from_committed(chunk_word_size);
@@ -901,23 +908,6 @@
   return result;
 }
 
-Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
-  assert_lock_strong(SpaceManager::expand_lock());
-
-  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
-
-  if (new_chunk == NULL) {
-    // Only a small part of the virtualspace is committed when first
-    // allocated so committing more here can be expected.
-    size_t page_size_words = os::vm_page_size() / BytesPerWord;
-    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
-                                                    page_size_words);
-    expand_by(aligned_expand_vs_by_words, false);
-    new_chunk = get_chunk_vs(chunk_word_size);
-  }
-  return new_chunk;
-}
-
 bool VirtualSpaceNode::initialize() {
 
   if (!_rs.is_reserved()) {
@@ -977,13 +967,22 @@
   }
 }
 
-void VirtualSpaceList::inc_virtual_space_total(size_t v) {
+void VirtualSpaceList::inc_reserved_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
-  _virtual_space_total = _virtual_space_total + v;
+  _reserved_words = _reserved_words + v;
+}
+void VirtualSpaceList::dec_reserved_words(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _reserved_words = _reserved_words - v;
 }
-void VirtualSpaceList::dec_virtual_space_total(size_t v) {
+
+void VirtualSpaceList::inc_committed_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
-  _virtual_space_total = _virtual_space_total - v;
+  _committed_words = _committed_words + v;
+}
+void VirtualSpaceList::dec_committed_words(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _committed_words = _committed_words - v;
 }
 
 void VirtualSpaceList::inc_virtual_space_count() {
@@ -1011,7 +1010,7 @@
 // Walk the list of VirtualSpaceNodes and delete
 // nodes with a 0 container_count.  Remove Metachunks in
 // the node from their respective freelists.
-void VirtualSpaceList::purge() {
+void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
   assert_lock_strong(SpaceManager::expand_lock());
   // Don't use a VirtualSpaceListIterator because this
   // list is being changed and a straightforward use of an iterator is not safe.
@@ -1033,8 +1032,9 @@
         prev_vsl->set_next(vsl->next());
       }
 
-      vsl->purge(chunk_manager());
-      dec_virtual_space_total(vsl->reserved()->word_size());
+      vsl->purge(chunk_manager);
+      dec_reserved_words(vsl->reserved_words());
+      dec_committed_words(vsl->committed_words());
       dec_virtual_space_count();
       purged_vsl = vsl;
       delete vsl;
@@ -1054,49 +1054,16 @@
 #endif
 }
 
-size_t VirtualSpaceList::used_words_sum() {
-  size_t allocated_by_vs = 0;
-  VirtualSpaceListIterator iter(virtual_space_list());
-  while (iter.repeat()) {
-    VirtualSpaceNode* vsl = iter.get_next();
-    // Sum used region [bottom, top) in each virtualspace
-    allocated_by_vs += vsl->used_words_in_vs();
-  }
-  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
-    err_msg("Total in free chunks " SIZE_FORMAT
-            " greater than total from virtual_spaces " SIZE_FORMAT,
-            allocated_by_vs, chunk_manager()->free_chunks_total()));
-  size_t used =
-    allocated_by_vs - chunk_manager()->free_chunks_total();
-  return used;
-}
-
-// Space available in all MetadataVirtualspaces allocated
-// for metadata.  This is the upper limit on the capacity
-// of chunks allocated out of all the MetadataVirtualspaces.
-size_t VirtualSpaceList::capacity_words_sum() {
-  size_t capacity = 0;
-  VirtualSpaceListIterator iter(virtual_space_list());
-  while (iter.repeat()) {
-    VirtualSpaceNode* vsl = iter.get_next();
-    capacity += vsl->capacity_words_in_vs();
-  }
-  return capacity;
-}
-
 VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
                                    _is_class(false),
                                    _virtual_space_list(NULL),
                                    _current_virtual_space(NULL),
-                                   _virtual_space_total(0),
+                                   _reserved_words(0),
+                                   _committed_words(0),
                                    _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
   bool initialization_succeeded = grow_vs(word_size);
-
-  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
-  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
-  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
   assert(initialization_succeeded,
     " VirtualSpaceList initialization should not fail");
 }
@@ -1105,17 +1072,15 @@
                                    _is_class(true),
                                    _virtual_space_list(NULL),
                                    _current_virtual_space(NULL),
-                                   _virtual_space_total(0),
+                                   _reserved_words(0),
+                                   _committed_words(0),
                                    _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
   bool succeeded = class_entry->initialize();
-  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
-  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
-  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
   assert(succeeded, " VirtualSpaceList initialization should not fail");
-  link_vs(class_entry, rs.size()/BytesPerWord);
+  link_vs(class_entry);
 }
 
 size_t VirtualSpaceList::free_bytes() {
@@ -1130,7 +1095,7 @@
   }
   // Reserve the space
   size_t vs_byte_size = vs_word_size * BytesPerWord;
-  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
+  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
 
   // Allocate the meta virtual space and initialize it.
   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
@@ -1138,44 +1103,53 @@
     delete new_entry;
     return false;
   } else {
+    assert(new_entry->reserved_words() == vs_word_size, "Must be");
     // ensure lock-free iteration sees fully initialized node
     OrderAccess::storestore();
-    link_vs(new_entry, vs_word_size);
+    link_vs(new_entry);
     return true;
   }
 }
 
-void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
+void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
   if (virtual_space_list() == NULL) {
       set_virtual_space_list(new_entry);
   } else {
     current_virtual_space()->set_next(new_entry);
   }
   set_current_virtual_space(new_entry);
-  inc_virtual_space_total(vs_word_size);
+  inc_reserved_words(new_entry->reserved_words());
+  inc_committed_words(new_entry->committed_words());
   inc_virtual_space_count();
 #ifdef ASSERT
   new_entry->mangle();
 #endif
   if (TraceMetavirtualspaceAllocation && Verbose) {
     VirtualSpaceNode* vsl = current_virtual_space();
-    vsl->print_on(tty);
+    vsl->print_on(gclog_or_tty);
   }
 }
 
+bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
+  size_t before = node->committed_words();
+
+  bool result = node->expand_by(word_size, pre_touch);
+
+  size_t after = node->committed_words();
+
+  // after and before can be the same if the memory was pre-committed.
+  assert(after >= before, "Must be");
+  inc_committed_words(after - before);
+
+  return result;
+}
+
 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                            size_t grow_chunks_by_words,
                                            size_t medium_chunk_bunch) {
 
-  // Get a chunk from the chunk freelist
-  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
-
-  if (next != NULL) {
-    next->container()->inc_container_count();
-  } else {
-    // Allocate a chunk out of the current virtual space.
-    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
-  }
+  // Allocate a chunk out of the current virtual space.
+  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
 
   if (next == NULL) {
     // Not enough room in current virtual space.  Try to commit
@@ -1186,18 +1160,27 @@
     size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
                                                         page_size_words);
     bool vs_expanded =
-      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
+      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
     if (!vs_expanded) {
       // Should the capacity of the metaspaces be expanded for
       // this allocation?  If it's the virtual space for classes and is
       // being used for CompressedHeaders, don't allocate a new virtualspace.
       if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
         // Get another virtual space.
-          size_t grow_vs_words =
-            MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
+        size_t allocation_aligned_expand_words =
+            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
+        size_t grow_vs_words =
+            MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
         if (grow_vs(grow_vs_words)) {
           // Got it.  It's on the list now.  Get a chunk from it.
-          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
+          assert(current_virtual_space()->expanded_words() == 0,
+              "New virtual space nodes should not have expanded");
+
+          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
+                                                              page_size_words);
+          // We probably want to expand by aligned_expand_vs_by_words here.
+          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
+          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
         }
       } else {
         // Allocation will fail and induce a GC
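The expansion path above rounds the request up twice: to the page size in words before committing into the current node, and to the allocation granularity in words before reserving a brand-new node. A sketch of the align-up arithmetic, where align_up mirrors align_size_up and all the sizes are invented for illustration:

    #include <cstdio>
    #include <cstddef>

    // Round 'size' up to the next multiple of 'alignment' (a power of two).
    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t bytes_per_word = 8;                    // LP64 assumption
      const size_t page_words = 4096 / bytes_per_word;    // 512 words
      const size_t gran_words = 65536 / bytes_per_word;   // 8192 words (Windows-like)
      size_t request = 1000;                              // words
      size_t page_aligned  = align_up(request, page_words);        // 1024
      size_t alloc_aligned = align_up(page_aligned, gran_words);   // 8192
      printf("%zu -> %zu -> %zu words\n", request, page_aligned, alloc_aligned);
      return 0;
    }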
@@ -1307,8 +1290,9 @@
   // reserved space, because this is a larger space prereserved for compressed
   // class pointers.
   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
-    size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
-              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
+    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
+    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
+    size_t real_allocated     = nonclass_allocated + class_allocated;
     if (real_allocated >= MaxMetaspaceSize) {
       return false;
     }
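With the hunk above, the MaxMetaspaceSize guard sums the reserved bytes of the non-class list with the allocated capacity of the class space instead of the old virtual-space total. A toy restatement of the check, with the parameters standing in for the MetaspaceAux queries:

    #include <cstddef>

    // Returns true if a further expansion may be attempted under the cap.
    static bool may_expand(size_t nonclass_reserved_bytes,
                           size_t class_allocated_bytes,
                           size_t max_metaspace_bytes) {
      size_t real_allocated = nonclass_reserved_bytes + class_allocated_bytes;
      return real_allocated < max_metaspace_bytes;
    }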
@@ -1501,15 +1485,15 @@
       if (dummy_chunk == NULL) {
         break;
       }
-      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
+      sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
 
       if (TraceMetadataChunkAllocation && Verbose) {
         gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
                                sm->sum_count_in_chunks_in_use());
         dummy_chunk->print_on(gclog_or_tty);
         gclog_or_tty->print_cr("  Free chunks total %d  count %d",
-                               vsl->chunk_manager()->free_chunks_total(),
-                               vsl->chunk_manager()->free_chunks_count());
+                               sm->chunk_manager()->free_chunks_total_words(),
+                               sm->chunk_manager()->free_chunks_count());
       }
     }
   } else {
@@ -1565,12 +1549,12 @@
 
 // ChunkManager methods
 
-size_t ChunkManager::free_chunks_total() {
+size_t ChunkManager::free_chunks_total_words() {
   return _free_chunks_total;
 }
 
-size_t ChunkManager::free_chunks_total_in_bytes() {
-  return free_chunks_total() * BytesPerWord;
+size_t ChunkManager::free_chunks_total_bytes() {
+  return free_chunks_total_words() * BytesPerWord;
 }
 
 size_t ChunkManager::free_chunks_count() {
@@ -1698,9 +1682,9 @@
   assert_lock_strong(SpaceManager::expand_lock());
   slow_locked_verify();
   if (TraceMetadataChunkAllocation) {
-    tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
-                  PTR_FORMAT "  size " SIZE_FORMAT,
-                  chunk, chunk->word_size());
+    gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
+                           PTR_FORMAT "  size " SIZE_FORMAT,
+                           chunk, chunk->word_size());
   }
   free_chunks_put(chunk);
 }
@@ -1729,9 +1713,9 @@
     dec_free_chunks_total(chunk->capacity_word_size());
 
     if (TraceMetadataChunkAllocation && Verbose) {
-      tty->print_cr("ChunkManager::free_chunks_get: free_list "
-                    PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
-                    free_list, chunk, chunk->word_size());
+      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
+                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
+                             free_list, chunk, chunk->word_size());
     }
   } else {
     chunk = humongous_dictionary()->get_chunk(
@@ -1741,10 +1725,10 @@
     if (chunk != NULL) {
       if (TraceMetadataHumongousAllocation) {
         size_t waste = chunk->word_size() - word_size;
-        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
-                      " for requested size " SIZE_FORMAT
-                      " waste " SIZE_FORMAT,
-                      chunk->word_size(), word_size, waste);
+        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
+                               SIZE_FORMAT " for requested size " SIZE_FORMAT
+                               " waste " SIZE_FORMAT,
+                               chunk->word_size(), word_size, waste);
       }
       // Chunk is being removed from the chunks free list.
       dec_free_chunks_total(chunk->capacity_word_size());
@@ -1761,6 +1745,8 @@
   // work.
   chunk->set_is_free(false);
 #endif
+  chunk->container()->inc_container_count();
+
   slow_locked_verify();
   return chunk;
 }
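The added inc_container_count() call above moves the in-use accounting to the single exit point of free_chunks_get, so every chunk that leaves the free list bumps its owning node exactly once. A simplified sketch of accounting at the point of removal, with stand-in types:

    #include <cstddef>

    struct Container { size_t in_use = 0; };

    struct Chunk {
      Chunk* next = nullptr;
      Container* container = nullptr;
    };

    struct FreeList {
      Chunk* head = nullptr;
      Chunk* get() {
        Chunk* c = head;
        if (c != nullptr) {
          head = c->next;
          c->container->in_use++;  // account at the single exit point
        }
        return c;
      }
    };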
@@ -1786,18 +1772,18 @@
     } else {
       list_count = humongous_dictionary()->total_count();
     }
-    tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
-               PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
-               this, chunk, chunk->word_size(), list_count);
-    locked_print_free_chunks(tty);
+    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
+                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
+                        this, chunk, chunk->word_size(), list_count);
+    locked_print_free_chunks(gclog_or_tty);
   }
 
   return chunk;
 }
 
-void ChunkManager::print_on(outputStream* out) {
+void ChunkManager::print_on(outputStream* out) const {
   if (PrintFLSStatistics != 0) {
-    humongous_dictionary()->report_statistics();
+    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
   }
 }
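print_on becomes const here even though the humongous dictionary's report_statistics() is not, so the const is cast away locally instead of rippling const through the dictionary type. A sketch of that compromise, with Dict standing in for the dictionary; the cast is only defensible because reporting does not mutate logical state:

    struct Dict {
      void report_statistics() { /* non-const in the legacy interface */ }
    };

    class Manager {
     public:
      void print_on() const {
        // Legacy call is non-const; cast away const for the read-only report.
        const_cast<Manager*>(this)->_dict.report_statistics();
      }
     private:
      Dict _dict;
    };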
 
@@ -1944,8 +1930,8 @@
     }
   }
 
-  vs_list()->chunk_manager()->locked_print_free_chunks(st);
-  vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
+  chunk_manager()->locked_print_free_chunks(st);
+  chunk_manager()->locked_print_sum_free_chunks(st);
 }
 
 size_t SpaceManager::calc_chunk_size(size_t word_size) {
@@ -2049,9 +2035,7 @@
 }
 
 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
-                           Mutex* lock,
-                           VirtualSpaceList* vs_list) :
-  _vs_list(vs_list),
+                           Mutex* lock) :
   _mdtype(mdtype),
   _allocated_blocks_words(0),
   _allocated_chunks_words(0),
@@ -2137,9 +2121,7 @@
   MutexLockerEx fcl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
 
-  ChunkManager* chunk_manager = vs_list()->chunk_manager();
-
-  chunk_manager->slow_locked_verify();
+  chunk_manager()->slow_locked_verify();
 
   dec_total_from_size_metrics();
 
@@ -2153,8 +2135,8 @@
 
   // Have to update before the chunks_in_use lists are emptied
   // below.
-  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
-                                       sum_count_in_chunks_in_use());
+  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
+                                         sum_count_in_chunks_in_use());
 
   // Add all the chunks in use by this space manager
   // to the global list of free chunks.
@@ -2169,11 +2151,11 @@
                              chunk_size_name(i));
     }
     Metachunk* chunks = chunks_in_use(i);
-    chunk_manager->return_chunks(i, chunks);
+    chunk_manager()->return_chunks(i, chunks);
     set_chunks_in_use(i, NULL);
     if (TraceMetadataChunkAllocation && Verbose) {
       gclog_or_tty->print_cr("updated freelist count %d %s",
-                             chunk_manager->free_chunks(i)->count(),
+                             chunk_manager()->free_chunks(i)->count(),
                              chunk_size_name(i));
     }
     assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
@@ -2210,16 +2192,16 @@
                    humongous_chunks->word_size(), HumongousChunkGranularity));
     Metachunk* next_humongous_chunks = humongous_chunks->next();
     humongous_chunks->container()->dec_container_count();
-    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
+    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
     humongous_chunks = next_humongous_chunks;
   }
   if (TraceMetadataChunkAllocation && Verbose) {
     gclog_or_tty->print_cr("");
     gclog_or_tty->print_cr("updated dictionary count %d %s",
-                     chunk_manager->humongous_dictionary()->total_count(),
+                     chunk_manager()->humongous_dictionary()->total_count(),
                      chunk_size_name(HumongousIndex));
   }
-  chunk_manager->slow_locked_verify();
+  chunk_manager()->slow_locked_verify();
 }
 
 const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
@@ -2278,6 +2260,7 @@
   ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
 
   if (index != HumongousIndex) {
+    retire_current_chunk();
     set_current_chunk(new_chunk);
     new_chunk->set_next(chunks_in_use(index));
     set_chunks_in_use(index, new_chunk);
@@ -2307,23 +2290,35 @@
     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                         sum_count_in_chunks_in_use());
     new_chunk->print_on(gclog_or_tty);
-    if (vs_list() != NULL) {
-      vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
+  }
+}
+
+void SpaceManager::retire_current_chunk() {
+  if (current_chunk() != NULL) {
+    size_t remaining_words = current_chunk()->free_word_size();
+    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
+      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
+      inc_used_metrics(remaining_words);
     }
   }
 }
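retire_current_chunk hands the unused tail of the outgoing chunk to the block freelist, but only when the tail meets the freelist's minimum block size; the tail then counts as used. A simplified sketch, where min_block_words stands in for TreeChunk<Metablock, FreeList>::min_size():

    #include <cstddef>

    static const size_t min_block_words = 12;  // assumed freelist minimum

    struct Chunk {
      size_t capacity;
      size_t used;
      size_t free_words() const { return capacity - used; }
      size_t allocate(size_t words) { used += words; return words; }
    };

    struct BlockFreelist {
      size_t returned_words = 0;
      void return_block(size_t words) { returned_words += words; }
    };

    void retire(Chunk* current, BlockFreelist* freelist, size_t* used_metric) {
      if (current == nullptr) return;
      size_t remaining = current->free_words();
      if (remaining >= min_block_words) {
        current->allocate(remaining);       // claim the tail...
        freelist->return_block(remaining);  // ...and hand it to the freelist
        *used_metric += remaining;          // it now counts as used
      }
    }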
 
 Metachunk* SpaceManager::get_new_chunk(size_t word_size,
                                        size_t grow_chunks_by_words) {
-
-  Metachunk* next = vs_list()->get_new_chunk(word_size,
-                                             grow_chunks_by_words,
-                                             medium_chunk_bunch());
-
-  if (TraceMetadataHumongousAllocation &&
+  // Get a chunk from the chunk freelist
+  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
+
+  if (next == NULL) {
+    next = vs_list()->get_new_chunk(word_size,
+                                    grow_chunks_by_words,
+                                    medium_chunk_bunch());
+  }
+
+  if (TraceMetadataHumongousAllocation && next != NULL &&
       SpaceManager::is_humongous(next->word_size())) {
-    gclog_or_tty->print_cr("  new humongous chunk word size " PTR_FORMAT,
-                           next->word_size());
+    gclog_or_tty->print_cr("  new humongous chunk word size "
+                           PTR_FORMAT, next->word_size());
   }
 
   return next;
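After this hunk, SpaceManager::get_new_chunk asks the global chunk freelist first and only falls back to carving a new chunk out of the virtual-space list, which is why the humongous trace now also checks for NULL. A standalone sketch of the two-level lookup, with both sources reduced to stand-ins:

    #include <cstddef>

    struct Chunk { size_t word_size; };

    // Simplified stand-ins: each source may fail and return nullptr.
    struct ChunkFreelist {
      Chunk* allocate(size_t words) { (void)words; return nullptr; } // pretend empty
    };

    struct VirtualSpaceList {
      Chunk* get_new_chunk(size_t words) { return new Chunk{words}; }
    };

    Chunk* get_new_chunk(ChunkFreelist* freelist, VirtualSpaceList* vs_list,
                         size_t words) {
      Chunk* next = freelist->allocate(words);   // reuse a freed chunk first
      if (next == nullptr) {
        next = vs_list->get_new_chunk(words);    // otherwise carve a fresh one
      }
      return next;                               // may still be nullptr: caller expands or GCs
    }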
@@ -2441,9 +2436,6 @@
          curr = curr->next()) {
       out->print("%d) ", i++);
       curr->print_on(out);
-      if (TraceMetadataChunkAllocation && Verbose) {
-        block_freelists()->print_on(out);
-      }
       curr_total += curr->word_size();
       used += curr->used_word_size();
       capacity += curr->capacity_word_size();
@@ -2451,6 +2443,10 @@
     }
   }
 
+  if (TraceMetadataChunkAllocation && Verbose) {
+    block_freelists()->print_on(out);
+  }
+
   size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
   // Free space isn't wasted.
   waste -= free;
@@ -2538,13 +2534,13 @@
   return used * BytesPerWord;
 }
 
-size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
   size_t free = 0;
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
     Metaspace* msp = iter.get_next();
     if (msp != NULL) {
-      free += msp->free_words(mdtype);
+      free += msp->free_words_slow(mdtype);
     }
   }
   return free * BytesPerWord;
@@ -2567,34 +2563,55 @@
   return capacity * BytesPerWord;
 }
 
-size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
-  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
-  return list == NULL ? 0 : list->virtual_space_total();
+size_t MetaspaceAux::capacity_bytes_slow() {
+#ifdef PRODUCT
+  // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
+#endif
+  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
+  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
+  assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
+      err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+        " class_capacity + non_class_capacity " SIZE_FORMAT
+        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
+        allocated_capacity_bytes(), class_capacity + non_class_capacity,
+        class_capacity, non_class_capacity));
+
+  return class_capacity + non_class_capacity;
 }
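capacity_bytes_slow() keeps its role as a debug-only cross-check: the slow walk over every loader's metaspace must agree with the running allocated-capacity counters, or the assert fires. A standalone sketch of the fast-counter versus slow-walk verification, with invented per-loader numbers:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    static size_t fast_capacity = 0;  // running counter, updated on (de)allocation

    static size_t slow_capacity(const std::vector<size_t>& per_loader) {
      size_t total = 0;
      for (size_t c : per_loader) total += c;  // iterate everything: slow
      return total;
    }

    int main() {
      std::vector<size_t> per_loader = {4096, 8192, 1024};
      fast_capacity = 4096 + 8192 + 1024;
      // Debug-only sanity check, mirroring the assert above.
      assert(fast_capacity == slow_capacity(per_loader));
      return 0;
    }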
 
-size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
-
-size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
+size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
+  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
+  return list == NULL ? 0 : list->reserved_bytes();
+}
+
+size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
   VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
-  if (list == NULL) {
+  return list == NULL ? 0 : list->committed_bytes();
+}
+
+size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
+
+size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
+  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
+  if (chunk_manager == NULL) {
     return 0;
   }
-  ChunkManager* chunk = list->chunk_manager();
-  chunk->slow_verify();
-  return chunk->free_chunks_total();
+  chunk_manager->slow_verify();
+  return chunk_manager->free_chunks_total_words();
 }
 
-size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
-  return free_chunks_total(mdtype) * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
+  return free_chunks_total_words(mdtype) * BytesPerWord;
 }
 
-size_t MetaspaceAux::free_chunks_total() {
-  return free_chunks_total(Metaspace::ClassType) +
-         free_chunks_total(Metaspace::NonClassType);
+size_t MetaspaceAux::free_chunks_total_words() {
+  return free_chunks_total_words(Metaspace::ClassType) +
+         free_chunks_total_words(Metaspace::NonClassType);
 }
 
-size_t MetaspaceAux::free_chunks_total_in_bytes() {
-  return free_chunks_total() * BytesPerWord;
+size_t MetaspaceAux::free_chunks_total_bytes() {
+  return free_chunks_total_words() * BytesPerWord;
 }
 
 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
@@ -2605,14 +2622,14 @@
                         "("  SIZE_FORMAT ")",
                         prev_metadata_used,
                         allocated_used_bytes(),
-                        reserved_in_bytes());
+                        reserved_bytes());
   } else {
     gclog_or_tty->print(" "  SIZE_FORMAT "K"
                         "->" SIZE_FORMAT "K"
                         "("  SIZE_FORMAT "K)",
-                        prev_metadata_used / K,
-                        allocated_used_bytes() / K,
-                        reserved_in_bytes()/ K);
+                        prev_metadata_used/K,
+                        allocated_used_bytes()/K,
+                        reserved_bytes()/K);
   }
 
   gclog_or_tty->print("]");
@@ -2625,14 +2642,14 @@
   out->print_cr(" Metaspace total "
                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                 " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
+                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
 
   out->print_cr("  data space     "
                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                 " reserved " SIZE_FORMAT "K",
                 allocated_capacity_bytes(nct)/K,
                 allocated_used_bytes(nct)/K,
-                reserved_in_bytes(nct)/K);
+                reserved_bytes(nct)/K);
   if (Metaspace::using_class_space()) {
     Metaspace::MetadataType ct = Metaspace::ClassType;
     out->print_cr("  class space    "
@@ -2640,17 +2657,17 @@
                   " reserved " SIZE_FORMAT "K",
                   allocated_capacity_bytes(ct)/K,
                   allocated_used_bytes(ct)/K,
-                  reserved_in_bytes(ct)/K);
+                  reserved_bytes(ct)/K);
   }
 }
 
 // Print information for class space and data space separately.
 // This is almost the same as above.
 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
-  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
+  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
   size_t capacity_bytes = capacity_bytes_slow(mdtype);
   size_t used_bytes = used_bytes_slow(mdtype);
-  size_t free_bytes = free_in_bytes(mdtype);
+  size_t free_bytes = free_bytes_slow(mdtype);
   size_t used_and_free = used_bytes + free_bytes +
                            free_chunks_capacity_bytes;
   out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
@@ -2732,9 +2749,9 @@
 }
 
 void MetaspaceAux::verify_free_chunks() {
-  Metaspace::space_list()->chunk_manager()->verify();
+  Metaspace::chunk_manager_metadata()->verify();
   if (Metaspace::using_class_space()) {
-    Metaspace::class_space_list()->chunk_manager()->verify();
+    Metaspace::chunk_manager_class()->verify();
   }
 }
 
@@ -2805,6 +2822,9 @@
 VirtualSpaceList* Metaspace::_space_list = NULL;
 VirtualSpaceList* Metaspace::_class_space_list = NULL;
 
+ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
+ChunkManager* Metaspace::_chunk_manager_class = NULL;
+
 #define VIRTUALSPACEMULTIPLIER 2
 
 #ifdef _LP64
@@ -2836,7 +2856,7 @@
 // to work with compressed klass pointers.
 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
   assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
-  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(UseCompressedClassPointers, "Only use with CompressedClassPointers");
   address lower_base = MIN2((address)metaspace_base, cds_base);
   address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
                                 (address)(metaspace_base + class_metaspace_size()));
@@ -2846,7 +2866,7 @@
 // Try to allocate the metaspace at the requested addr.
 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
   assert(using_class_space(), "called improperly");
-  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
+  assert(UseCompressedClassPointers, "Only use with CompressedClassPointers");
   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
          "Metaspace size is too big");
 
@@ -2869,9 +2889,9 @@
 
     // If no successful allocation then try to allocate the space anywhere.  If
     // that fails then OOM doom.  At this point we cannot try allocating the
-    // metaspace as if UseCompressedKlassPointers is off because too much
-    // initialization has happened that depends on UseCompressedKlassPointers.
-    // So, UseCompressedKlassPointers cannot be turned off at this point.
+    // metaspace as if UseCompressedClassPointers is off because too much
+    // initialization has happened that depends on UseCompressedClassPointers.
+    // So, UseCompressedClassPointers cannot be turned off at this point.
     if (!metaspace_rs.is_reserved()) {
       metaspace_rs = ReservedSpace(class_metaspace_size(),
                                    os::vm_allocation_granularity(), false);
@@ -2904,14 +2924,15 @@
   }
 }
 
-// For UseCompressedKlassPointers the class space is reserved above the top of
+// For UseCompressedClassPointers the class space is reserved above the top of
 // the Java heap.  The argument passed in is at the base of the compressed space.
 void Metaspace::initialize_class_space(ReservedSpace rs) {
   // The reserved space size may be bigger because of alignment, esp with UseLargePages
-  assert(rs.size() >= ClassMetaspaceSize,
-         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
+  assert(rs.size() >= CompressedClassSpaceSize,
+         err_msg(SIZE_FORMAT " < " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
   assert(using_class_space(), "Must be using class space");
   _class_space_list = new VirtualSpaceList(rs);
+  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
 }
 
 #endif
@@ -2921,7 +2942,7 @@
   int max_alignment = os::vm_page_size();
   size_t cds_total = 0;
 
-  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
+  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
                                          os::vm_allocation_granularity()));
 
   MetaspaceShared::set_max_alignment(max_alignment);
@@ -2937,12 +2958,13 @@
     // remainder is the misc code and data chunks.
     cds_total = FileMapInfo::shared_spaces_size();
     _space_list = new VirtualSpaceList(cds_total/wordSize);
+    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
 
 #ifdef _LP64
     // Set the compressed klass pointer base so that decoding of these pointers works
     // properly when creating the shared archive.
-    assert(UseCompressedOops && UseCompressedKlassPointers,
-      "UseCompressedOops and UseCompressedKlassPointers must be set");
+    assert(UseCompressedOops && UseCompressedClassPointers,
+      "UseCompressedOops and UseCompressedClassPointers must be set");
     Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
     if (TraceMetavirtualspaceAllocation && Verbose) {
       gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
@@ -2979,7 +3001,7 @@
     }
 
 #ifdef _LP64
-    // If UseCompressedKlassPointers is set then allocate the metaspace area
+    // If UseCompressedClassPointers is set then allocate the metaspace area
     // above the heap and above the CDS area (if it exists).
     if (using_class_space()) {
       if (UseSharedSpaces) {
@@ -2997,22 +3019,37 @@
     // on the medium chunk list.   The next chunk will be small and progress
     // from there.  This size was determined by running -version.
     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
-                                       (ClassMetaspaceSize/BytesPerWord)*2);
+                                       (CompressedClassSpaceSize/BytesPerWord)*2);
     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
     // Arbitrarily set the initial virtual space to a multiple
     // of the boot class loader size.
     size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
     // Initialize the list of virtual spaces.
     _space_list = new VirtualSpaceList(word_size);
+    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
   }
 }
 
+Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
+                                               size_t chunk_word_size,
+                                               size_t chunk_bunch) {
+  // Get a chunk from the chunk freelist
+  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
+  if (chunk != NULL) {
+    return chunk;
+  }
+
+  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
+}
+
 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
 
   assert(space_list() != NULL,
     "Metadata VirtualSpaceList has not been initialized");
-
-  _vsm = new SpaceManager(NonClassType, lock, space_list());
+  assert(chunk_manager_metadata() != NULL,
+    "Metadata ChunkManager has not been initialized");
+
+  _vsm = new SpaceManager(NonClassType, lock);
   if (_vsm == NULL) {
     return;
   }
@@ -3021,11 +3058,13 @@
   vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
 
   if (using_class_space()) {
-    assert(class_space_list() != NULL,
-      "Class VirtualSpaceList has not been initialized");
+    assert(class_space_list() != NULL,
+      "Class VirtualSpaceList has not been initialized");
+    assert(chunk_manager_class() != NULL,
+      "Class ChunkManager has not been initialized");
 
     // Allocate SpaceManager for classes.
-    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
+    _class_vsm = new SpaceManager(ClassType, lock);
     if (_class_vsm == NULL) {
       return;
     }
@@ -3034,9 +3073,9 @@
   MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
 
   // Allocate chunk for metadata objects
-  Metachunk* new_chunk =
-     space_list()->get_initialization_chunk(word_size,
-                                            vsm()->medium_chunk_bunch());
+  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
+                                                  word_size,
+                                                  vsm()->medium_chunk_bunch());
   assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
   if (new_chunk != NULL) {
     // Add to this manager's list of chunks in use and current_chunk().
@@ -3045,9 +3084,9 @@
 
   // Allocate chunk for class metadata objects
   if (using_class_space()) {
-    Metachunk* class_chunk =
-       class_space_list()->get_initialization_chunk(class_word_size,
-                                                    class_vsm()->medium_chunk_bunch());
+    Metachunk* class_chunk = get_initialization_chunk(ClassType,
+                                                      class_word_size,
+                                                      class_vsm()->medium_chunk_bunch());
     if (class_chunk != NULL) {
       class_vsm()->add_chunk(class_chunk, true);
     }
@@ -3064,7 +3103,7 @@
 
 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
   // DumpSharedSpaces doesn't use class metadata area (yet)
-  // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
+  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
   if (mdtype == ClassType && using_class_space()) {
     return  class_vsm()->allocate(word_size);
   } else {
@@ -3103,7 +3142,7 @@
   }
 }
 
-size_t Metaspace::free_words(MetadataType mdtype) const {
+size_t Metaspace::free_words_slow(MetadataType mdtype) const {
   if (mdtype == ClassType) {
     return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
   } else {
@@ -3213,7 +3252,7 @@
         MetaspaceAux::dump(gclog_or_tty);
       }
       // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
-      const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
+      const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
                                                          "Metadata space";
       report_java_out_of_memory(space_string);
 
@@ -3264,12 +3303,16 @@
   }
 }
 
+void Metaspace::purge(MetadataType mdtype) {
+  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
+}
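The per-type purge() overload above pairs each VirtualSpaceList with its matching ChunkManager, and the no-argument version dispatches to it under the expand lock. A sketch of the pairing, with the lookups and types reduced to stand-ins:

    enum MetadataType { NonClassType, ClassType };

    struct ChunkManager { /* freelist bookkeeping elided */ };
    struct VirtualSpaceList {
      void purge(ChunkManager* cm) { (void)cm; /* free empty nodes */ }
    };

    static VirtualSpaceList nonclass_list, class_list;
    static ChunkManager nonclass_cm, class_cm;
    static bool using_class_space = true;

    static VirtualSpaceList* get_space_list(MetadataType t) {
      return t == ClassType ? &class_list : &nonclass_list;
    }
    static ChunkManager* get_chunk_manager(MetadataType t) {
      return t == ClassType ? &class_cm : &nonclass_cm;
    }

    static void purge(MetadataType t) {
      get_space_list(t)->purge(get_chunk_manager(t));  // keep the pair in sync
    }

    static void purge_all() {
      purge(NonClassType);
      if (using_class_space) purge(ClassType);
    }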
+
 void Metaspace::purge() {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
-  space_list()->purge();
+  purge(NonClassType);
   if (using_class_space()) {
-    class_space_list()->purge();
+    purge(ClassType);
   }
 }
 
@@ -3311,3 +3354,70 @@
     class_vsm()->dump(out);
   }
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestMetaspaceAuxTest : AllStatic {
+ public:
+  static void test_reserved() {
+    size_t reserved = MetaspaceAux::reserved_bytes();
+
+    assert(reserved > 0, "assert");
+
+    size_t committed  = MetaspaceAux::committed_bytes();
+    assert(committed <= reserved, "assert");
+
+    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
+    assert(reserved_metadata > 0, "assert");
+    assert(reserved_metadata <= reserved, "assert");
+
+    if (UseCompressedClassPointers) {
+      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
+      assert(reserved_class > 0, "assert");
+      assert(reserved_class < reserved, "assert");
+    }
+  }
+
+  static void test_committed() {
+    size_t committed = MetaspaceAux::committed_bytes();
+
+    assert(committed > 0, "assert");
+
+    size_t reserved  = MetaspaceAux::reserved_bytes();
+    assert(committed <= reserved, "assert");
+
+    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
+    assert(committed_metadata > 0, "assert");
+    assert(committed_metadata <= committed, "assert");
+
+    if (UseCompressedClassPointers) {
+      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+      assert(committed_class > 0, "assert");
+      assert(committed_class < committed, "assert");
+    }
+  }
+
+  static void test_virtual_space_list_large_chunk() {
+    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
+    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+    // Pick a size larger than VirtualSpaceSize (256k words) and add a page so
+    // that the request is _not_ vm_allocation_granularity aligned on Windows.
+    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
+    large_size += (os::vm_page_size()/BytesPerWord);
+    vs_list->get_new_chunk(large_size, large_size, 0);
+  }
+
+  static void test() {
+    test_reserved();
+    test_committed();
+    test_virtual_space_list_large_chunk();
+  }
+};
+
+void TestMetaspaceAux_test() {
+  TestMetaspaceAuxTest::test();
+}
+
+#endif
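For reference, the invariants the reserved/committed tests above pin down, restated standalone with invented byte counts in place of the MetaspaceAux queries:

    #include <cassert>
    #include <cstddef>

    int main() {
      // Invented stand-ins for the reserved/committed queries.
      size_t reserved  = 8u * 1024 * 1024;
      size_t committed = 2u * 1024 * 1024;
      size_t reserved_metadata = 6u * 1024 * 1024;   // NonClassType share

      assert(reserved > 0);
      assert(committed > 0 && committed <= reserved);
      assert(reserved_metadata > 0 && reserved_metadata <= reserved);
      return 0;
    }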
--- a/src/share/vm/memory/metaspace.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/metaspace.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -56,12 +56,15 @@
 //                       +-------------------+
 //
 
+class ChunkManager;
 class ClassLoaderData;
 class Metablock;
+class Metachunk;
 class MetaWord;
 class Mutex;
 class outputStream;
 class SpaceManager;
+class VirtualSpaceList;
 
 // Metaspaces each have a  SpaceManager and allocations
 // are done by the SpaceManager.  Allocations are done
@@ -76,8 +79,6 @@
 // allocate() method returns a block for use as a
 // quantum of metadata.
 
-class VirtualSpaceList;
-
 class Metaspace : public CHeapObj<mtClass> {
   friend class VMStructs;
   friend class SpaceManager;
@@ -102,6 +103,10 @@
  private:
   void initialize(Mutex* lock, MetaspaceType type);
 
+  Metachunk* get_initialization_chunk(MetadataType mdtype,
+                                      size_t chunk_word_size,
+                                      size_t chunk_bunch);
+
   // Align up the word size to the allocation word size
   static size_t align_word_size_up(size_t);
 
@@ -134,6 +139,10 @@
   static VirtualSpaceList* _space_list;
   static VirtualSpaceList* _class_space_list;
 
+  static ChunkManager* _chunk_manager_metadata;
+  static ChunkManager* _chunk_manager_class;
+
+ public:
   static VirtualSpaceList* space_list()       { return _space_list; }
   static VirtualSpaceList* class_space_list() { return _class_space_list; }
   static VirtualSpaceList* get_space_list(MetadataType mdtype) {
@@ -141,6 +150,14 @@
     return mdtype == ClassType ? class_space_list() : space_list();
   }
 
+  static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
+  static ChunkManager* chunk_manager_class()    { return _chunk_manager_class; }
+  static ChunkManager* get_chunk_manager(MetadataType mdtype) {
+    assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
+    return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
+  }
+
+ private:
   // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   // maintain a single list for now.
   void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
@@ -182,9 +199,8 @@
 
   char*  bottom() const;
   size_t used_words_slow(MetadataType mdtype) const;
-  size_t free_words(MetadataType mdtype) const;
+  size_t free_words_slow(MetadataType mdtype) const;
   size_t capacity_words_slow(MetadataType mdtype) const;
-  size_t waste_words(MetadataType mdtype) const;
 
   size_t used_bytes_slow(MetadataType mdtype) const;
   size_t capacity_bytes_slow(MetadataType mdtype) const;
@@ -200,6 +216,7 @@
   void dump(outputStream* const out) const;
 
   // Free empty virtualspaces
+  static void purge(MetadataType mdtype);
   static void purge();
 
   void print_on(outputStream* st) const;
@@ -213,27 +230,22 @@
 
   void iterate(AllocRecordClosure *closure);
 
-  // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
+  // Returns true only if UseCompressedClassPointers is true and DumpSharedSpaces is false.
   static bool using_class_space() {
-    return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
+    return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
   }
 
 };
 
 class MetaspaceAux : AllStatic {
-  static size_t free_chunks_total(Metaspace::MetadataType mdtype);
-
- public:
-  // Statistics for class space and data space in metaspace.
+  static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
 
   // These methods iterate over the classloader data graph
   // for the given Metaspace type.  These are slow.
   static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
-  static size_t free_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t free_bytes_slow(Metaspace::MetadataType mdtype);
   static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
-
-  // Iterates over the virtual space list.
-  static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t capacity_bytes_slow();
 
   // Running sum of space in all Metachunks that has been
   // allocated to a Metaspace.  This is used instead of
@@ -263,17 +275,16 @@
   }
 
   // Used by MetaspaceCounters
-  static size_t free_chunks_total();
-  static size_t free_chunks_total_in_bytes();
-  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t free_chunks_total_words();
+  static size_t free_chunks_total_bytes();
+  static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
 
   static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
     return _allocated_capacity_words[mdtype];
   }
   static size_t allocated_capacity_words() {
-    return _allocated_capacity_words[Metaspace::NonClassType] +
-           (Metaspace::using_class_space() ?
-           _allocated_capacity_words[Metaspace::ClassType] : 0);
+    return allocated_capacity_words(Metaspace::NonClassType) +
+           allocated_capacity_words(Metaspace::ClassType);
   }
   static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
     return allocated_capacity_words(mdtype) * BytesPerWord;
@@ -286,9 +297,8 @@
     return _allocated_used_words[mdtype];
   }
   static size_t allocated_used_words() {
-    return _allocated_used_words[Metaspace::NonClassType] +
-           (Metaspace::using_class_space() ?
-           _allocated_used_words[Metaspace::ClassType] : 0);
+    return allocated_used_words(Metaspace::NonClassType) +
+           allocated_used_words(Metaspace::ClassType);
   }
   static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
     return allocated_used_words(mdtype) * BytesPerWord;
@@ -300,31 +310,22 @@
   static size_t free_bytes();
   static size_t free_bytes(Metaspace::MetadataType mdtype);
 
-  // Total capacity in all Metaspaces
-  static size_t capacity_bytes_slow() {
-#ifdef PRODUCT
-    // Use allocated_capacity_bytes() in PRODUCT instead of this function.
-    guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
-#endif
-    size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
-    size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
-    assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
-           err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
-             " class_capacity + non_class_capacity " SIZE_FORMAT
-             " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
-             allocated_capacity_bytes(), class_capacity + non_class_capacity,
-             class_capacity, non_class_capacity));
-
-    return class_capacity + non_class_capacity;
+  static size_t reserved_bytes(Metaspace::MetadataType mdtype);
+  static size_t reserved_bytes() {
+    return reserved_bytes(Metaspace::ClassType) +
+           reserved_bytes(Metaspace::NonClassType);
   }
 
-  // Total space reserved in all Metaspaces
-  static size_t reserved_in_bytes() {
-    return reserved_in_bytes(Metaspace::ClassType) +
-           reserved_in_bytes(Metaspace::NonClassType);
+  static size_t committed_bytes(Metaspace::MetadataType mdtype);
+  static size_t committed_bytes() {
+    return committed_bytes(Metaspace::ClassType) +
+           committed_bytes(Metaspace::NonClassType);
   }
 
-  static size_t min_chunk_size();
+  static size_t min_chunk_size_words();
+  static size_t min_chunk_size_bytes() {
+    return min_chunk_size_words() * BytesPerWord;
+  }
 
   // Print change in used metadata.
   static void print_metaspace_change(size_t prev_metadata_used);
--- a/src/share/vm/memory/metaspaceCounters.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/metaspaceCounters.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -65,26 +65,25 @@
 
 MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
 
-size_t MetaspaceCounters::calculate_capacity() {
-  // The total capacity is the sum of
-  //   1) capacity of Metachunks in use by all Metaspaces
-  //   2) unused space at the end of each Metachunk
-  //   3) space in the freelist
-  size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
-    + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
-  return total_capacity;
+size_t MetaspaceCounters::used() {
+  return MetaspaceAux::allocated_used_bytes();
+}
+
+size_t MetaspaceCounters::capacity() {
+  return MetaspaceAux::committed_bytes();
+}
+
+size_t MetaspaceCounters::max_capacity() {
+  return MetaspaceAux::reserved_bytes();
 }
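The counters change meaning with this hunk: used now reports allocated bytes, capacity reports committed bytes, and max_capacity reports reserved bytes, which keeps the expected used <= capacity <= max_capacity ordering. A tiny illustration with invented numbers:

    #include <cassert>
    #include <cstddef>

    int main() {
      // Invented values standing in for the three MetaspaceAux queries.
      size_t used         = 1u * 1024 * 1024;  // allocated_used_bytes()
      size_t capacity     = 2u * 1024 * 1024;  // committed_bytes()
      size_t max_capacity = 8u * 1024 * 1024;  // reserved_bytes()
      assert(used <= capacity && capacity <= max_capacity);
      return 0;
    }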
 
 void MetaspaceCounters::initialize_performance_counters() {
   if (UsePerfData) {
     assert(_perf_counters == NULL, "Should only be initialized once");
 
-    size_t min_capacity = MetaspaceAux::min_chunk_size();
-    size_t capacity = calculate_capacity();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t used = MetaspaceAux::allocated_used_bytes();
-
-    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
+    size_t min_capacity = 0;
+    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity,
+                                               capacity(), max_capacity(), used());
   }
 }
 
@@ -92,31 +91,29 @@
   if (UsePerfData) {
     assert(_perf_counters != NULL, "Should be initialized");
 
-    size_t capacity = calculate_capacity();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t used = MetaspaceAux::allocated_used_bytes();
-
-    _perf_counters->update(capacity, max_capacity, used);
+    _perf_counters->update(capacity(), max_capacity(), used());
   }
 }
 
 MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
 
-size_t CompressedClassSpaceCounters::calculate_capacity() {
-    return MetaspaceAux::allocated_capacity_bytes(_class_type) +
-           MetaspaceAux::free_bytes(_class_type) +
-           MetaspaceAux::free_chunks_total_in_bytes(_class_type);
+size_t CompressedClassSpaceCounters::used() {
+  return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
+}
+
+size_t CompressedClassSpaceCounters::capacity() {
+  return MetaspaceAux::committed_bytes(Metaspace::ClassType);
+}
+
+size_t CompressedClassSpaceCounters::max_capacity() {
+  return MetaspaceAux::reserved_bytes(Metaspace::ClassType);
 }
 
 void CompressedClassSpaceCounters::update_performance_counters() {
-  if (UsePerfData && UseCompressedKlassPointers) {
+  if (UsePerfData && UseCompressedClassPointers) {
     assert(_perf_counters != NULL, "Should be initialized");
 
-    size_t capacity = calculate_capacity();
-    size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
-    size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
-
-    _perf_counters->update(capacity, max_capacity, used);
+    _perf_counters->update(capacity(), max_capacity(), used());
   }
 }
 
@@ -125,13 +122,10 @@
     assert(_perf_counters == NULL, "Should only be initialized once");
     const char* ns = "compressedclassspace";
 
-    if (UseCompressedKlassPointers) {
-      size_t min_capacity = MetaspaceAux::min_chunk_size();
-      size_t capacity = calculate_capacity();
-      size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
-      size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
-
-      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
+    if (UseCompressedClassPointers) {
+      size_t min_capacity = 0;
+      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity(),
+                                                 max_capacity(), used());
     } else {
       _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
     }
--- a/src/share/vm/memory/metaspaceCounters.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/metaspaceCounters.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -25,13 +25,15 @@
 #ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
 #define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
 
-#include "memory/metaspace.hpp"
+#include "memory/allocation.hpp"
 
 class MetaspacePerfCounters;
 
 class MetaspaceCounters: public AllStatic {
   static MetaspacePerfCounters* _perf_counters;
-  static size_t calculate_capacity();
+  static size_t used();
+  static size_t capacity();
+  static size_t max_capacity();
 
  public:
   static void initialize_performance_counters();
@@ -40,8 +42,9 @@
 
 class CompressedClassSpaceCounters: public AllStatic {
   static MetaspacePerfCounters* _perf_counters;
-  static size_t calculate_capacity();
-  static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
+  static size_t used();
+  static size_t capacity();
+  static size_t max_capacity();
 
  public:
   static void initialize_performance_counters();
--- a/src/share/vm/memory/metaspaceShared.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/metaspaceShared.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -103,9 +103,10 @@
     if (k->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(k);
       for (int i = 0; i < ik->methods()->length(); i++) {
-        ResourceMark rm;
         Method* m = ik->methods()->at(i);
-        (new Fingerprinter(m))->fingerprint();
+        Fingerprinter fp(m);
+        // The side effect of this call is to set the method's fingerprint field.
+        fp.fingerprint();
       }
     }
   }
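The hunk above replaces a heap-allocated Fingerprinter that was never freed with a stack-allocated one and drops the per-method ResourceMark. A generic sketch of the pattern, with a hypothetical Helper standing in for Fingerprinter:

    struct Helper {
      explicit Helper(int id) : _id(id) {}
      void run() { /* side effect happens here */ }
     private:
      int _id;
    };

    void process(int id) {
      // Before: (new Helper(id))->run();  // allocates and never frees
      Helper h(id);   // after: automatic storage, destroyed at scope exit
      h.run();
    }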
--- a/src/share/vm/memory/universe.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/universe.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -602,7 +602,7 @@
   }
 }
 
-static intptr_t non_oop_bits = 0;
+intptr_t Universe::_non_oop_bits = 0;
 
 void* Universe::non_oop_word() {
   // Neither the high bits nor the low bits of this value is allowed
@@ -616,11 +616,11 @@
   // Using the OS-supplied non-memory-address word (usually 0 or -1)
   // will take care of the high bits, however many there are.
 
-  if (non_oop_bits == 0) {
-    non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
+  if (_non_oop_bits == 0) {
+    _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
   }
 
-  return (void*)non_oop_bits;
+  return (void*)_non_oop_bits;
 }
 
 jint universe_init() {
@@ -872,13 +872,16 @@
 
 // Reserve the Java heap, which is now the same for all GCs.
 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
+  assert(alignment <= Arguments::conservative_max_heap_alignment(),
+      err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
+          alignment, Arguments::conservative_max_heap_alignment()));
   size_t total_reserved = align_size_up(heap_size, alignment);
   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
       "heap size is too big for compressed oops");
 
   bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
   assert(!UseLargePages
-      || UseParallelOldGC
+      || UseParallelGC
       || use_large_pages, "Wrong alignment to use large pages");
 
   char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
@@ -1028,7 +1031,7 @@
 
     msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
-    msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false);
+    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
 
     msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
--- a/src/share/vm/memory/universe.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/memory/universe.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -179,9 +179,11 @@
   // The particular choice of collected heap.
   static CollectedHeap* _collectedHeap;
 
+  static intptr_t _non_oop_bits;
+
   // For UseCompressedOops.
   static struct NarrowPtrStruct _narrow_oop;
-  // For UseCompressedKlassPointers.
+  // For UseCompressedClassPointers.
   static struct NarrowPtrStruct _narrow_klass;
   static address _narrow_ptrs_base;
 
@@ -229,7 +231,7 @@
     _narrow_oop._base    = base;
   }
   static void     set_narrow_klass_base(address base) {
-    assert(UseCompressedKlassPointers, "no compressed klass ptrs?");
+    assert(UseCompressedClassPointers, "no compressed klass ptrs?");
     _narrow_klass._base   = base;
   }
   static void     set_narrow_oop_use_implicit_null_checks(bool use) {
@@ -353,7 +355,7 @@
   static int      narrow_oop_shift()                      { return  _narrow_oop._shift; }
   static bool     narrow_oop_use_implicit_null_checks()   { return  _narrow_oop._use_implicit_null_checks; }
 
-  // For UseCompressedKlassPointers
+  // For UseCompressedClassPointers
   static address  narrow_klass_base()                     { return  _narrow_klass._base; }
   static bool  is_narrow_klass_base(void* addr)           { return (narrow_klass_base() == (address)addr); }
   static int      narrow_klass_shift()                    { return  _narrow_klass._shift; }
--- a/src/share/vm/oops/arrayOop.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/arrayOop.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -65,7 +65,7 @@
   // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
   // it occupies the second half of the _klass field in oopDesc.
   static int length_offset_in_bytes() {
-    return UseCompressedKlassPointers ? klass_gap_offset_in_bytes() :
+    return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
                                sizeof(arrayOopDesc);
   }
 
--- a/src/share/vm/oops/constantPool.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/constantPool.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -108,16 +108,16 @@
 void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
                                                   intStack reference_map,
                                                   int constant_pool_map_length,
-                                                   TRAPS) {
+                                                  TRAPS) {
   // Initialized the resolved object cache.
   int map_length = reference_map.length();
   if (map_length > 0) {
     // Only need mapping back to constant pool entries.  The map isn't used for
-    // invokedynamic resolved_reference entries.  The constant pool cache index
-    // has the mapping back to both the constant pool and to the resolved
-    // reference index.
+    // invokedynamic resolved_reference entries.  For invokedynamic entries,
+    // the constant pool cache index has the mapping back to both the constant
+    // pool and the resolved reference index.
     if (constant_pool_map_length > 0) {
-      Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, map_length, CHECK);
+      Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, constant_pool_map_length, CHECK);
 
       for (int i = 0; i < constant_pool_map_length; i++) {
         int x = reference_map.at(i);
@@ -182,16 +182,9 @@
 
 int ConstantPool::cp_to_object_index(int cp_index) {
   // this is harder; don't do this so much.
-  for (int i = 0; i< reference_map()->length(); i++) {
-    if (reference_map()->at(i) == cp_index) return i;
-    // Zero entry is divider between constant pool indices for strings,
-    // method handles and method types. After that the index is a constant
-    // pool cache index for invokedynamic.  Stop when zero (which can never
-    // be a constant pool index)
-    if (reference_map()->at(i) == 0) break;
-  }
-  // We might not find the index.
-  return _no_index_sentinel;
+  int i = reference_map()->find(cp_index);
+  // We might not find the index for jsr292 call.
+  return (i < 0) ? _no_index_sentinel : i;
 }
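cp_to_object_index now delegates the linear scan to the array's find() and maps a miss onto the sentinel. A standalone sketch of the same lookup contract, with std::vector standing in for Array<u2>:

    #include <vector>

    static const int no_index_sentinel = -1;

    // Linear search; returns the position of 'value', or -1 on a miss.
    static int find(const std::vector<unsigned short>& map, unsigned short value) {
      for (int i = 0; i < (int)map.size(); i++) {
        if (map[i] == value) return i;
      }
      return -1;
    }

    static int cp_to_object_index(const std::vector<unsigned short>& reference_map,
                                  unsigned short cp_index) {
      int i = find(reference_map, cp_index);
      return (i < 0) ? no_index_sentinel : i;
    }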
 
 Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
@@ -396,32 +389,6 @@
 }
 
 
-// This is an interface for the compiler that allows accessing non-resolved entries
-// in the constant pool - but still performs the validations tests. Must be used
-// in a pre-parse of the compiler - to determine what it can do and not do.
-// Note: We cannot update the ConstantPool from the vm_thread.
-Klass* ConstantPool::klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int index, TRAPS) {
-  int which = this_oop->klass_ref_index_at(index);
-  CPSlot entry = this_oop->slot_at(which);
-  if (entry.is_resolved()) {
-    assert(entry.get_klass()->is_klass(), "must be");
-    return entry.get_klass();
-  } else {
-    assert(entry.is_unresolved(), "must be either symbol or klass");
-    Symbol*  name  = entry.get_symbol();
-    oop loader = this_oop->pool_holder()->class_loader();
-    oop protection_domain = this_oop->pool_holder()->protection_domain();
-    Handle h_loader(THREAD, loader);
-    Handle h_prot  (THREAD, protection_domain);
-    KlassHandle k(THREAD, SystemDictionary::find(name, h_loader, h_prot, THREAD));
-
-    // Do access check for klasses
-    if( k.not_null() ) verify_constant_pool_resolve(this_oop, k, CHECK_NULL);
-    return k();
-  }
-}
-
-
 Method* ConstantPool::method_at_if_loaded(constantPoolHandle cpool,
                                                    int which) {
   if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
@@ -866,8 +833,7 @@
   // If the string has already been interned, this entry will be non-null
   oop str = this_oop->resolved_references()->obj_at(obj_index);
   if (str != NULL) return str;
-
-      Symbol* sym = this_oop->unresolved_string_at(which);
+  Symbol* sym = this_oop->unresolved_string_at(which);
   str = StringTable::intern(sym, CHECK_(NULL));
   this_oop->string_at_put(which, obj_index, str);
   assert(java_lang_String::is_instance(str), "must be string");
@@ -1645,9 +1611,11 @@
     case JVM_CONSTANT_UnresolvedClassInError:
     case JVM_CONSTANT_StringIndex:
     case JVM_CONSTANT_MethodType:
+    case JVM_CONSTANT_MethodTypeInError:
       return 3;
 
     case JVM_CONSTANT_MethodHandle:
+    case JVM_CONSTANT_MethodHandleInError:
       return 4; //tag, ref_kind, ref_index
 
     case JVM_CONSTANT_Integer:
@@ -1828,8 +1796,8 @@
       case JVM_CONSTANT_MethodHandle:
       case JVM_CONSTANT_MethodHandleInError: {
         *bytes = JVM_CONSTANT_MethodHandle;
-        int kind = method_handle_ref_kind_at(idx);
-        idx1 = method_handle_index_at(idx);
+        int kind = method_handle_ref_kind_at_error_ok(idx);
+        idx1 = method_handle_index_at_error_ok(idx);
         *(bytes+1) = (unsigned char) kind;
         Bytes::put_Java_u2((address) (bytes+2), idx1);
         DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1));
@@ -1838,7 +1806,7 @@
       case JVM_CONSTANT_MethodType:
       case JVM_CONSTANT_MethodTypeInError: {
         *bytes = JVM_CONSTANT_MethodType;
-        idx1 = method_type_index_at(idx);
+        idx1 = method_type_index_at_error_ok(idx);
         Bytes::put_Java_u2((address) (bytes+1), idx1);
         DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
         break;
@@ -2026,12 +1994,12 @@
       break;
     case JVM_CONSTANT_MethodHandle :
     case JVM_CONSTANT_MethodHandleInError :
-      st->print("ref_kind=%d", method_handle_ref_kind_at(index));
-      st->print(" ref_index=%d", method_handle_index_at(index));
+      st->print("ref_kind=%d", method_handle_ref_kind_at_error_ok(index));
+      st->print(" ref_index=%d", method_handle_index_at_error_ok(index));
       break;
     case JVM_CONSTANT_MethodType :
     case JVM_CONSTANT_MethodTypeInError :
-      st->print("signature_index=%d", method_type_index_at(index));
+      st->print("signature_index=%d", method_type_index_at_error_ok(index));
       break;
     case JVM_CONSTANT_InvokeDynamic :
       {
--- a/src/share/vm/oops/constantPool.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/constantPool.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -231,7 +231,6 @@
   static int cache_offset_in_bytes()        { return offset_of(ConstantPool, _cache); }
   static int pool_holder_offset_in_bytes()  { return offset_of(ConstantPool, _pool_holder); }
   static int resolved_references_offset_in_bytes() { return offset_of(ConstantPool, _resolved_references); }
-  static int reference_map_offset_in_bytes() { return offset_of(ConstantPool, _reference_map); }
 
   // Storing constants
 
@@ -475,18 +474,42 @@
     return *int_at_addr(which);
   }
 
+ private:
+  int method_handle_ref_kind_at(int which, bool error_ok) {
+    assert(tag_at(which).is_method_handle() ||
+           (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
+    return extract_low_short_from_int(*int_at_addr(which));  // mask out unwanted ref_index bits
+  }
+  int method_handle_index_at(int which, bool error_ok) {
+    assert(tag_at(which).is_method_handle() ||
+           (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
+    return extract_high_short_from_int(*int_at_addr(which));  // shift out unwanted ref_kind bits
+  }
+  int method_type_index_at(int which, bool error_ok) {
+    assert(tag_at(which).is_method_type() ||
+           (error_ok && tag_at(which).is_method_type_in_error()), "Corrupted constant pool");
+    return *int_at_addr(which);
+  }
+ public:
   int method_handle_ref_kind_at(int which) {
-    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
-    return extract_low_short_from_int(*int_at_addr(which));  // mask out unwanted ref_index bits
+    return method_handle_ref_kind_at(which, false);
+  }
+  int method_handle_ref_kind_at_error_ok(int which) {
+    return method_handle_ref_kind_at(which, true);
   }
   int method_handle_index_at(int which) {
-    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
-    return extract_high_short_from_int(*int_at_addr(which));  // shift out unwanted ref_kind bits
+    return method_handle_index_at(which, false);
+  }
+  int method_handle_index_at_error_ok(int which) {
+    return method_handle_index_at(which, true);
   }
   int method_type_index_at(int which) {
-    assert(tag_at(which).is_method_type(), "Corrupted constant pool");
-    return *int_at_addr(which);
+    return method_type_index_at(which, false);
   }
+  int method_type_index_at_error_ok(int which) {
+    return method_type_index_at(which, true);
+  }
+
   // Derived queries:
   Symbol* method_handle_name_ref_at(int which) {
     int member = method_handle_index_at(which);
@@ -730,8 +753,6 @@
   static oop         method_type_at_if_loaded      (constantPoolHandle this_oop, int which);
   static Klass*            klass_at_if_loaded      (constantPoolHandle this_oop, int which);
   static Klass*        klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
-  // Same as above - but does LinkResolving.
-  static Klass*        klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
 
   // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
   // future by other Java code. These take constant pool indices rather than
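
For context, a MethodHandle constant-pool entry keeps ref_kind and ref_index packed into a single 32-bit pool slot; the new private error_ok helpers above only relax the tag assert so the printing code can decode entries whose resolution previously failed. A standalone sketch of the packing arithmetic implied by the extract_low/high_short comments (the pack/unpack names are illustrative, not HotSpot APIs):

    #include <cassert>
    #include <cstdint>

    // ref_kind lives in the low 16 bits, ref_index in the high 16 bits.
    static int32_t pack_method_handle(uint16_t ref_kind, uint16_t ref_index) {
      return (int32_t)(((uint32_t)ref_index << 16) | ref_kind);
    }
    static int unpack_ref_kind(int32_t cell)  { return cell & 0xFFFF; }          // cf. extract_low_short_from_int
    static int unpack_ref_index(int32_t cell) { return (cell >> 16) & 0xFFFF; }  // cf. extract_high_short_from_int

    int main() {
      int32_t cell = pack_method_handle(5 /* REF_invokeVirtual */, 42);
      assert(unpack_ref_kind(cell) == 5 && unpack_ref_index(cell) == 42);
      return 0;
    }
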
--- a/src/share/vm/oops/cpCache.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/cpCache.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -140,9 +140,10 @@
             err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
 }
 
-void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
-                                        methodHandle method,
-                                        int vtable_index) {
+void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
+                                                       methodHandle method,
+                                                       int vtable_index) {
+  bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
   assert(method->interpreter_entry() != NULL, "should have been set at this point");
   assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
 
@@ -160,7 +161,8 @@
       // ...and fall through as if we were handling invokevirtual:
     case Bytecodes::_invokevirtual:
       {
-        if (method->can_be_statically_bound()) {
+        if (!is_vtable_call) {
+          assert(method->can_be_statically_bound(), "");
           // set_f2_as_vfinal_method checks if is_vfinal flag is true.
           set_method_flags(as_TosState(method->result_type()),
                            (                             1      << is_vfinal_shift) |
@@ -169,6 +171,7 @@
                            method()->size_of_parameters());
           set_f2_as_vfinal_method(method());
         } else {
+          assert(!method->can_be_statically_bound(), "");
           assert(vtable_index >= 0, "valid index");
           assert(!method->is_final_method(), "sanity");
           set_method_flags(as_TosState(method->result_type()),
@@ -182,6 +185,7 @@
 
     case Bytecodes::_invokespecial:
     case Bytecodes::_invokestatic:
+      assert(!is_vtable_call, "");
       // Note:  Read and preserve the value of the is_vfinal flag on any
       // invokevirtual bytecode shared with this constant pool cache entry.
       // It is cheap and safe to consult is_vfinal() at all times.
@@ -232,8 +236,22 @@
   NOT_PRODUCT(verify(tty));
 }
 
+void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method) {
+  int index = Method::nonvirtual_vtable_index;
+  // index < 0; FIXME: inline and customize set_direct_or_vtable_call
+  set_direct_or_vtable_call(invoke_code, method, index);
+}
 
-void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
+void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
+  // either the method is a miranda or its holder should accept the given index
+  assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
+  // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
+  set_direct_or_vtable_call(invoke_code, method, index);
+}
+
+void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
+  assert(method->method_holder()->verify_itable_index(index), "");
+  assert(invoke_code == Bytecodes::_invokeinterface, "");
   InstanceKlass* interf = method->method_holder();
   assert(interf->is_interface(), "must be an interface");
   assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
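
A minimal sketch of how a resolver might select among the three split setters, assuming resolution has already produced the method and, where applicable, its vtable or itable index; link_call and its parameter list are hypothetical, only the set_*_call entry points come from the declarations above:

    // Hypothetical dispatcher over the new entry points; not HotSpot code.
    void link_call(ConstantPoolCacheEntry* e, Bytecodes::Code code,
                   methodHandle m, int vtable_index, int itable_index) {
      if (code == Bytecodes::_invokeinterface) {
        e->set_itable_call(code, m, itable_index);   // itable-dispatched
      } else if (vtable_index >= 0) {
        e->set_vtable_call(code, m, vtable_index);   // vtable-dispatched
      } else {
        e->set_direct_call(code, m);                 // statically bound
      }
    }
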
--- a/src/share/vm/oops/cpCache.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/cpCache.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -219,15 +219,29 @@
     Klass*          root_klass                   // needed by the GC to dirty the klass
   );
 
-  void set_method(                               // sets entry to resolved method entry
+ private:
+  void set_direct_or_vtable_call(
     Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
     methodHandle    method,                      // the method/prototype if any (NULL, otherwise)
     int             vtable_index                 // the vtable index if any, else negative
   );
 
-  void set_interface_call(
-    methodHandle method,                         // Resolved method
-    int index                                    // Method index into interface
+ public:
+  void set_direct_call(                          // sets entry to exact concrete method entry
+    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
+    methodHandle    method                       // the method to call
+  );
+
+  void set_vtable_call(                          // sets entry to vtable index
+    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
+    methodHandle    method,                      // resolved method which declares the vtable index
+    int             vtable_index                 // the vtable index
+  );
+
+  void set_itable_call(
+    Bytecodes::Code invoke_code,                 // the bytecode used; must be invokeinterface
+    methodHandle method,                         // the resolved interface method
+    int itable_index                             // index into itable for the method
   );
 
   void set_method_handle(
--- a/src/share/vm/oops/fieldInfo.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/fieldInfo.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -240,6 +240,14 @@
     return (access_flags() & JVM_ACC_FIELD_INTERNAL) != 0;
   }
 
+  bool is_stable() const {
+    return (access_flags() & JVM_ACC_FIELD_STABLE) != 0;
+  }
+  void set_stable(bool z) {
+    if (z) _shorts[access_flags_offset] |=  JVM_ACC_FIELD_STABLE;
+    else   _shorts[access_flags_offset] &= ~JVM_ACC_FIELD_STABLE;
+  }
+
   Symbol* lookup_symbol(int symbol_index) const {
     assert(is_internal(), "only internal fields");
     return vmSymbols::symbol_at((vmSymbols::SID)symbol_index);
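
The new set_stable accessor follows the usual set/clear bit idiom on the packed access-flag shorts. A standalone illustration of that idiom (the flag value below is a stand-in, not the real JVM_ACC_FIELD_STABLE constant):

    #include <cassert>
    #include <cstdint>

    const uint16_t FLAG = 0x0020;  // hypothetical access-flag bit

    void set_flag(uint16_t& flags, bool z) {
      if (z) flags |=  FLAG;   // turn the bit on
      else   flags &= ~FLAG;   // turn it off, leaving the other bits untouched
    }

    int main() {
      uint16_t flags = 0x0001;
      set_flag(flags, true);   assert(flags == 0x0021);
      set_flag(flags, false);  assert(flags == 0x0001);
      return 0;
    }
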
--- a/src/share/vm/oops/fieldStreams.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/fieldStreams.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 #include "oops/instanceKlass.hpp"
 #include "oops/fieldInfo.hpp"
+#include "runtime/fieldDescriptor.hpp"
 
 // This is the base class for iteration over the fields array
 // describing the declared fields in the class.  Several subclasses
@@ -43,8 +44,10 @@
   int                 _index;
   int                 _limit;
   int                 _generic_signature_slot;
+  fieldDescriptor     _fd_buf;
 
   FieldInfo* field() const { return FieldInfo::from_field_array(_fields, _index); }
+  InstanceKlass* field_holder() const { return _constants->pool_holder(); }
 
   int init_generic_signature_start_slot() {
     int length = _fields->length();
@@ -102,6 +105,7 @@
     _index = 0;
     _limit = klass->java_fields_count();
     init_generic_signature_start_slot();
+    assert(klass == field_holder(), "");
   }
   FieldStreamBase(instanceKlassHandle klass) {
     _fields = klass->fields();
@@ -109,6 +113,7 @@
     _index = 0;
     _limit = klass->java_fields_count();
     init_generic_signature_start_slot();
+    assert(klass == field_holder(), "");
   }
 
   // accessors
@@ -180,6 +185,12 @@
     return field()->contended_group();
   }
 
+  // bridge to a heavier API:
+  fieldDescriptor& field_descriptor() const {
+    fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
+    field.reinitialize(field_holder(), _index);
+    return field;
+  }
 };
 
 // Iterate over only the internal fields
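
field_descriptor() bridges the lightweight stream to the heavier fieldDescriptor API by re-pointing a single buffered descriptor (_fd_buf) at the current index, so callers no longer stack-allocate and initialize one per field. A usage sketch, matching the do_local_static_fields change in instanceKlass.cpp below:

    for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
      if (fs.access_flags().is_static()) {
        fieldDescriptor& fd = fs.field_descriptor();  // reinitializes _fd_buf for _index
        cl->do_field(&fd);                            // fd is only valid until fs.next()
      }
    }
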
--- a/src/share/vm/oops/instanceKlass.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/instanceKlass.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -286,7 +286,6 @@
   init_previous_versions();
   set_generic_signature_index(0);
   release_set_methods_jmethod_ids(NULL);
-  release_set_methods_cached_itable_indices(NULL);
   set_annotations(NULL);
   set_jvmti_cached_class_field_map(NULL);
   set_initial_method_idnum(0);
@@ -1149,7 +1148,7 @@
     Symbol* f_name = fs.name();
     Symbol* f_sig  = fs.signature();
     if (f_name == name && f_sig == sig) {
-      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
       return true;
     }
   }
@@ -1218,7 +1217,7 @@
 bool InstanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
     if (fs.offset() == offset) {
-      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
+      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
       if (fd->is_static() == is_static) return true;
     }
   }
@@ -1251,8 +1250,7 @@
 void InstanceKlass::do_local_static_fields(FieldClosure* cl) {
   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
-      fieldDescriptor fd;
-      fd.initialize(this, fs.index());
+      fieldDescriptor& fd = fs.field_descriptor();
       cl->do_field(&fd);
     }
   }
@@ -1268,8 +1266,7 @@
 void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
   for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) {
-      fieldDescriptor fd;
-      fd.initialize(this_oop(), fs.index());
+      fieldDescriptor& fd = fs.field_descriptor();
       f(&fd, CHECK);
     }
   }
@@ -1291,7 +1288,7 @@
   int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
   int j = 0;
   for (int i = 0; i < length; i += 1) {
-    fd.initialize(this, i);
+    fd.reinitialize(this, i);
     if (!fd.is_static()) {
       fields_sorted[j + 0] = fd.offset();
       fields_sorted[j + 1] = i;
@@ -1303,7 +1300,7 @@
     // _sort_Fn is defined in growableArray.hpp.
     qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
     for (int i = 0; i < length; i += 2) {
-      fd.initialize(this, fields_sorted[i + 1]);
+      fd.reinitialize(this, fields_sorted[i + 1]);
       assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
       cl->do_field(&fd);
     }
@@ -1686,87 +1683,6 @@
 }
 
 
-// Cache an itable index
-void InstanceKlass::set_cached_itable_index(size_t idnum, int index) {
-  int* indices = methods_cached_itable_indices_acquire();
-  int* to_dealloc_indices = NULL;
-
-  // We use a double-check locking idiom here because this cache is
-  // performance sensitive. In the normal system, this cache only
-  // transitions from NULL to non-NULL which is safe because we use
-  // release_set_methods_cached_itable_indices() to advertise the
-  // new cache. A partially constructed cache should never be seen
-  // by a racing thread. Cache reads and writes proceed without a
-  // lock, but creation of the cache itself requires no leaks so a
-  // lock is generally acquired in that case.
-  //
-  // If the RedefineClasses() API has been used, then this cache can
-  // grow and we'll have transitions from non-NULL to bigger non-NULL.
-  // Cache creation requires no leaks and we require safety between all
-  // cache accesses and freeing of the old cache so a lock is generally
-  // acquired when the RedefineClasses() API has been used.
-
-  if (indices == NULL || idnum_can_increment()) {
-    // we need a cache or the cache can grow
-    MutexLocker ml(JNICachedItableIndex_lock);
-    // reacquire the cache to see if another thread already did the work
-    indices = methods_cached_itable_indices_acquire();
-    size_t length = 0;
-    // cache size is stored in element[0], other elements offset by one
-    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
-      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
-      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass);
-      new_indices[0] = (int)size;
-      // copy any existing entries
-      size_t i;
-      for (i = 0; i < length; i++) {
-        new_indices[i+1] = indices[i+1];
-      }
-      // Set all the rest to -1
-      for (i = length; i < size; i++) {
-        new_indices[i+1] = -1;
-      }
-      if (indices != NULL) {
-        // We have an old cache to delete so save it for after we
-        // drop the lock.
-        to_dealloc_indices = indices;
-      }
-      release_set_methods_cached_itable_indices(indices = new_indices);
-    }
-
-    if (idnum_can_increment()) {
-      // this cache can grow so we have to write to it safely
-      indices[idnum+1] = index;
-    }
-  } else {
-    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
-  }
-
-  if (!idnum_can_increment()) {
-    // The cache cannot grow and this JNI itable index value does not
-    // have to be unique like a jmethodID. If there is a race to set it,
-    // it doesn't matter.
-    indices[idnum+1] = index;
-  }
-
-  if (to_dealloc_indices != NULL) {
-    // we allocated a new cache so free the old one
-    FreeHeap(to_dealloc_indices);
-  }
-}
-
-
-// Retrieve a cached itable index
-int InstanceKlass::cached_itable_index(size_t idnum) {
-  int* indices = methods_cached_itable_indices_acquire();
-  if (indices != NULL && ((size_t)indices[0]) > idnum) {
-     // indices exist and are long enough, retrieve possible cached
-    return indices[idnum+1];
-  }
-  return -1;
-}
-
-
 //
 // Walk the list of dependent nmethods searching for nmethods which
 // are dependent on the changes that were passed in and mark them for
@@ -2326,12 +2242,6 @@
     }
   }
 
-  int* indices = methods_cached_itable_indices_acquire();
-  if (indices != (int*)NULL) {
-    release_set_methods_cached_itable_indices(NULL);
-    FreeHeap(indices);
-  }
-
   // release dependencies
   nmethodBucket* b = _dependencies;
   _dependencies = NULL;
@@ -2782,6 +2692,18 @@
   "allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error"
 };
 
+static void print_vtable(intptr_t* start, int len, outputStream* st) {
+  for (int i = 0; i < len; i++) {
+    intptr_t e = start[i];
+    st->print("%d : " INTPTR_FORMAT, i, e);
+    if (e != 0 && ((Metadata*)e)->is_metaspace_object()) {
+      st->print(" ");
+      ((Metadata*)e)->print_value_on(st);
+    }
+    st->cr();
+  }
+}
+
 void InstanceKlass::print_on(outputStream* st) const {
   assert(is_klass(), "must be klass");
   Klass::print_on(st);
@@ -2816,7 +2738,7 @@
 
   st->print(BULLET"arrays:            "); array_klasses()->print_value_on_maybe_null(st); st->cr();
   st->print(BULLET"methods:           "); methods()->print_value_on(st);                  st->cr();
-  if (Verbose) {
+  if (Verbose || WizardMode) {
     Array<Method*>* method_array = methods();
     for(int i = 0; i < method_array->length(); i++) {
       st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
@@ -2847,24 +2769,17 @@
   st->print(BULLET"field annotations:       "); fields_annotations()->print_value_on(st); st->cr();
   st->print(BULLET"field type annotations:  "); fields_type_annotations()->print_value_on(st); st->cr();
   {
-    ResourceMark rm;
-    // PreviousVersionInfo objects returned via PreviousVersionWalker
-    // contain a GrowableArray of handles. We have to clean up the
-    // GrowableArray _after_ the PreviousVersionWalker destructor
-    // has destroyed the handles.
-    {
-      bool have_pv = false;
-      PreviousVersionWalker pvw((InstanceKlass*)this);
-      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
-           pv_info != NULL; pv_info = pvw.next_previous_version()) {
-        if (!have_pv)
-          st->print(BULLET"previous version:  ");
-        have_pv = true;
-        pv_info->prev_constant_pool_handle()()->print_value_on(st);
-      }
-      if (have_pv)  st->cr();
-    } // pvw is cleaned up
-  } // rm is cleaned up
+    bool have_pv = false;
+    PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this);
+    for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+         pv_node != NULL; pv_node = pvw.next_previous_version()) {
+      if (!have_pv)
+        st->print(BULLET"previous version:  ");
+      have_pv = true;
+      pv_node->prev_constant_pool()->print_value_on(st);
+    }
+    if (have_pv) st->cr();
+  } // pvw is cleaned up
 
   if (generic_signature() != NULL) {
     st->print(BULLET"generic signature: ");
@@ -2874,7 +2789,9 @@
   st->print(BULLET"inner classes:     "); inner_classes()->print_value_on(st);     st->cr();
   st->print(BULLET"java mirror:       "); java_mirror()->print_value_on(st);       st->cr();
   st->print(BULLET"vtable length      %d  (start addr: " INTPTR_FORMAT ")", vtable_length(), start_of_vtable());  st->cr();
+  if (vtable_length() > 0 && (Verbose || WizardMode))  print_vtable(start_of_vtable(), vtable_length(), st);
   st->print(BULLET"itable length      %d (start addr: " INTPTR_FORMAT ")", itable_length(), start_of_itable()); st->cr();
+  if (itable_length() > 0 && (Verbose || WizardMode))  print_vtable(start_of_itable(), itable_length(), st);
   st->print_cr(BULLET"---- static fields (%d words):", static_field_size());
   FieldPrinter print_static_field(st);
   ((InstanceKlass*)this)->do_local_static_fields(&print_static_field);
@@ -2896,6 +2813,7 @@
 
 void InstanceKlass::print_value_on(outputStream* st) const {
   assert(is_klass(), "must be klass");
+  if (Verbose || WizardMode)  access_flags().print_on(st);
   name()->print_value_on(st);
 }
 
@@ -3392,34 +3310,34 @@
   Array<Method*>* old_methods = ikh->methods();
 
   if (cp_ref->on_stack()) {
-  PreviousVersionNode * pv_node = NULL;
-  if (emcp_method_count == 0) {
+    PreviousVersionNode * pv_node = NULL;
+    if (emcp_method_count == 0) {
       // non-shared ConstantPool gets a reference
-      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), NULL);
-    RC_TRACE(0x00000400,
-        ("add: all methods are obsolete; flushing any EMCP refs"));
-  } else {
-    int local_count = 0;
+      pv_node = new PreviousVersionNode(cp_ref, NULL);
+      RC_TRACE(0x00000400,
+          ("add: all methods are obsolete; flushing any EMCP refs"));
+    } else {
+      int local_count = 0;
       GrowableArray<Method*>* method_refs = new (ResourceObj::C_HEAP, mtClass)
-        GrowableArray<Method*>(emcp_method_count, true);
-    for (int i = 0; i < old_methods->length(); i++) {
-      if (emcp_methods->at(i)) {
-          // this old method is EMCP. Save it only if it's on the stack
-          Method* old_method = old_methods->at(i);
-          if (old_method->on_stack()) {
-            method_refs->append(old_method);
+          GrowableArray<Method*>(emcp_method_count, true);
+      for (int i = 0; i < old_methods->length(); i++) {
+        if (emcp_methods->at(i)) {
+            // this old method is EMCP. Save it only if it's on the stack
+            Method* old_method = old_methods->at(i);
+            if (old_method->on_stack()) {
+              method_refs->append(old_method);
+            }
+          if (++local_count >= emcp_method_count) {
+            // no more EMCP methods so bail out now
+            break;
           }
-        if (++local_count >= emcp_method_count) {
-          // no more EMCP methods so bail out now
-          break;
         }
       }
-    }
       // non-shared ConstantPool gets a reference
-      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), method_refs);
+      pv_node = new PreviousVersionNode(cp_ref, method_refs);
     }
     // append new previous version.
-  _previous_versions->append(pv_node);
+    _previous_versions->append(pv_node);
   }
 
   // Since the caller is the VMThread and we are at a safepoint, this
@@ -3520,6 +3438,8 @@
         return m;
       }
     }
+    // None found, return null for the caller to handle.
+    return NULL;
   }
   return m;
 }
@@ -3536,10 +3456,9 @@
 // Construct a PreviousVersionNode entry for the array hung off
 // the InstanceKlass.
 PreviousVersionNode::PreviousVersionNode(ConstantPool* prev_constant_pool,
-  bool prev_cp_is_weak, GrowableArray<Method*>* prev_EMCP_methods) {
+  GrowableArray<Method*>* prev_EMCP_methods) {
 
   _prev_constant_pool = prev_constant_pool;
-  _prev_cp_is_weak = prev_cp_is_weak;
   _prev_EMCP_methods = prev_EMCP_methods;
 }
 
@@ -3555,99 +3474,38 @@
   }
 }
 
-
-// Construct a PreviousVersionInfo entry
-PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
-  _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
-  _prev_EMCP_method_handles = NULL;
-
-  ConstantPool* cp = pv_node->prev_constant_pool();
-  assert(cp != NULL, "constant pool ref was unexpectedly cleared");
-  if (cp == NULL) {
-    return;  // robustness
-  }
-
-  // make the ConstantPool* safe to return
-  _prev_constant_pool_handle = constantPoolHandle(cp);
-
-  GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
-  if (method_refs == NULL) {
-    // the InstanceKlass did not have any EMCP methods
-    return;
-  }
-
-  _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
-
-  int n_methods = method_refs->length();
-  for (int i = 0; i < n_methods; i++) {
-    Method* method = method_refs->at(i);
-    assert (method != NULL, "method has been cleared");
-    if (method == NULL) {
-      continue;  // robustness
-    }
-    // make the Method* safe to return
-    _prev_EMCP_method_handles->append(methodHandle(method));
-  }
-}
-
-
-// Destroy a PreviousVersionInfo
-PreviousVersionInfo::~PreviousVersionInfo() {
-  // Since _prev_EMCP_method_handles is not C-heap allocated, we
-  // don't have to delete it.
-}
-
-
 // Construct a helper for walking the previous versions array
-PreviousVersionWalker::PreviousVersionWalker(InstanceKlass *ik) {
+PreviousVersionWalker::PreviousVersionWalker(Thread* thread, InstanceKlass *ik) {
+  _thread = thread;
   _previous_versions = ik->previous_versions();
   _current_index = 0;
-  // _hm needs no initialization
   _current_p = NULL;
-}
-
-
-// Destroy a PreviousVersionWalker
-PreviousVersionWalker::~PreviousVersionWalker() {
-  // Delete the current info just in case the caller didn't walk to
-  // the end of the previous versions list. No harm if _current_p is
-  // already NULL.
-  delete _current_p;
-
-  // When _hm is destroyed, all the Handles returned in
-  // PreviousVersionInfo objects will be destroyed.
-  // Also, after this destructor is finished it will be
-  // safe to delete the GrowableArray allocated in the
-  // PreviousVersionInfo objects.
+  _current_constant_pool_handle = constantPoolHandle(thread, ik->constants());
 }
 
 
 // Return the interesting information for the next previous version
 // of the klass. Returns NULL if there are no more previous versions.
-PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
+PreviousVersionNode* PreviousVersionWalker::next_previous_version() {
   if (_previous_versions == NULL) {
     // no previous versions so nothing to return
     return NULL;
   }
 
-  delete _current_p;  // cleanup the previous info for the caller
-  _current_p = NULL;  // reset to NULL so we don't delete same object twice
+  _current_p = NULL;  // reset to NULL
+  _current_constant_pool_handle = NULL;
 
   int length = _previous_versions->length();
 
   while (_current_index < length) {
     PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
-    PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass)
-                                          PreviousVersionInfo(pv_node);
-
-    constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
-    assert (!cp_h.is_null(), "null cp found in previous version");
-
-    // The caller will need to delete pv_info when they are done with it.
-    _current_p = pv_info;
-    return pv_info;
+
+    // Save a handle to the constant pool for this previous version,
+    // which keeps all the methods from being deallocated.
+    _current_constant_pool_handle = constantPoolHandle(_thread, pv_node->prev_constant_pool());
+    _current_p = pv_node;
+    return pv_node;
   }
 
-  // all of the underlying nodes' info has been deleted
   return NULL;
 } // end next_previous_version()
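
With PreviousVersionInfo gone, the walker hands back PreviousVersionNode* directly and holds a constantPoolHandle for the version it most recently returned; that handle is what keeps the version's methods from being deallocated while the caller inspects the node. The usage pattern, as in print_on above:

    PreviousVersionWalker pvw(Thread::current(), ik);
    for (PreviousVersionNode* pv_node = pvw.next_previous_version();
         pv_node != NULL;
         pv_node = pvw.next_previous_version()) {
      // pv_node and its methods stay alive while the walker's handle covers them
      pv_node->prev_constant_pool()->print_value_on(st);
    }
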
--- a/src/share/vm/oops/instanceKlass.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/instanceKlass.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -245,7 +245,6 @@
   MemberNameTable* _member_names;        // Member names
   JNIid*          _jni_ids;              // First JNI identifier for static fields in this class
   jmethodID*      _methods_jmethod_ids;  // jmethodIDs corresponding to method_idnum, or NULL if none
-  int*            _methods_cached_itable_indices;  // itable_index cache for JNI invoke corresponding to methods idnum, or NULL
   nmethodBucket*  _dependencies;         // list of dependent nmethods
   nmethod*        _osr_nmethods_head;    // Head of list of on-stack replacement nmethods for this class
   BreakpointInfo* _breakpoints;          // bpt lists, managed by Method*
@@ -690,10 +689,6 @@
                 size_t *length_p, jmethodID* id_p);
   jmethodID jmethod_id_or_null(Method* method);
 
-  // cached itable index support
-  void set_cached_itable_index(size_t idnum, int index);
-  int cached_itable_index(size_t idnum);
-
   // annotations support
   Annotations* annotations() const          { return _annotations; }
   void set_annotations(Annotations* anno)   { _annotations = anno; }
@@ -994,11 +989,6 @@
   void release_set_methods_jmethod_ids(jmethodID* jmeths)
          { OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths); }
 
-  int* methods_cached_itable_indices_acquire() const
-         { return (int*)OrderAccess::load_ptr_acquire(&_methods_cached_itable_indices); }
-  void release_set_methods_cached_itable_indices(int* indices)
-         { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); }
-
   // Lock during initialization
 public:
   // Lock for (1) initialization; (2) access to the ConstantPool of this class.
@@ -1136,21 +1126,11 @@
 
 
 // A collection point for interesting information about the previous
-// version(s) of an InstanceKlass. This class uses weak references to
-// the information so that the information may be collected as needed
-// by the system. If the information is shared, then a regular
-// reference must be used because a weak reference would be seen as
-// collectible. A GrowableArray of PreviousVersionNodes is attached
-// to the InstanceKlass as needed. See PreviousVersionWalker below.
+// version(s) of an InstanceKlass.  A GrowableArray of PreviousVersionNodes
+// is attached to the InstanceKlass as needed. See PreviousVersionWalker below.
 class PreviousVersionNode : public CHeapObj<mtClass> {
  private:
-  // A shared ConstantPool is never collected so we'll always have
-  // a reference to it so we can update items in the cache. We'll
-  // have a weak reference to a non-shared ConstantPool until all
-  // of the methods (EMCP or obsolete) have been collected; the
-  // non-shared ConstantPool becomes collectible at that point.
-  ConstantPool*    _prev_constant_pool;  // regular or weak reference
-  bool    _prev_cp_is_weak;     // true if not a shared ConstantPool
+  ConstantPool*    _prev_constant_pool;
 
   // If the previous version of the InstanceKlass doesn't have any
   // EMCP methods, then _prev_EMCP_methods will be NULL. If all the
@@ -1159,8 +1139,8 @@
   GrowableArray<Method*>* _prev_EMCP_methods;
 
 public:
-  PreviousVersionNode(ConstantPool* prev_constant_pool, bool prev_cp_is_weak,
-    GrowableArray<Method*>* prev_EMCP_methods);
+  PreviousVersionNode(ConstantPool* prev_constant_pool,
+                      GrowableArray<Method*>* prev_EMCP_methods);
   ~PreviousVersionNode();
   ConstantPool* prev_constant_pool() const {
     return _prev_constant_pool;
@@ -1171,59 +1151,26 @@
 };
 
 
-// A Handle-ized version of PreviousVersionNode.
-class PreviousVersionInfo : public ResourceObj {
- private:
-  constantPoolHandle   _prev_constant_pool_handle;
-  // If the previous version of the InstanceKlass doesn't have any
-  // EMCP methods, then _prev_EMCP_methods will be NULL. Since the
-  // methods cannot be collected while we hold a handle,
-  // _prev_EMCP_methods should never have a length of zero.
-  GrowableArray<methodHandle>* _prev_EMCP_method_handles;
-
-public:
-  PreviousVersionInfo(PreviousVersionNode *pv_node);
-  ~PreviousVersionInfo();
-  constantPoolHandle prev_constant_pool_handle() const {
-    return _prev_constant_pool_handle;
-  }
-  GrowableArray<methodHandle>* prev_EMCP_method_handles() const {
-    return _prev_EMCP_method_handles;
-  }
-};
-
-
-// Helper object for walking previous versions. This helper cleans up
-// the Handles that it allocates when the helper object is destroyed.
-// The PreviousVersionInfo object returned by next_previous_version()
-// is only valid until a subsequent call to next_previous_version() or
-// the helper object is destroyed.
+// Helper object for walking previous versions.
 class PreviousVersionWalker : public StackObj {
  private:
+  Thread*                               _thread;
   GrowableArray<PreviousVersionNode *>* _previous_versions;
   int                                   _current_index;
-  // Fields for cleaning up when we are done walking the previous versions:
-  // A HandleMark for the PreviousVersionInfo handles:
-  HandleMark                            _hm;
+
+  // A pointer to the current node object so we can handle the deletes.
+  PreviousVersionNode*                  _current_p;
 
-  // It would be nice to have a ResourceMark field in this helper also,
-  // but the ResourceMark code says to be careful to delete handles held
-  // in GrowableArrays _before_ deleting the GrowableArray. Since we
-  // can't guarantee the order in which the fields are destroyed, we
-  // have to let the creator of the PreviousVersionWalker object do
-  // the right thing. Also, adding a ResourceMark here causes an
-  // include loop.
-
-  // A pointer to the current info object so we can handle the deletes.
-  PreviousVersionInfo *                 _current_p;
+  // The constant pool handle keeps all the methods in this class from being
+  // deallocated from the metaspace during class unloading.
+  constantPoolHandle                    _current_constant_pool_handle;
 
  public:
-  PreviousVersionWalker(InstanceKlass *ik);
-  ~PreviousVersionWalker();
+  PreviousVersionWalker(Thread* thread, InstanceKlass *ik);
 
   // Return the interesting information for the next previous version
   // of the klass. Returns NULL if there are no more previous versions.
-  PreviousVersionInfo* next_previous_version();
+  PreviousVersionNode* next_previous_version();
 };
 
 
--- a/src/share/vm/oops/instanceOop.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/instanceOop.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -37,9 +37,9 @@
 
   // If compressed, the offset of the fields of the instance may not be aligned.
   static int base_offset_in_bytes() {
-    // offset computation code breaks if UseCompressedKlassPointers
+    // offset computation code breaks if UseCompressedClassPointers
     // only is true
-    return (UseCompressedOops && UseCompressedKlassPointers) ?
+    return (UseCompressedOops && UseCompressedClassPointers) ?
              klass_gap_offset_in_bytes() :
              sizeof(instanceOopDesc);
   }
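
A back-of-the-envelope picture of why base_offset_in_bytes needs both flags: with compressed oops and compressed class pointers, the 4-byte gap after the narrow klass field can hold instance data. The layout numbers below are the common 64-bit values, given purely for illustration:

    #include <cstdio>

    int main() {
      const int mark_word  = 8;  // header word
      const int klass_ptr  = 8;  // full-width Klass*
      const int narrow_gap = 4;  // a narrow klass uses 4 bytes, leaving a 4-byte gap
      printf("fields start at %d (uncompressed) vs %d (compressed)\n",
             mark_word + klass_ptr,                // 16
             mark_word + klass_ptr - narrow_gap);  // 12: first field lands in the gap
      return 0;
    }
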
--- a/src/share/vm/oops/klass.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/klass.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -674,13 +674,23 @@
 
 #ifndef PRODUCT
 
-void Klass::verify_vtable_index(int i) {
+bool Klass::verify_vtable_index(int i) {
   if (oop_is_instance()) {
-    assert(i>=0 && i<((InstanceKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
+    int limit = ((InstanceKlass*)this)->vtable_length()/vtableEntry::size();
+    assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
   } else {
     assert(oop_is_array(), "Must be");
-    assert(i>=0 && i<((ArrayKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
+    int limit = ((ArrayKlass*)this)->vtable_length()/vtableEntry::size();
+    assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
   }
+  return true;
+}
+
+bool Klass::verify_itable_index(int i) {
+  assert(oop_is_instance(), "");
+  int method_count = klassItable::method_count_for_interface(this);
+  assert(i >= 0 && i < method_count, "index out of bounds");
+  return true;
 }
 
 #endif
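
The checkers now return bool precisely so that call sites can wrap them in assert, as set_vtable_call and set_itable_call do in cpCache.cpp above; since the functions exist only outside PRODUCT and assert expands to nothing in product builds, the whole check compiles away there:

    // Debug builds execute the range check; product builds compile it away.
    assert(method->method_holder()->verify_itable_index(index), "");
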
--- a/src/share/vm/oops/klass.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/klass.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -699,7 +699,8 @@
   void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
 
 #ifndef PRODUCT
-  void verify_vtable_index(int index);
+  bool verify_vtable_index(int index);
+  bool verify_itable_index(int index);
 #endif
 
   virtual void oop_verify_on(oop obj, outputStream* st);
--- a/src/share/vm/oops/klassVtable.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/klassVtable.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -47,11 +47,12 @@
 
 
 // this function computes the vtable size (including the size needed for miranda
-// methods) and the number of miranda methods in this class
+// methods) and the number of miranda methods in this class.
 // Note on Miranda methods: Let's say there is a class C that implements
-// interface I.  Let's say there is a method m in I that neither C nor any
-// of its super classes implement (i.e there is no method of any access, with
-// the same name and signature as m), then m is a Miranda method which is
+// interface I, and none of C's superclasses implements I.
+// Let's say there is an abstract method m in I that neither C
+// nor any of its super classes implement (i.e., there is no method of any access,
+// with the same name and signature as m), then m is a Miranda method which is
 // entered as a public abstract method in C's vtable.  From then on it should
 // be treated as any other public method in C for method override purposes.
 void klassVtable::compute_vtable_size_and_num_mirandas(
@@ -111,10 +112,13 @@
 }
 
 int klassVtable::index_of(Method* m, int len) const {
-  assert(m->vtable_index() >= 0, "do not ask this of non-vtable methods");
+  assert(m->has_vtable_index(), "do not ask this of non-vtable methods");
   return m->vtable_index();
 }
 
+// Copy super class's vtable to the first part (prefix) of this class's vtable,
+// and return the number of entries copied.  Expects that 'super' is the Java
+// super class (arrays can have "array" super classes that must be skipped).
 int klassVtable::initialize_from_super(KlassHandle super) {
   if (super.is_null()) {
     return 0;
@@ -139,14 +143,14 @@
   }
 }
 
-// Revised lookup semantics   introduced 1.3 (Kestral beta)
+//
+// Revised lookup semantics   introduced 1.3 (Kestrel beta)
 void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
 
   // Note:  Arrays can have intermediate array supers.  Use java_super to skip them.
   KlassHandle super (THREAD, klass()->java_super());
   int nofNewEntries = 0;
 
-
   if (PrintVtables && !klass()->oop_is_array()) {
     ResourceMark rm(THREAD);
     tty->print_cr("Initializing: %s", _klass->name()->as_C_string());
@@ -174,8 +178,10 @@
     int len = methods->length();
     int initialized = super_vtable_len;
 
-    // update_inherited_vtable can stop for gc - ensure using handles
+    // Check each of this class's methods against super;
+    // if override, replace in copy of super vtable, otherwise append to end
     for (int i = 0; i < len; i++) {
+      // update_inherited_vtable can stop for gc - ensure using handles
       HandleMark hm(THREAD);
       assert(methods->at(i)->is_method(), "must be a Method*");
       methodHandle mh(THREAD, methods->at(i));
@@ -189,11 +195,11 @@
       }
     }
 
-    // add miranda methods; it will also update the value of initialized
-    fill_in_mirandas(&initialized);
+    // add miranda methods to end of vtable.
+    initialized = fill_in_mirandas(initialized);
 
     // In class hierarchies where the accessibility is not increasing (i.e., going from private ->
-    // package_private -> publicprotected), the vtable might actually be smaller than our initial
+    // package_private -> public/protected), the vtable might actually be smaller than our initial
     // calculation.
     assert(initialized <= _length, "vtable initialization failed");
     for(;initialized < _length; initialized++) {
@@ -248,14 +254,8 @@
   return superk;
 }
 
-// Methods that are "effectively" final don't need vtable entries.
-bool method_is_effectively_final(
-    AccessFlags klass_flags, methodHandle target) {
-  return target->is_final() || klass_flags.is_final() && !target->is_overpass();
-}
-
 // Update child's copy of super vtable for overrides
-// OR return true if a new vtable entry is required
+// OR return true if a new vtable entry is required.
 // Only called for InstanceKlass's, i.e. not for arrays
 // If that changed, could not use _klass as handle for klass
 bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len,
@@ -263,6 +263,7 @@
   ResourceMark rm;
   bool allocate_new = true;
   assert(klass->oop_is_instance(), "must be InstanceKlass");
+  assert(klass == target_method()->method_holder(), "caller resp.");
 
   // Initialize the method's vtable index to "nonvirtual".
   // If we allocate a vtable entry, we will update it to a non-negative number.
@@ -273,11 +274,17 @@
     return false;
   }
 
-  if (method_is_effectively_final(klass->access_flags(), target_method)) {
+  if (target_method->is_final_method(klass->access_flags())) {
     // a final method never needs a new entry; final methods can be statically
     // resolved and they have to be present in the vtable only if they override
     // a super's method, in which case they re-use its entry
     allocate_new = false;
+  } else if (klass->is_interface()) {
+    allocate_new = false;  // see note below in needs_new_vtable_entry
+    // An interface never allocates new vtable slots, only inherits old ones.
+    // This method will either be assigned its own itable index later,
+    // or be assigned an inherited vtable index in the loop below.
+    target_method()->set_vtable_index(Method::pending_itable_index);
   }
 
   // we need a new entry if there is no superclass
@@ -411,8 +418,14 @@
                                          Symbol* classname,
                                          AccessFlags class_flags,
                                          TRAPS) {
+  if (class_flags.is_interface()) {
+    // Interfaces do not use vtables, so there is no point to assigning
+    // a vtable index to any of their methods.  If we refrain from doing this,
+    // we can use Method::_vtable_index to hold the itable index
+    return false;
+  }
 
-  if (method_is_effectively_final(class_flags, target_method) ||
+  if (target_method->is_final_method(class_flags) ||
       // a final method never needs a new entry; final methods can be statically
       // resolved and they have to be present in the vtable only if they override
       // a super's method, in which case they re-use its entry
@@ -500,7 +513,8 @@
   return Method::invalid_vtable_index;
 }
 
-// check if an entry is miranda
+// check if an entry at an index is miranda
+// requires that method m at entry be declared ("held") by an interface.
 bool klassVtable::is_miranda_entry_at(int i) {
   Method* m = method_at(i);
   Klass* method_holder = m->method_holder();
@@ -516,7 +530,9 @@
   return false;
 }
 
-// check if a method is a miranda method, given a class's methods table and it's super
+// check if a method is a miranda method, given a class's methods table and its super
+// "miranda" means not static, not defined by this class, and not defined
+// in super unless it is private and therefore inaccessible to this class.
 // the caller must make sure that the method belongs to an interface implemented by the class
 bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) {
   if (m->is_static()) {
@@ -541,6 +557,14 @@
   return false;
 }
 
+// Scans current_interface_methods for miranda methods that do not
+// already appear in new_mirandas and are also not defined-and-non-private
+// in super (superclass).  These mirandas are added to all_mirandas if it is
+// not null; in addition, those that are not duplicates of miranda methods
+// inherited by super from its interfaces are added to new_mirandas.
+// Thus, new_mirandas will be the set of mirandas that this class introduces,
+// all_mirandas will be the set of all mirandas applicable to this class
+// including all defined in superclasses.
 void klassVtable::add_new_mirandas_to_lists(
     GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
     Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
@@ -599,17 +623,22 @@
   }
 }
 
-// fill in mirandas
-void klassVtable::fill_in_mirandas(int* initialized) {
+// Discover miranda methods ("miranda" = "interface abstract, no binding"),
+// and append them into the vtable starting at index initialized,
+// return the new value of initialized.
+int klassVtable::fill_in_mirandas(int initialized) {
   GrowableArray<Method*> mirandas(20);
   get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
                ik()->local_interfaces());
   for (int i = 0; i < mirandas.length(); i++) {
-    put_method_at(mirandas.at(i), *initialized);
-    ++(*initialized);
+    put_method_at(mirandas.at(i), initialized);
+    ++initialized;
   }
+  return initialized;
 }
 
+// Copy this class's vtable to the vtable beginning at start.
+// Used to copy superclass vtable to prefix of subclass's vtable.
 void klassVtable::copy_vtable_to(vtableEntry* start) {
   Copy::disjoint_words((HeapWord*)table(), (HeapWord*)start, _length * vtableEntry::size());
 }
@@ -723,6 +752,12 @@
 
 // Initialization
 void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
+  if (_klass->is_interface()) {
+    // This needs to go after vtable indexes are assigned but
+    // before implementors need to know the number of itable indexes.
+    assign_itable_indexes_for_interface(_klass());
+  }
+
   // Cannot be set up during bootstrapping; interfaces don't have
   // itables, and klasses with only one entry have empty itables
   if (Universe::is_bootstrapping() ||
@@ -754,45 +789,89 @@
 }
 
 
+inline bool interface_method_needs_itable_index(Method* m) {
+  if (m->is_static())           return false;   // e.g., Stream.empty
+  if (m->is_initializer())      return false;   // <init> or <clinit>
+  // If an interface redeclares a method from java.lang.Object,
+  // it should already have a vtable index, don't touch it.
+  // e.g., CharSequence.toString (from initialize_vtable)
+  // if (m->has_vtable_index())  return false; // NO!
+  return true;
+}
+
+int klassItable::assign_itable_indexes_for_interface(Klass* klass) {
+  // an interface does not have an itable, but its methods need to be numbered
+  if (TraceItables) tty->print_cr("%3d: Initializing itable for interface %s", ++initialize_count,
+                                  klass->name()->as_C_string());
+  Array<Method*>* methods = InstanceKlass::cast(klass)->methods();
+  int nof_methods = methods->length();
+  int ime_num = 0;
+  for (int i = 0; i < nof_methods; i++) {
+    Method* m = methods->at(i);
+    if (interface_method_needs_itable_index(m)) {
+      assert(!m->is_final_method(), "no final interface methods");
+      // If m is already assigned a vtable index, do not disturb it.
+      if (!m->has_vtable_index()) {
+        assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable");
+        m->set_itable_index(ime_num);
+        // Progress to next itable entry
+        ime_num++;
+      }
+    }
+  }
+  assert(ime_num == method_count_for_interface(klass), "proper sizing");
+  return ime_num;
+}
+
+int klassItable::method_count_for_interface(Klass* interf) {
+  assert(interf->oop_is_instance(), "must be");
+  assert(interf->is_interface(), "must be");
+  Array<Method*>* methods = InstanceKlass::cast(interf)->methods();
+  int nof_methods = methods->length();
+  while (nof_methods > 0) {
+    Method* m = methods->at(nof_methods-1);
+    if (m->has_itable_index()) {
+      int length = m->itable_index() + 1;
+#ifdef ASSERT
+      while (nof_methods > 0) {
+        m = methods->at(--nof_methods);
+        assert(!m->has_itable_index() || m->itable_index() < length, "");
+      }
+#endif //ASSERT
+      return length;  // return the rightmost itable index, plus one
+    }
+    nof_methods -= 1;
+  }
+  // no methods have itable indexes
+  return 0;
+}
+
+
 void klassItable::initialize_itable_for_interface(int method_table_offset, KlassHandle interf_h, bool checkconstraints, TRAPS) {
   Array<Method*>* methods = InstanceKlass::cast(interf_h())->methods();
   int nof_methods = methods->length();
   HandleMark hm;
-  KlassHandle klass = _klass;
   assert(nof_methods > 0, "at least one method must exist for interface to be in vtable");
   Handle interface_loader (THREAD, InstanceKlass::cast(interf_h())->class_loader());
-  int ime_num = 0;
 
-  // Skip first Method* if it is a class initializer
-  int i = methods->at(0)->is_static_initializer() ? 1 : 0;
-
-  // m, method_name, method_signature, klass reset each loop so they
-  // don't need preserving across check_signature_loaders call
-  // methods needs a handle in case of gc from check_signature_loaders
-  for(; i < nof_methods; i++) {
+  int ime_count = method_count_for_interface(interf_h());
+  for (int i = 0; i < nof_methods; i++) {
     Method* m = methods->at(i);
-    Symbol* method_name = m->name();
-    Symbol* method_signature = m->signature();
-
-    // This is same code as in Linkresolver::lookup_instance_method_in_klasses
-    Method* target = klass->uncached_lookup_method(method_name, method_signature);
-    while (target != NULL && target->is_static()) {
-      // continue with recursive lookup through the superclass
-      Klass* super = target->method_holder()->super();
-      target = (super == NULL) ? (Method*)NULL : super->uncached_lookup_method(method_name, method_signature);
+    methodHandle target;
+    if (m->has_itable_index()) {
+      LinkResolver::lookup_instance_method_in_klasses(target, _klass, m->name(), m->signature(), CHECK);
     }
     if (target == NULL || !target->is_public() || target->is_abstract()) {
       // Entry does not resolve. Leave it empty
     } else {
       // Entry did resolve, check loader constraints before initializing
       // if checkconstraints requested
-      methodHandle  target_h (THREAD, target); // preserve across gc
       if (checkconstraints) {
         Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
         if (method_holder_loader() != interface_loader()) {
           ResourceMark rm(THREAD);
           Symbol* failed_type_symbol =
-            SystemDictionary::check_signature_loaders(method_signature,
+            SystemDictionary::check_signature_loaders(m->signature(),
                                                       method_holder_loader,
                                                       interface_loader,
                                                       true, CHECK);
@@ -803,9 +882,9 @@
               "and the class loader (instance of %s) for interface "
               "%s have different Class objects for the type %s "
               "used in the signature";
-            char* sig = target_h()->name_and_sig_as_C_string();
+            char* sig = target()->name_and_sig_as_C_string();
             const char* loader1 = SystemDictionary::loader_name(method_holder_loader());
-            char* current = klass->name()->as_C_string();
+            char* current = _klass->name()->as_C_string();
             const char* loader2 = SystemDictionary::loader_name(interface_loader());
             char* iface = InstanceKlass::cast(interf_h())->name()->as_C_string();
             char* failed_type_name = failed_type_symbol->as_C_string();
@@ -821,10 +900,10 @@
       }
 
       // ime may have moved during GC so recalculate address
-      itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target_h());
+      int ime_num = m->itable_index();
+      assert(ime_num < ime_count, "oob");
+      itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target());
     }
-    // Progress to next entry
-    ime_num++;
   }
 }
 
@@ -913,20 +992,22 @@
   virtual void doit(Klass* intf, int method_count) = 0;
 };
 
-// Visit all interfaces with at-least one method (excluding <clinit>)
+// Visit all interfaces with at least one itable method
 void visit_all_interfaces(Array<Klass*>* transitive_intf, InterfaceVisiterClosure *blk) {
   // Handle array argument
   for(int i = 0; i < transitive_intf->length(); i++) {
     Klass* intf = transitive_intf->at(i);
     assert(intf->is_interface(), "sanity check");
 
-    // Find no. of methods excluding a <clinit>
-    int method_count = InstanceKlass::cast(intf)->methods()->length();
-    if (method_count > 0) {
-      Method* m = InstanceKlass::cast(intf)->methods()->at(0);
-      assert(m != NULL && m->is_method(), "sanity check");
-      if (m->name() == vmSymbols::object_initializer_name()) {
-        method_count--;
+    // Find no. of itable methods
+    int method_count = 0;
+    // method_count = klassItable::method_count_for_interface(intf);
+    Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
+    if (methods->length() > 0) {
+      for (int i = methods->length(); --i >= 0; ) {
+        if (interface_method_needs_itable_index(methods->at(i))) {
+          method_count++;
+        }
       }
     }
 
@@ -1024,40 +1105,26 @@
 }
 
 
-// m must be a method in an interface
-int klassItable::compute_itable_index(Method* m) {
-  InstanceKlass* intf = m->method_holder();
-  assert(intf->is_interface(), "sanity check");
-  Array<Method*>* methods = intf->methods();
-  int index = 0;
-  while(methods->at(index) != m) {
-    index++;
-    assert(index < methods->length(), "should find index for resolve_invoke");
-  }
-  // Adjust for <clinit>, which is left out of table if first method
-  if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
-    index--;
-  }
-  return index;
-}
-
-
-// inverse to compute_itable_index
+// inverse to itable_index
 Method* klassItable::method_for_itable_index(Klass* intf, int itable_index) {
   assert(InstanceKlass::cast(intf)->is_interface(), "sanity check");
+  assert(intf->verify_itable_index(itable_index), "");
   Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
 
+  if (itable_index < 0 || itable_index >= method_count_for_interface(intf))
+    return NULL;                // help caller defend against bad indexes
+
   int index = itable_index;
-  // Adjust for <clinit>, which is left out of table if first method
-  if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
-    index++;
+  Method* m = methods->at(index);
+  int index2 = -1;
+  while (!m->has_itable_index() ||
+         (index2 = m->itable_index()) != itable_index) {
+    assert(index2 < itable_index, "monotonic");
+    if (++index == methods->length())
+      return NULL;
+    m = methods->at(index);
   }
-
-  if (itable_index < 0 || index >= methods->length())
-    return NULL;                // help caller defend against bad indexes
-
-  Method* m = methods->at(index);
-  assert(compute_itable_index(m) == itable_index, "correct inverse");
+  assert(m->itable_index() == itable_index, "correct inverse");
 
   return m;
 }
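
The new numbering makes method_for_itable_index a true inverse of the per-interface assignment: assign_itable_indexes_for_interface hands out consecutive indexes, skipping statics, initializers, and methods that already own a vtable slot. A sketch of the round trip (illustrative; it would only compile inside HotSpot next to this code):

    int n = klassItable::assign_itable_indexes_for_interface(interf);
    for (int i = 0; i < n; i++) {
      Method* m = klassItable::method_for_itable_index(interf, i);
      assert(m != NULL && m->itable_index() == i, "indexes round-trip");
    }
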
--- a/src/share/vm/oops/klassVtable.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/klassVtable.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -124,7 +124,7 @@
 
   // support for miranda methods
   bool is_miranda_entry_at(int i);
-  void fill_in_mirandas(int* initialized);
+  int fill_in_mirandas(int initialized);
   static bool is_miranda(Method* m, Array<Method*>* class_methods, Klass* super);
   static void add_new_mirandas_to_lists(
       GrowableArray<Method*>* new_mirandas,
@@ -150,6 +150,8 @@
 //      from_compiled_code_entry_point -> nmethod entry point
 //      from_interpreter_entry_point   -> i2cadapter
 class vtableEntry VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+
  public:
   // size in words
   static int size() {
@@ -288,12 +290,12 @@
 #endif // INCLUDE_JVMTI
 
   // Setup of itable
+  static int assign_itable_indexes_for_interface(Klass* klass);
+  static int method_count_for_interface(Klass* klass);
   static int compute_itable_size(Array<Klass*>* transitive_interfaces);
   static void setup_itable_offset_table(instanceKlassHandle klass);
 
   // Resolving of method to index
-  static int compute_itable_index(Method* m);
-  // ...and back again:
   static Method* method_for_itable_index(Klass* klass, int itable_index);
 
   // Debugging/Statistics
--- a/src/share/vm/oops/method.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/method.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -509,24 +509,31 @@
   return _access_flags.has_loops();
 }
 
-
-bool Method::is_final_method() const {
-  // %%% Should return true for private methods also,
-  // since there is no way to override them.
-  return is_final() || method_holder()->is_final();
+bool Method::is_final_method(AccessFlags class_access_flags) const {
+  // or "does_not_require_vtable_entry"
+  // overpass can occur, is not final (reuses vtable entry)
+  // private methods get vtable entries for backward class compatibility.
+  if (is_overpass())  return false;
+  return is_final() || class_access_flags.is_final();
 }
 
-
-bool Method::is_strict_method() const {
-  return is_strict();
+bool Method::is_final_method() const {
+  return is_final_method(method_holder()->access_flags());
 }
 
-
-bool Method::can_be_statically_bound() const {
-  if (is_final_method())  return true;
+bool Method::can_be_statically_bound(AccessFlags class_access_flags) const {
+  if (is_final_method(class_access_flags))  return true;
+#ifdef ASSERT
+  bool is_nonv = (vtable_index() == nonvirtual_vtable_index);
+  if (class_access_flags.is_interface())  assert(is_nonv == is_static(), err_msg("is_nonv=%d", is_nonv));
+#endif
+  assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question");
   return vtable_index() == nonvirtual_vtable_index;
 }
 
+bool Method::can_be_statically_bound() const {
+  return can_be_statically_bound(method_holder()->access_flags());
+}
 
 bool Method::is_accessor() const {
   if (code_size() != 5) return false;
@@ -967,7 +974,7 @@
 
   assert(ik->is_subclass_of(method_holder()), "should be subklass");
   assert(ik->vtable() != NULL, "vtable should exist");
-  if (vtable_index() == nonvirtual_vtable_index) {
+  if (!has_vtable_index()) {
     return false;
   } else {
     Method* vt_m = ik->method_at_vtable(vtable_index());
@@ -1959,7 +1966,7 @@
 
 void Method::print_value_on(outputStream* st) const {
   assert(is_method(), "must be method");
-  st->print_cr(internal_name());
+  st->print(internal_name());
   print_address_on(st);
   st->print(" ");
   name()->print_value_on(st);
@@ -1967,6 +1974,7 @@
   signature()->print_value_on(st);
   st->print(" in ");
   method_holder()->print_value_on(st);
+  if (WizardMode) st->print("#%d", _vtable_index);
   if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals());
   if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code());
 }
--- a/src/share/vm/oops/method.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/method.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -448,16 +448,22 @@
   enum VtableIndexFlag {
     // Valid vtable indexes are non-negative (>= 0).
     // These few negative values are used as sentinels.
-    highest_unused_vtable_index_value = -5,
+    itable_index_max        = -10, // first itable index, growing downward
+    pending_itable_index    = -9,  // itable index will be assigned
     invalid_vtable_index    = -4,  // distinct from any valid vtable index
     garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
     nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
     // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
   };
   DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
-  int  vtable_index() const                      { assert(valid_vtable_index(), "");
-                                                   return _vtable_index; }
+  bool has_vtable_index() const                  { return _vtable_index >= 0; }
+  int  vtable_index() const                      { return _vtable_index; }
   void set_vtable_index(int index)               { _vtable_index = index; }
+  DEBUG_ONLY(bool valid_itable_index() const     { return _vtable_index <= pending_itable_index; })
+  bool has_itable_index() const                  { return _vtable_index <= itable_index_max; }
+  int  itable_index() const                      { assert(valid_itable_index(), "");
+                                                   return itable_index_max - _vtable_index; }
+  void set_itable_index(int index)               { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
 
   // interpreter entry
   address interpreter_entry() const              { return _i2i_entry; }
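
[Editor's note] The encoding above packs itable indexes into the same _vtable_index field by growing downward from itable_index_max. A standalone round-trip sketch of that arithmetic (plain ints, no Method state):

    enum { itable_index_max = -10 };
    inline int encode_itable_index(int itable_index) {
      return itable_index_max - itable_index;   // 0 -> -10, 1 -> -11, ...
    }
    inline int decode_itable_index(int encoded) {
      return itable_index_max - encoded;        // the function is its own inverse
    }
    // decode_itable_index(encode_itable_index(i)) == i for all i >= 0, and
    // every encoded value satisfies has_itable_index(): encoded <= -10.
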
@@ -560,10 +566,11 @@
 
   // checks method and its method holder
   bool is_final_method() const;
-  bool is_strict_method() const;
+  bool is_final_method(AccessFlags class_access_flags) const;
 
   // true if method needs no dynamic dispatch (final and/or no vtable entry)
   bool can_be_statically_bound() const;
+  bool can_be_statically_bound(AccessFlags class_access_flags) const;
 
   // returns true if the method has any backward branches.
   bool has_loops() {
@@ -740,10 +747,6 @@
   // so handles are not used to avoid deadlock.
   jmethodID find_jmethod_id_or_null()               { return method_holder()->jmethod_id_or_null(this); }
 
-  // JNI static invoke cached itable index accessors
-  int cached_itable_index()                         { return method_holder()->cached_itable_index(method_idnum()); }
-  void set_cached_itable_index(int index)           { method_holder()->set_cached_itable_index(method_idnum(), index); }
-
   // Support for inlining of intrinsic methods
   vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id;           }
   void     set_intrinsic_id(vmIntrinsics::ID id) {                           _intrinsic_id = (u1) id; }
--- a/src/share/vm/oops/methodData.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/methodData.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -72,6 +72,8 @@
 //
 // Overlay for generic profiling data.
 class DataLayout VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+
 private:
   // Every data layout begins with a header.  This header
   // contains a tag, which is used to indicate the size/layout
--- a/src/share/vm/oops/oop.inline.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/oop.inline.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -69,7 +69,7 @@
 }
 
 inline Klass* oopDesc::klass() const {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     return Klass::decode_klass_not_null(_metadata._compressed_klass);
   } else {
     return _metadata._klass;
@@ -78,7 +78,7 @@
 
 inline Klass* oopDesc::klass_or_null() const volatile {
   // can be NULL in CMS
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     return Klass::decode_klass(_metadata._compressed_klass);
   } else {
     return _metadata._klass;
@@ -86,19 +86,19 @@
 }
 
 inline int oopDesc::klass_gap_offset_in_bytes() {
-  assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
+  assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
   return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
 }
 
 inline Klass** oopDesc::klass_addr() {
   // Only used internally and with CMS and will not work with
   // UseCompressedOops
-  assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
+  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
   return (Klass**) &_metadata._klass;
 }
 
 inline narrowKlass* oopDesc::compressed_klass_addr() {
-  assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
+  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
   return &_metadata._compressed_klass;
 }
 
@@ -106,7 +106,7 @@
   // since klasses are promoted no store check is needed
   assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
   assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     *compressed_klass_addr() = Klass::encode_klass_not_null(k);
   } else {
     *klass_addr() = k;
@@ -118,7 +118,7 @@
 }
 
 inline void oopDesc::set_klass_gap(int v) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
   }
 }
@@ -126,7 +126,7 @@
 inline void oopDesc::set_klass_to_list_ptr(oop k) {
   // This is only to be used during GC, for from-space objects, so no
   // barrier is needed.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
   } else {
     _metadata._klass = (Klass*)(address)k;
@@ -135,7 +135,7 @@
 
 inline oop oopDesc::list_ptr_from_klass() {
   // This is only to be used during GC, for from-space objects.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     return decode_heap_oop((narrowOop)_metadata._compressed_klass);
   } else {
     // Special case for GC
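
[Editor's note] For context on the renamed flag: UseCompressedClassPointers (formerly UseCompressedKlassPointers) selects between the two overlapping _metadata representations read above. A simplified sketch of the idea; the field layout and decode shape shown here are assumptions, with the real decoding living in Klass::decode_klass_not_null:

    union metadata_sketch {
      Klass*   _klass;             // full pointer, flag off
      uint32_t _compressed_klass;  // narrow value, flag on
    };
    // Assumed decode shape: base + ((uintptr_t)narrow << shift),
    // where base/shift describe the compressed class space.
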
--- a/src/share/vm/oops/symbol.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/oops/symbol.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -45,7 +45,7 @@
 // in the SymbolTable bucket (the _literal field in HashtableEntry)
 // that points to the Symbol.  All other stores of a Symbol*
 // to a field of a persistent variable (e.g., the _name field in
-// FieldAccessInfo or _ptr in a CPSlot) is reference counted.
+// fieldDescriptor or _ptr in a CPSlot) is reference counted.
 //
 // 1) The lookup of a "name" in the SymbolTable either creates a Symbol F for
 // "name" and returns a pointer to F or finds a pre-existing Symbol F for
--- a/src/share/vm/opto/bytecodeInfo.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -123,7 +123,7 @@
   // Allows targeted inlining
   if(callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
-    if (PrintInlining && Verbose) {
+    if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method is hot: ");
     }
@@ -137,7 +137,7 @@
   if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
      size < InlineThrowMaxSize ) {
     wci_result->set_profit(wci_result->profit() * 100);
-    if (PrintInlining && Verbose) {
+    if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
     }
@@ -491,7 +491,7 @@
       C->log()->inline_fail(inline_msg);
     }
   }
-  if (PrintInlining) {
+  if (C->print_inlining()) {
     C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
     if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
     if (Verbose && callee_method) {
@@ -540,7 +540,7 @@
 
 #ifndef PRODUCT
   if (UseOldInlining && InlineWarmCalls
-      && (PrintOpto || PrintOptoInlining || PrintInlining)) {
+      && (PrintOpto || C->print_inlining())) {
     bool cold = wci.is_cold();
     bool hot  = !cold && wci.is_hot();
     bool old_cold = !success;
@@ -617,7 +617,7 @@
              callee_method->is_compiled_lambda_form()) {
      max_inline_level_adjust += 1;  // don't count method handle calls from the java.lang.invoke implementation
     }
-    if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+    if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr(" \\-> discounting inline depth");
     }
--- a/src/share/vm/opto/c2_globals.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/c2_globals.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -421,7 +421,7 @@
   product(bool, UseDivMod, true,                                            \
           "Use combined DivMod instruction if available")                   \
                                                                             \
-  product(intx, MinJumpTableSize, 18,                                       \
+  product_pd(intx, MinJumpTableSize,                                        \
           "Minimum number of targets in a generated jump table")            \
                                                                             \
   product(intx, MaxJumpTableSize, 65000,                                    \
@@ -448,6 +448,9 @@
   product(bool, EliminateAutoBox, true,                                     \
           "Control optimizations for autobox elimination")                  \
                                                                             \
+  experimental(bool, UseImplicitStableValues, false,                        \
+          "Mark well-known stable fields as such (e.g. String.value)")      \
+                                                                            \
   product(intx, AutoBoxCacheMax, 128,                                       \
           "Sets max value cached by the java.lang.Integer autobox cache")   \
                                                                             \
--- a/src/share/vm/opto/callGenerator.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/callGenerator.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -159,8 +159,9 @@
   virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
 
   static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
-    if (PrintInlining)
+    if (C->print_inlining()) {
       C->print_inlining(callee, inline_level, bci, msg);
+    }
   }
 };
 
--- a/src/share/vm/opto/cfgnode.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/cfgnode.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -1932,7 +1932,7 @@
 #ifdef _LP64
   // Push DecodeN/DecodeNKlass down through phi.
   // The rest of phi graph will transform by split EncodeP node though phis up.
-  if ((UseCompressedOops || UseCompressedKlassPointers) && can_reshape && progress == NULL) {
+  if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) {
     bool may_push = true;
     bool has_decodeN = false;
     bool is_decodeN = false;
--- a/src/share/vm/opto/chaitin.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/chaitin.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -122,40 +122,23 @@
   return score;
 }
 
-LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
-  memset( _lidxs, 0, sizeof(uint)*max );
-}
-
-void LRG_List::extend( uint nidx, uint lidx ) {
-  _nesting.check();
-  if( nidx >= _max ) {
-    uint size = 16;
-    while( size <= nidx ) size <<=1;
-    _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
-    _max = size;
-  }
-  while( _cnt <= nidx )
-    _lidxs[_cnt++] = 0;
-  _lidxs[nidx] = lidx;
-}
-
 #define NUMBUCKS 3
 
 // Straight out of Tarjan's union-find algorithm
 uint LiveRangeMap::find_compress(uint lrg) {
   uint cur = lrg;
-  uint next = _uf_map[cur];
+  uint next = _uf_map.at(cur);
   while (next != cur) { // Scan chain of equivalences
     assert( next < cur, "always union smaller");
     cur = next; // until find a fixed-point
-    next = _uf_map[cur];
+    next = _uf_map.at(cur);
   }
 
   // Core of union-find algorithm: update chain of
   // equivalences to be equal to the root.
   while (lrg != next) {
-    uint tmp = _uf_map[lrg];
-    _uf_map.map(lrg, next);
+    uint tmp = _uf_map.at(lrg);
+    _uf_map.at_put(lrg, next);
     lrg = tmp;
   }
   return lrg;
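
[Editor's note] The control flow of find_compress is unchanged by the GrowableArray migration: a first pass walks to the root of the equivalence chain, a second pass repoints every visited entry at that root. The same algorithm over a plain std::vector, as a self-contained sketch rather than HotSpot code:

    #include <vector>

    unsigned find_compress_sketch(std::vector<unsigned>& uf, unsigned lrg) {
      unsigned root = lrg;
      while (uf[root] != root) {
        root = uf[root];             // pass 1: find the fixed point
      }
      while (uf[lrg] != root) {      // pass 2: compress the path to the root
        unsigned next = uf[lrg];
        uf[lrg] = root;
        lrg = next;
      }
      return root;
    }
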
@@ -165,10 +148,10 @@
 void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
   _max_lrg_id= max_lrg_id;
   // Force the Union-Find mapping to be at least this large
-  _uf_map.extend(_max_lrg_id, 0);
+  _uf_map.at_put_grow(_max_lrg_id, 0);
   // Initialize it to be the ID mapping.
   for (uint i = 0; i < _max_lrg_id; ++i) {
-    _uf_map.map(i, i);
+    _uf_map.at_put(i, i);
   }
 }
 
@@ -176,12 +159,12 @@
 // the Union-Find mapping after this call.
 void LiveRangeMap::compress_uf_map_for_nodes() {
   // For all Nodes, compress mapping
-  uint unique = _names.Size();
+  uint unique = _names.length();
   for (uint i = 0; i < unique; ++i) {
-    uint lrg = _names[i];
+    uint lrg = _names.at(i);
     uint compressed_lrg = find(lrg);
     if (lrg != compressed_lrg) {
-      _names.map(i, compressed_lrg);
+      _names.at_put(i, compressed_lrg);
     }
   }
 }
@@ -198,11 +181,11 @@
     return lrg;
   }
 
-  uint next = _uf_map[lrg];
+  uint next = _uf_map.at(lrg);
   while (next != lrg) { // Scan chain of equivalences
     assert(next < lrg, "always union smaller");
     lrg = next; // until find a fixed-point
-    next = _uf_map[lrg];
+    next = _uf_map.at(lrg);
   }
   return next;
 }
@@ -215,7 +198,7 @@
        NULL
 #endif
        )
-  , _lrg_map(unique)
+  , _lrg_map(Thread::current()->resource_area(), unique)
   , _live(0)
   , _spilled_once(Thread::current()->resource_area())
   , _spilled_twice(Thread::current()->resource_area())
@@ -692,6 +675,7 @@
       _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
     }
   }
+
   // Reset the Union-Find mapping to be identity
   _lrg_map.reset_uf_map(lr_counter);
 }
--- a/src/share/vm/opto/chaitin.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/chaitin.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -283,8 +283,8 @@
 
   // Straight out of Tarjan's union-find algorithm
   uint find_compress(const Node *node) {
-    uint lrg_id = find_compress(_names[node->_idx]);
-    _names.map(node->_idx, lrg_id);
+    uint lrg_id = find_compress(_names.at(node->_idx));
+    _names.at_put(node->_idx, lrg_id);
     return lrg_id;
   }
 
@@ -305,40 +305,40 @@
   }
 
   uint size() const {
-    return _names.Size();
+    return _names.length();
   }
 
   uint live_range_id(uint idx) const {
-    return _names[idx];
+    return _names.at(idx);
   }
 
   uint live_range_id(const Node *node) const {
-    return _names[node->_idx];
+    return _names.at(node->_idx);
   }
 
   uint uf_live_range_id(uint lrg_id) const {
-    return _uf_map[lrg_id];
+    return _uf_map.at(lrg_id);
   }
 
   void map(uint idx, uint lrg_id) {
-    _names.map(idx, lrg_id);
+    _names.at_put(idx, lrg_id);
   }
 
   void uf_map(uint dst_lrg_id, uint src_lrg_id) {
-    _uf_map.map(dst_lrg_id, src_lrg_id);
+    _uf_map.at_put(dst_lrg_id, src_lrg_id);
   }
 
   void extend(uint idx, uint lrg_id) {
-    _names.extend(idx, lrg_id);
+    _names.at_put_grow(idx, lrg_id);
   }
 
   void uf_extend(uint dst_lrg_id, uint src_lrg_id) {
-    _uf_map.extend(dst_lrg_id, src_lrg_id);
+    _uf_map.at_put_grow(dst_lrg_id, src_lrg_id);
   }
 
-  LiveRangeMap(uint unique)
-  : _names(unique)
-  , _uf_map(unique)
+  LiveRangeMap(Arena* arena, uint unique)
+  : _names(arena, unique, unique, 0)
+  , _uf_map(arena, unique, unique, 0)
   , _max_lrg_id(0) {}
 
   uint find_id( const Node *n ) {
@@ -355,14 +355,14 @@
   void compress_uf_map_for_nodes();
 
   uint find(uint lidx) {
-    uint uf_lidx = _uf_map[lidx];
+    uint uf_lidx = _uf_map.at(lidx);
     return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx);
   }
 
   // Convert a Node into a Live Range Index - a lidx
   uint find(const Node *node) {
     uint lidx = live_range_id(node);
-    uint uf_lidx = _uf_map[lidx];
+    uint uf_lidx = _uf_map.at(lidx);
     return (uf_lidx == lidx) ? uf_lidx : find_compress(node);
   }
 
@@ -371,10 +371,10 @@
 
   // Like Find above, but no path compress, so bad asymptotic behavior
   uint find_const(const Node *node) const {
-    if(node->_idx >= _names.Size()) {
+    if(node->_idx >= (uint)_names.length()) {
       return 0; // not mapped, usual for debug dump
     }
-    return find_const(_names[node->_idx]);
+    return find_const(_names.at(node->_idx));
   }
 };
 
--- a/src/share/vm/opto/coalesce.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/coalesce.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -29,7 +29,6 @@
 
 class LoopTree;
 class LRG;
-class LRG_List;
 class Matcher;
 class PhaseIFG;
 class PhaseCFG;
--- a/src/share/vm/opto/compile.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/compile.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -654,7 +654,7 @@
                   _inlining_progress(false),
                   _inlining_incrementally(false),
                   _print_inlining_list(NULL),
-                  _print_inlining(0) {
+                  _print_inlining_idx(0) {
   C = this;
 
   CompileWrapper cw(this);
@@ -679,6 +679,8 @@
   set_print_assembly(print_opto_assembly);
   set_parsed_irreducible_loop(false);
 #endif
+  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
+  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
 
   if (ProfileTraps) {
     // Make sure the method being compiled gets its own MDO,
@@ -710,7 +712,7 @@
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);
 
-  if (PrintInlining  || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
   }
   { // Scope for timing the parser
@@ -937,7 +939,7 @@
     _inlining_progress(false),
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
-    _print_inlining(0) {
+    _print_inlining_idx(0) {
   C = this;
 
 #ifndef PRODUCT
@@ -1297,6 +1299,10 @@
 
   // Array pointers need some flattening
   const TypeAryPtr *ta = tj->isa_aryptr();
+  if (ta && ta->is_stable()) {
+    // Erase stability property for alias analysis.
+    tj = ta = ta->cast_to_stable(false);
+  }
   if( ta && is_known_inst ) {
     if ( offset != Type::OffsetBot &&
          offset > arrayOopDesc::length_offset_in_bytes() ) {
@@ -1497,6 +1503,7 @@
   _index = i;
   _adr_type = at;
   _field = NULL;
+  _element = NULL;
   _is_rewritable = true; // default
   const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL;
   if (atoop != NULL && atoop->is_known_instance()) {
@@ -1615,6 +1622,16 @@
           && flat->is_instptr()->klass() == env()->Class_klass())
         alias_type(idx)->set_rewritable(false);
     }
+    if (flat->isa_aryptr()) {
+#ifdef ASSERT
+      const int header_size_min  = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+      // (T_BYTE has the weakest alignment and size restrictions...)
+      assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
+#endif
+      if (flat->offset() == TypePtr::OffsetBot) {
+        alias_type(idx)->set_element(flat->is_aryptr()->elem());
+      }
+    }
     if (flat->isa_klassptr()) {
       if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
         alias_type(idx)->set_rewritable(false);
@@ -1677,7 +1694,7 @@
   else
     t = TypeOopPtr::make_from_klass_raw(field->holder());
   AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
-  assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct");
+  assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
   return atp;
 }
 
@@ -2631,7 +2648,7 @@
             addp->in(AddPNode::Base) == n->in(AddPNode::Base),
             "Base pointers must match" );
 #ifdef _LP64
-    if ((UseCompressedOops || UseCompressedKlassPointers) &&
+    if ((UseCompressedOops || UseCompressedClassPointers) &&
         addp->Opcode() == Op_ConP &&
         addp == n->in(AddPNode::Base) &&
         n->in(AddPNode::Offset)->is_Con()) {
@@ -3018,7 +3035,7 @@
 
   // Skip next transformation if compressed oops are not used.
   if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
-      (!UseCompressedOops && !UseCompressedKlassPointers))
+      (!UseCompressedOops && !UseCompressedClassPointers))
     return;
 
   // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
@@ -3596,7 +3613,7 @@
 }
 
 void Compile::dump_inlining() {
-  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+  if (print_inlining() || print_intrinsics()) {
     // Print inlining message for candidates that we couldn't inline
     // for lack of space or non constant receiver
     for (int i = 0; i < _late_inlines.length(); i++) {
@@ -3620,7 +3637,7 @@
       }
     }
     for (int i = 0; i < _print_inlining_list->length(); i++) {
-      tty->print(_print_inlining_list->at(i).ss()->as_string());
+      tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
     }
   }
 }
--- a/src/share/vm/opto/compile.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/compile.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -72,6 +72,7 @@
 class StartNode;
 class SafePointNode;
 class JVMState;
+class Type;
 class TypeData;
 class TypePtr;
 class TypeOopPtr;
@@ -119,6 +120,7 @@
     int             _index;         // unique index, used with MergeMemNode
     const TypePtr*  _adr_type;      // normalized address type
     ciField*        _field;         // relevant instance field, or null if none
+    const Type*     _element;       // relevant array element type, or null if none
     bool            _is_rewritable; // false if the memory is write-once only
     int             _general_index; // if this is type is an instance, the general
                                     // type that this is an instance of
@@ -129,6 +131,7 @@
     int             index()         const { return _index; }
     const TypePtr*  adr_type()      const { return _adr_type; }
     ciField*        field()         const { return _field; }
+    const Type*     element()       const { return _element; }
     bool            is_rewritable() const { return _is_rewritable; }
     bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
     int             general_index() const { return (_general_index != 0) ? _general_index : _index; }
@@ -137,7 +140,14 @@
     void set_field(ciField* f) {
       assert(!_field,"");
       _field = f;
-      if (f->is_final())  _is_rewritable = false;
+      if (f->is_final() || f->is_stable()) {
+        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
+        _is_rewritable = false;
+      }
+    }
+    void set_element(const Type* e) {
+      assert(_element == NULL, "");
+      _element = e;
     }
 
     void print_on(outputStream* st) PRODUCT_RETURN;
@@ -302,6 +312,8 @@
   bool                  _do_method_data_update; // True if we generate code to update MethodData*s
   int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
   bool                  _print_assembly;        // True if we should dump assembly code for this compilation
+  bool                  _print_inlining;        // True if we should print inlining for this compilation
+  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
 #ifndef PRODUCT
   bool                  _trace_opto_output;
   bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
@@ -404,7 +416,7 @@
   };
 
   GrowableArray<PrintInliningBuffer>* _print_inlining_list;
-  int _print_inlining;
+  int _print_inlining_idx;
 
   // Only keep nodes in the expensive node list that need to be optimized
   void cleanup_expensive_nodes(PhaseIterGVN &igvn);
@@ -416,24 +428,24 @@
  public:
 
   outputStream* print_inlining_stream() const {
-    return _print_inlining_list->at(_print_inlining).ss();
+    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
   }
 
   void print_inlining_skip(CallGenerator* cg) {
-    if (PrintInlining) {
-      _print_inlining_list->at(_print_inlining).set_cg(cg);
-      _print_inlining++;
-      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
+    if (_print_inlining) {
+      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
+      _print_inlining_idx++;
+      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
     }
   }
 
   void print_inlining_insert(CallGenerator* cg) {
-    if (PrintInlining) {
+    if (_print_inlining) {
       for (int i = 0; i < _print_inlining_list->length(); i++) {
-        if (_print_inlining_list->at(i).cg() == cg) {
+        if (_print_inlining_list->adr_at(i)->cg() == cg) {
           _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
-          _print_inlining = i+1;
-          _print_inlining_list->at(i).set_cg(NULL);
+          _print_inlining_idx = i+1;
+          _print_inlining_list->adr_at(i)->set_cg(NULL);
           return;
         }
       }
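
[Editor's note] The switch from at() to adr_at() here is not cosmetic: GrowableArray<PrintInliningBuffer> stores the buffers by value, at(i) returns a copy, and adr_at(i) returns a pointer to the element in place, so mutations through the copy are silently lost:

    GrowableArray<PrintInliningBuffer>* list = _print_inlining_list;
    list->at(i).set_cg(cg);       // mutates a temporary copy: change discarded
    list->adr_at(i)->set_cg(cg);  // mutates the stored element: change sticks
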
@@ -562,6 +574,10 @@
   int               AliasLevel() const          { return _AliasLevel; }
   bool              print_assembly() const       { return _print_assembly; }
   void          set_print_assembly(bool z)       { _print_assembly = z; }
+  bool              print_inlining() const       { return _print_inlining; }
+  void          set_print_inlining(bool z)       { _print_inlining = z; }
+  bool              print_intrinsics() const     { return _print_intrinsics; }
+  void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
   // check the CompilerOracle for special behaviours for this compile
   bool          method_has_option(const char * option) {
     return method() != NULL && method()->has_option(option);
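
[Editor's note] With print_inlining() now consulting has_option(), inlining output can be requested per method through the CompilerOracle rather than globally; the method pattern below is illustrative:

    java -XX:CompileCommand=option,java/lang/String.indexOf,PrintInlining ...
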
--- a/src/share/vm/opto/connode.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/connode.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -630,7 +630,7 @@
   if (t == Type::TOP) return Type::TOP;
   assert (t != TypePtr::NULL_PTR, "null klass?");
 
-  assert(UseCompressedKlassPointers && t->isa_klassptr(), "only klass ptr here");
+  assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
   return t->make_narrowklass();
 }
 
--- a/src/share/vm/opto/doCall.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/doCall.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -41,9 +41,9 @@
 #include "runtime/sharedRuntime.hpp"
 
 void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
-  if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
+  if (TraceTypeProfile || C->print_inlining()) {
     outputStream* out = tty;
-    if (!PrintInlining) {
+    if (!C->print_inlining()) {
       if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
         method->print_short_name();
         tty->cr();
--- a/src/share/vm/opto/graphKit.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/graphKit.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -3825,8 +3825,13 @@
                                                    TypeAry::make(TypeInt::CHAR,TypeInt::POS),
                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
   int value_field_idx = C->get_alias_index(value_field_type);
-  return make_load(ctrl, basic_plus_adr(str, str, value_offset),
-                   value_type, T_OBJECT, value_field_idx);
+  Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
+                         value_type, T_OBJECT, value_field_idx);
+  // String.value field is known to be @Stable.
+  if (UseImplicitStableValues) {
+    load = cast_array_to_stable(load, value_type);
+  }
+  return load;
 }
 
 void GraphKit::store_String_offset(Node* ctrl, Node* str, Node* value) {
@@ -3844,9 +3849,6 @@
   const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                      false, NULL, 0);
   const TypePtr* value_field_type = string_type->add_offset(value_offset);
-  const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
-                                                   TypeAry::make(TypeInt::CHAR,TypeInt::POS),
-                                                   ciTypeArrayKlass::make(T_CHAR), true, 0);
   int value_field_idx = C->get_alias_index(value_field_type);
   store_to_memory(ctrl, basic_plus_adr(str, value_offset),
                   value, T_OBJECT, value_field_idx);
@@ -3861,3 +3863,9 @@
   store_to_memory(ctrl, basic_plus_adr(str, count_offset),
                   value, T_INT, count_field_idx);
 }
+
+Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
+  // Reify the property as a CastPP node in the Ideal graph to comply with the
+  // monotonicity assumption of the CCP analysis.
+  return _gvn.transform(new(C) CastPPNode(ary, ary_type->cast_to_stable(true)));
+}
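
[Editor's note] Usage follows the load_String_value change above: perform the load, then, under UseImplicitStableValues, re-type it through the cast (adr here stands for the computed field address):

    Node* load = make_load(ctrl, adr, value_type, T_OBJECT, value_field_idx);
    if (UseImplicitStableValues) {
      load = cast_array_to_stable(load, value_type);  // result type now answers is_stable()
    }
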
--- a/src/share/vm/opto/graphKit.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/graphKit.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -836,6 +836,9 @@
   // Insert a loop predicate into the graph
   void add_predicate(int nargs = 0);
   void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);
+
+  // Produce new array node of stable type
+  Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
 };
 
 // Helper class to support building of control flow branches. Upon
--- a/src/share/vm/opto/library_call.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/library_call.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -543,7 +543,7 @@
   Compile* C = kit.C;
   int nodes = C->unique();
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Intrinsic %s", str);
@@ -554,7 +554,7 @@
 
   // Try to inline the intrinsic.
   if (kit.try_to_inline()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -570,7 +570,7 @@
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
@@ -592,7 +592,7 @@
   int nodes = C->unique();
 #ifndef PRODUCT
   assert(is_predicted(), "sanity");
-  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     char buf[1000];
     const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
     tty->print_cr("Predicate for intrinsic %s", str);
@@ -603,7 +603,7 @@
 
   Node* slow_ctl = kit.try_to_predicate();
   if (!kit.failing()) {
-    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -617,7 +617,7 @@
   }
 
   // The intrinsic bailed out
-  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     if (jvms->has_method()) {
       // Not a root compile.
       const char* msg = "failed to generate predicate for intrinsic";
@@ -1280,6 +1280,11 @@
   const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin));
   const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot);
 
+  // String.value field is known to be @Stable.
+  if (UseImplicitStableValues) {
+    target = cast_array_to_stable(target, target_type);
+  }
+
   IdealKit kit(this, false, true);
 #define __ kit.
   Node* zero             = __ ConI(0);
@@ -2294,7 +2299,7 @@
     const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
 
 #ifndef PRODUCT
-    if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+    if (C->print_intrinsics() || C->print_inlining()) {
       tty->print("  from base type: ");  adr_type->dump();
       tty->print("  sharpened value: ");  tjp->dump();
     }
@@ -3255,7 +3260,7 @@
   if (mirror_con == NULL)  return false;  // cannot happen?
 
 #ifndef PRODUCT
-  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+  if (C->print_intrinsics() || C->print_inlining()) {
     ciType* k = mirror_con->java_mirror_type();
     if (k) {
       tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
@@ -3729,6 +3734,8 @@
                                              RegionNode* slow_region) {
   ciMethod* method = callee();
   int vtable_index = method->vtable_index();
+  assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+         err_msg_res("bad index %d", vtable_index));
   // Get the Method* out of the appropriate vtable entry.
   int entry_offset  = (InstanceKlass::vtable_start_offset() +
                      vtable_index*vtableEntry::size()) * wordSize +
@@ -3779,6 +3786,8 @@
       // so the vtable index is fixed.
       // No need to use the linkResolver to get it.
        vtable_index = method->vtable_index();
+       assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
+              err_msg_res("bad index %d", vtable_index));
     }
     slow_call = new(C) CallDynamicJavaNode(tf,
                           SharedRuntime::get_resolve_virtual_call_stub(),
@@ -3943,14 +3952,14 @@
 // caller sensitive methods.
 bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
   }
 #endif
 
   if (!jvms()->has_method()) {
 #ifndef PRODUCT
-    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
       tty->print_cr("  Bailing out because intrinsic was inlined at top level");
     }
 #endif
@@ -3974,7 +3983,7 @@
       // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
       if (!m->caller_sensitive()) {
 #ifndef PRODUCT
-        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
           tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
         }
 #endif
@@ -3990,7 +3999,7 @@
         set_result(makecon(TypeInstPtr::make(caller_mirror)));
 
 #ifndef PRODUCT
-        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
           tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
           tty->print_cr("  JVM state at this point:");
           for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -4006,7 +4015,7 @@
   }
 
 #ifndef PRODUCT
-  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
     tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
     tty->print_cr("  JVM state at this point:");
     for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -4199,7 +4208,7 @@
   // 12 - 64-bit VM, compressed klass
   // 16 - 64-bit VM, normal klass
   if (base_off % BytesPerLong != 0) {
-    assert(UseCompressedKlassPointers, "");
+    assert(UseCompressedClassPointers, "");
     if (is_array) {
       // Exclude length to copy by 8 bytes words.
       base_off += sizeof(int);
--- a/src/share/vm/opto/live.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/live.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -91,7 +91,7 @@
         break;
       }
 
-      uint r = _names[n->_idx];
+      uint r = _names.at(n->_idx);
       assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
       def->insert( r );
       use->remove( r );
@@ -100,7 +100,7 @@
         Node *nk = n->in(k);
         uint nkidx = nk->_idx;
         if (_cfg.get_block_for_node(nk) != block) {
-          uint u = _names[nkidx];
+          uint u = _names.at(nkidx);
           use->insert(u);
           DEBUG_ONLY(def_outside->insert(u);)
         }
@@ -112,7 +112,7 @@
 #endif
     // Remove anything defined by Phis and the block start instruction
     for (uint k = i; k > 0; k--) {
-      uint r = _names[block->get_node(k - 1)->_idx];
+      uint r = _names.at(block->get_node(k - 1)->_idx);
       def->insert(r);
       use->remove(r);
     }
@@ -124,7 +124,7 @@
 
       // PhiNode uses go in the live-out set of prior blocks.
       for (uint k = i; k > 0; k--) {
-        add_liveout(p, _names[block->get_node(k-1)->in(l)->_idx], first_pass);
+        add_liveout(p, _names.at(block->get_node(k-1)->in(l)->_idx), first_pass);
       }
     }
     freeset(block);
@@ -256,7 +256,7 @@
   tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
   uint cnt = b->number_of_nodes();
   for( uint i=0; i<cnt; i++ ) {
-    tty->print("L%d/", _names[b->get_node(i)->_idx] );
+    tty->print("L%d/", _names.at(b->get_node(i)->_idx));
     b->get_node(i)->dump();
   }
   tty->print("\n");
@@ -321,7 +321,7 @@
 #ifdef _LP64
                       UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
                       UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
-                      UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
+                      UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
 #endif
                       check->as_Mach()->ideal_Opcode() == Op_LoadP ||
                       check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
--- a/src/share/vm/opto/live.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/live.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -40,27 +40,7 @@
 //------------------------------LRG_List---------------------------------------
 // Map Node indices to Live RanGe indices.
 // Array lookup in the optimized case.
-class LRG_List : public ResourceObj {
-  friend class VMStructs;
-  uint _cnt, _max;
-  uint* _lidxs;
-  ReallocMark _nesting;         // assertion check for reallocations
-public:
-  LRG_List( uint max );
-
-  uint lookup( uint nidx ) const {
-    return _lidxs[nidx];
-  }
-  uint operator[] (uint nidx) const { return lookup(nidx); }
-
-  void map( uint nidx, uint lidx ) {
-    assert( nidx < _cnt, "oob" );
-    _lidxs[nidx] = lidx;
-  }
-  void extend( uint nidx, uint lidx );
-
-  uint Size() const { return _cnt; }
-};
+typedef GrowableArray<uint> LRG_List;
 
 //------------------------------PhaseLive--------------------------------------
 // Compute live-in/live-out
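
[Editor's note] The typedef removes a bespoke container; every former LRG_List operation has a direct GrowableArray counterpart, which is what the chaitin.cpp/chaitin.hpp call-site edits above apply mechanically:

    //   old LRG_List          new GrowableArray<uint>
    //   ------------          -----------------------
    //   lookup(i) / [i]   ->  at(i)
    //   map(i, v)         ->  at_put(i, v)
    //   extend(i, v)      ->  at_put_grow(i, v)   (fills any gap)
    //   Size()            ->  length()
    LRG_List names(Thread::current()->resource_area(), 8, 8, 0);
    names.at_put_grow(17, 5, 0);   // grows to length 18; slots 8..16 become 0
    uint lrg = names.at(17);       // == 5
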
--- a/src/share/vm/opto/macro.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/macro.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -2191,7 +2191,7 @@
       Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
       klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
 #ifdef _LP64
-      if (UseCompressedKlassPointers && klass_node->is_DecodeNKlass()) {
+      if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
         assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
         klass_node->in(1)->init_req(0, ctrl);
       } else
--- a/src/share/vm/opto/memnode.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/memnode.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -962,6 +962,19 @@
   return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address);
 }
 
+static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) {
+  if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) {
+    bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile();
+    bool is_stable_ary = FoldStableValues &&
+                         (tp != NULL) && (tp->isa_aryptr() != NULL) &&
+                         tp->isa_aryptr()->is_stable();
+
+    return (eliminate_boxing && non_volatile) || is_stable_ary;
+  }
+
+  return false;
+}
+
 //---------------------------can_see_stored_value------------------------------
 // This routine exists to make sure this set of tests is done the same
 // everywhere.  We need to make a coordinated change: first LoadNode::Ideal
@@ -976,11 +989,9 @@
   const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
   Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL;
   // This is more general than load from boxing objects.
-  if (phase->C->eliminate_boxing() && (atp != NULL) &&
-      (atp->index() >= Compile::AliasIdxRaw) &&
-      (atp->field() != NULL) && !atp->field()->is_volatile()) {
+  if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) {
     uint alias_idx = atp->index();
-    bool final = atp->field()->is_final();
+    bool final = !atp->is_rewritable();
     Node* result = NULL;
     Node* current = st;
     // Skip through chains of MemBarNodes checking the MergeMems for
@@ -1015,7 +1026,6 @@
     }
   }
 
-
   // Loop around twice in the case Load -> Initialize -> Store.
   // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
   for (int trip = 0; trip <= 1; trip++) {
@@ -1577,6 +1587,40 @@
   return NULL;
 }
 
+// Try to constant-fold a stable array element.
+static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) {
+  assert(ary->is_stable(), "array should be stable");
+
+  if (ary->const_oop() != NULL) {
+    // Decode the results of GraphKit::array_element_address.
+    ciArray* aobj = ary->const_oop()->as_array();
+    ciConstant con = aobj->element_value_by_offset(off);
+
+    if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) {
+      const Type* con_type = Type::make_from_constant(con);
+      if (con_type != NULL) {
+        if (con_type->isa_aryptr()) {
+          // Join with the array element type, in case it is also stable.
+          int dim = ary->stable_dimension();
+          con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1);
+        }
+        if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) {
+          con_type = con_type->make_narrowoop();
+        }
+#ifndef PRODUCT
+        if (TraceIterativeGVN) {
+          tty->print("FoldStableValues: array element [off=%d]: con_type=", off);
+          con_type->dump(); tty->cr();
+        }
+#endif //PRODUCT
+        return con_type;
+      }
+    }
+  }
+
+  return NULL;
+}
+
 //------------------------------Value-----------------------------------------
 const Type *LoadNode::Value( PhaseTransform *phase ) const {
   // Either input is TOP ==> the result is TOP
@@ -1591,8 +1635,31 @@
   Compile* C = phase->C;
 
   // Try to guess loaded type from pointer type
-  if (tp->base() == Type::AryPtr) {
-    const Type *t = tp->is_aryptr()->elem();
+  if (tp->isa_aryptr()) {
+    const TypeAryPtr* ary = tp->is_aryptr();
+    const Type *t = ary->elem();
+
+    // Determine whether the reference is beyond the header or not, by comparing
+    // the offset against the offset of the start of the array's data.
+    // Different array types begin at slightly different offsets (12 vs. 16).
+    // We choose T_BYTE as an example base type that is least restrictive
+    // as to alignment, which will therefore produce the smallest
+    // possible base offset.
+    const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
+    const bool off_beyond_header = ((uint)off >= (uint)min_base_off);
+
+    // Try to constant-fold a stable array element.
+    if (FoldStableValues && ary->is_stable()) {
+      // Make sure the reference is not into the header
+      if (off_beyond_header && off != Type::OffsetBot) {
+        assert(adr->is_AddP() && adr->in(AddPNode::Offset)->is_Con(), "offset is a constant");
+        const Type* con_type = fold_stable_ary_elem(ary, off, memory_type());
+        if (con_type != NULL) {
+          return con_type;
+        }
+      }
+    }
+
     // Don't do this for integer types. There is only potential profit if
     // the element type t is lower than _type; that is, for int types, if _type is
     // more restrictive than t.  This only happens here if one is short and the other
@@ -1613,14 +1680,7 @@
         && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
       // t might actually be lower than _type, if _type is a unique
       // concrete subclass of abstract class t.
-      // Make sure the reference is not into the header, by comparing
-      // the offset against the offset of the start of the array's data.
-      // Different array types begin at slightly different offsets (12 vs. 16).
-      // We choose T_BYTE as an example base type that is least restrictive
-      // as to alignment, which will therefore produce the smallest
-      // possible base offset.
-      const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE);
-      if ((uint)off >= (uint)min_base_off) {  // is the offset beyond the header?
+      if (off_beyond_header) {  // is the offset beyond the header?
         const Type* jt = t->join(_type);
         // In any case, do not allow the join, per se, to empty out the type.
         if (jt->empty() && !t->empty()) {
@@ -1971,7 +2031,7 @@
   assert(adr_type != NULL, "expecting TypeKlassPtr");
 #ifdef _LP64
   if (adr_type->is_ptr_to_narrowklass()) {
-    assert(UseCompressedKlassPointers, "no compressed klasses");
+    assert(UseCompressedClassPointers, "no compressed klasses");
     Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
     return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
   }
@@ -2309,7 +2369,7 @@
       val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
       return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
-               (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() &&
+               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                 adr->bottom_type()->isa_rawptr())) {
       val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
       return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
--- a/src/share/vm/opto/parse.hpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/parse.hpp	Thu Oct 03 19:13:12 2013 +0100
@@ -518,7 +518,7 @@
 
   // loading from a constant field or the constant pool
   // returns false if push failed (non-perm field constants only, not ldcs)
-  bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false);
+  bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false, const Type* basic_type = NULL);
 
   // implementation of object creation bytecodes
   void emit_guard_for_new(ciInstanceKlass* klass);
--- a/src/share/vm/opto/parse3.cpp	Sat Sep 14 20:40:34 2013 +0100
+++ b/src/share/vm/opto/parse3.cpp	Thu Oct 03 19:13:12 2013 +0100
@@ -147,7 +147,15 @@
 void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
   // Does this field have a constant value?  If so, just push the value.
   if (field->is_constant()) {
-    // final field
+    // final or stable field
+    const Type* stable_type = NULL;
+    if (FoldStableValues && field->is_stable()) {
+      stable_type = Type::get_const_type(field->type());
+      if (field->type()->is_array_klass()) {
+        int stable_dimension = field->type()->as_array_klass()->dimension();
+        stable_type = stable_type->is_aryptr()->cast_to_stable(true, stable_dimension);
+      }
+    }
     if (field->is_static()) {
       // final static field
       if (C->eliminate_boxing()) {
@@ -167,11 +175,10 @@
           }
         }
       }
-      if (push_constant(field->constant_value()))
+      if (push_constant(field->constant_value(), false, false, stable_type))
         return;
-    }
-    else {
-      // final non-static field
+    } else {
+      // final or stable non-static field
       // Treat final non-static fields of trusted classes (class