OpenJDK / portola / portola
changeset 6160:f84f82e24ceb
Merge
| author | lana |
| --- | --- |
| date | Thu, 29 Jul 2010 22:04:41 -0700 |
| parents | 88930a1c409e 647709708658 |
| children | ab857bfb695a |
| files | hotspot/src/os/linux/vm/vtune_linux.cpp hotspot/src/os/solaris/vm/vtune_solaris.cpp hotspot/src/os/windows/vm/vtune_windows.cpp hotspot/src/share/vm/runtime/vtune.hpp jdk/src/linux/doc/man/ja/kinit.1 jdk/src/linux/doc/man/ja/klist.1 jdk/src/linux/doc/man/ja/ktab.1 jdk/test/java/nio/channels/ServerSocketChannel/AcceptAddress.java jdk/test/java/nio/charset/coders/Surrogate.java jdk/test/tools/launcher/Makefile.SolarisRunpath jdk/test/tools/launcher/lib/i386/lib32/lib32/liblibrary.so jdk/test/tools/launcher/lib/i386/lib32/liblibrary.so jdk/test/tools/launcher/lib/sparc/lib32/lib32/liblibrary.so jdk/test/tools/launcher/lib/sparc/lib32/liblibrary.so jdk/test/tools/launcher/lib/sparc/lib64/lib64/liblibrary.so jdk/test/tools/launcher/lib/sparc/lib64/liblibrary.so |
| diffstat | 843 files changed, 18566 insertions(+), 11808 deletions(-) |
line diff
--- a/.hgtags Thu Jul 29 19:30:35 2010 -0700 +++ b/.hgtags Thu Jul 29 22:04:41 2010 -0700 @@ -73,3 +73,7 @@ d260f892491e040ae385a8e6df59557a7d721abf jdk7-b96 7e406ebed9a5968b584f3c3e6b60893b5d6d9741 jdk7-b97 db6e660120446c407e2d908d52ec046592b21726 jdk7-b98 +c4c8a5bc54f66abc68cd185d9294042121922154 jdk7-b99 +2d6ba7a221915bdf0311acc5641c7f3875cb793e jdk7-b100 +2548ac036b8fca3326d058d758e6df8355a42469 jdk7-b101 +88db80c8e49cea352c2900f689600dc410761c1f jdk7-b102
--- a/.hgtags-top-repo Thu Jul 29 19:30:35 2010 -0700 +++ b/.hgtags-top-repo Thu Jul 29 22:04:41 2010 -0700 @@ -73,3 +73,7 @@ cf71cb5151166f35433afebaf67dbf34a704a170 jdk7-b96 5e197c942c6ebd8b92f324a31049c5f1d26d40ef jdk7-b97 6cea9984d73d74de0cd01f30d07ac0a1ed196117 jdk7-b98 +e7f18db469a3e947b7096bfd12e87380e5a042cd jdk7-b99 +b218a53ec7d3d42be61d31d6917a6c5c037b6f56 jdk7-b100 +4193eaf5f1b82794c6a0fb1a8d11af43d1b1d611 jdk7-b101 +a136a51f5113da4dad3853b74a8536ab583ab112 jdk7-b102
--- a/Makefile Thu Jul 29 19:30:35 2010 -0700 +++ b/Makefile Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ # -# Copyright (c) 1995, 2009, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1995, 2010, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -29,10 +29,6 @@ TOPDIR:=. endif -ifndef CONTROL_TOPDIR - CONTROL_TOPDIR=$(TOPDIR) -endif - # Openjdk sources (only used if SKIP_OPENJDK_BUILD!=true) OPENJDK_SOURCETREE=$(TOPDIR)/openjdk OPENJDK_BUILDDIR:=$(shell \ @@ -120,7 +116,7 @@ all_product_build:: @$(FINISH_ECHO) -# Generis build of basic repo series +# Generic build of basic repo series generic_build_repo_series:: $(MKDIR) -p $(OUTPUTDIR) $(MKDIR) -p $(OUTPUTDIR)/j2sdk-image @@ -179,11 +175,15 @@ # The install process needs to know what the DEBUG_NAME is, so # look for INSTALL_DEBUG_NAME in the install rules. # +# NOTE: On windows, do not use $(ABS_BOOTDIR_OUTPUTDIR)-$(DEBUG_NAME). +# Due to the use of short paths in $(ABS_OUTPUTDIR), this may +# not be the same location. +# # Location of fresh bootdir output ABS_BOOTDIR_OUTPUTDIR=$(ABS_OUTPUTDIR)/bootjdk FRESH_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)/j2sdk-image -FRESH_DEBUG_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)-$(DEBUG_NAME)/j2sdk-image +FRESH_DEBUG_BOOTDIR=$(ABS_BOOTDIR_OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-$(DEBUG_NAME)/j2sdk-image create_fresh_product_bootdir: FRC @$(START_ECHO) @@ -248,10 +248,14 @@ generic_build_repo_series @$(FINISH_ECHO) +# NOTE: On windows, do not use $(ABS_OUTPUTDIR)-$(DEBUG_NAME). +# Due to the use of short paths in $(ABS_OUTPUTDIR), this may +# not be the same location. + generic_debug_build: @$(START_ECHO) $(MAKE) \ - ALT_OUTPUTDIR=$(ABS_OUTPUTDIR)-$(DEBUG_NAME) \ + ALT_OUTPUTDIR=$(ABS_OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-$(DEBUG_NAME) \ DEBUG_NAME=$(DEBUG_NAME) \ GENERATE_DOCS=false \ $(BOOT_CYCLE_DEBUG_SETTINGS) \ @@ -348,8 +352,8 @@ clobber:: $(RM) -r $(OUTPUTDIR)/* - $(RM) -r $(OUTPUTDIR)-debug/* - $(RM) -r $(OUTPUTDIR)-fastdebug/* + $(RM) -r $(OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-debug/* + $(RM) -r $(OUTPUTDIR)/../$(PLATFORM)-$(ARCH)-fastdebug/* -($(RMDIR) -p $(OUTPUTDIR) > $(DEV_NULL) 2>&1; $(TRUE)) clean: clobber @@ -551,6 +555,56 @@ endif ################################################################ +# rule to test +################################################################ + +.NOTPARALLEL: test + +test: test_clean test_start test_summary + +test_start: + @$(ECHO) "Tests started at `$(DATE)`" + +test_clean: + $(RM) $(OUTPUTDIR)/test_failures.txt $(OUTPUTDIR)/test_log.txt + +test_summary: $(OUTPUTDIR)/test_failures.txt + @$(ECHO) "#################################################" + @$(ECHO) "Tests completed at `$(DATE)`" + @( $(EGREP) '^TEST STATS:' $(OUTPUTDIR)/test_log.txt \ + || $(ECHO) "No TEST STATS seen in log" ) + @$(ECHO) "For complete details see: $(OUTPUTDIR)/test_log.txt" + @$(ECHO) "#################################################" + @if [ -s $< ] ; then \ + $(ECHO) "ERROR: Test failure count: `$(CAT) $< | $(WC) -l`"; \ + $(CAT) $<; \ + exit 1; \ + else \ + $(ECHO) "Success! No failures detected"; \ + fi + +# Get failure list from log +$(OUTPUTDIR)/test_failures.txt: $(OUTPUTDIR)/test_log.txt + @$(RM) $@ + @( $(EGREP) '^FAILED:' $< || $(ECHO) "" ) > $@ + +# Get log file of all tests run +JDK_TO_TEST := $(shell \ + if [ -d "$(ABS_OUTPUTDIR)/j2sdk-image" ] ; then \ + $(ECHO) "$(ABS_OUTPUTDIR)/j2sdk-image"; \ + elif [ -d "$(ABS_OUTPUTDIR)/bin" ] ; then \ + $(ECHO) "$(ABS_OUTPUTDIR)"; \ + elif [ "$(PRODUCT_HOME)" != "" -a -d "$(PRODUCT_HOME)/bin" ] ; then \ + $(ECHO) "$(PRODUCT_HOME)"; \ + fi \ +) +$(OUTPUTDIR)/test_log.txt: + $(RM) $@ + ( $(CD) test && \ + $(MAKE) NO_STOPPING=- PRODUCT_HOME=$(JDK_TO_TEST) \ + ) | tee $@ + +################################################################ # JPRT rule to build ################################################################ @@ -560,7 +614,7 @@ # PHONY ################################################################ -.PHONY: all \ +.PHONY: all test test_start test_summary test_clean \ generic_build_repo_series \ what clobber insane \ dev dev-build dev-sanity dev-clobber \
--- a/corba/.hgtags Thu Jul 29 19:30:35 2010 -0700 +++ b/corba/.hgtags Thu Jul 29 22:04:41 2010 -0700 @@ -73,3 +73,7 @@ edc2a2659c77dabc55cb55bb617bad89e3a05bb3 jdk7-b96 4ec9d59374caa1e5d72fa802291b4d66955a4936 jdk7-b97 3b99409057e4c255da946f9f540d051a5ef4ab23 jdk7-b98 +95db968660e7d87c345d5cf3dc2e3db037fb7220 jdk7-b99 +a56d734a1e970e1a21a8f4feb13053e9a33674c7 jdk7-b100 +86a239832646a74811695428984b6947c0bd6dc8 jdk7-b101 +78561a95779090b5106c8d0f1a75360a027ef087 jdk7-b102
--- a/corba/src/share/classes/com/sun/corba/se/impl/orbutil/CorbaResourceUtil.java Thu Jul 29 19:30:35 2010 -0700 +++ b/corba/src/share/classes/com/sun/corba/se/impl/orbutil/CorbaResourceUtil.java Thu Jul 29 22:04:41 2010 -0700 @@ -75,7 +75,7 @@ args[1] = (arg1 != null ? arg1.toString() : "null"); args[2] = (arg2 != null ? arg2.toString() : "null"); - return java.text.MessageFormat.format(format, args); + return java.text.MessageFormat.format(format, (Object[]) args); } private static boolean resourcesInitialized = false;
--- a/corba/src/share/classes/com/sun/corba/se/impl/orbutil/ObjectUtility.java Thu Jul 29 19:30:35 2010 -0700 +++ b/corba/src/share/classes/com/sun/corba/se/impl/orbutil/ObjectUtility.java Thu Jul 29 22:04:41 2010 -0700 @@ -350,7 +350,7 @@ if (useToString) { try { - cls.getDeclaredMethod( "toString", null ) ; + cls.getDeclaredMethod( "toString", (Class[])null ) ; return true ; } catch (Exception exc) { return false ;
--- a/corba/src/share/classes/com/sun/corba/se/impl/presentation/rmi/ExceptionHandlerImpl.java Thu Jul 29 19:30:35 2010 -0700 +++ b/corba/src/share/classes/com/sun/corba/se/impl/presentation/rmi/ExceptionHandlerImpl.java Thu Jul 29 22:04:41 2010 -0700 @@ -108,8 +108,8 @@ try { helperClass = Class.forName( helperName, true, loader ) ; - Method idMethod = helperClass.getDeclaredMethod( "id", null ) ; - setId( (String)idMethod.invoke( null, null ) ) ; + Method idMethod = helperClass.getDeclaredMethod( "id", (Class[])null ) ; + setId( (String)idMethod.invoke( null, (Object[])null ) ) ; } catch (Exception ex) { throw wrapper.badHelperIdMethod( ex, helperName ) ; }
--- a/corba/src/share/classes/org/omg/CORBA/ORB.java Thu Jul 29 19:30:35 2010 -0700 +++ b/corba/src/share/classes/org/omg/CORBA/ORB.java Thu Jul 29 22:04:41 2010 -0700 @@ -589,7 +589,7 @@ this.getClass().getMethod("create_operation_list", argc); // OK, the method exists, so invoke it and be happy. - Object[] argx = { oper }; + java.lang.Object[] argx = { oper }; return (org.omg.CORBA.NVList)meth.invoke(this, argx); } catch( java.lang.reflect.InvocationTargetException exs ) {
--- a/corba/src/share/classes/sun/corba/Bridge.java Thu Jul 29 19:30:35 2010 -0700 +++ b/corba/src/share/classes/sun/corba/Bridge.java Thu Jul 29 22:04:41 2010 -0700 @@ -187,7 +187,7 @@ try { // Invoke the ObjectInputStream.latestUserDefinedLoader method return (ClassLoader)latestUserDefinedLoaderMethod.invoke(null, - NO_ARGS); + (Object[])NO_ARGS); } catch (InvocationTargetException ite) { Error err = new Error( "sun.corba.Bridge.latestUserDefinedLoader: " + ite ) ;
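Most of the corba changes above (CorbaResourceUtil, ObjectUtility, ExceptionHandlerImpl, Bridge) add explicit `(Class[])` / `(Object[])` casts so that pre-varargs reflection and `MessageFormat.format` call sites compile cleanly against the JDK 5+ varargs signatures; the ORB.java change is the related fix of qualifying `java.lang.Object[]` so it does not clash with `org.omg.CORBA.Object`. A minimal standalone sketch of the same cast pattern (class and variable names here are illustrative, not taken from the changeset):

```java
import java.lang.reflect.Method;

public class VarargsCastSketch {
    public static void main(String[] args) throws Exception {
        // getDeclaredMethod(String, Class<?>...) is varargs since JDK 5: a bare null is
        // ambiguous (a null Class[]? a single null Class?), so the cast states the intent.
        Method toString = Object.class.getDeclaredMethod("toString", (Class<?>[]) null);

        // Same idea for Method.invoke(Object, Object...): casting to Object[] passes the
        // array as the argument list itself instead of wrapping it in a new Object[1].
        String s = (String) toString.invoke(new Object(), (Object[]) null);
        System.out.println(s);
    }
}
```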
--- a/hotspot/.hgtags Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/.hgtags Thu Jul 29 22:04:41 2010 -0700 @@ -102,3 +102,8 @@ 573e8ea5fd68e8e51eb6308d283ac3b3889d15e0 hs19-b02 5f42499e57adc16380780f40541e1a66cd601891 jdk7-b97 8a045b3f5c13eaad92ff4baf15ca671845fcad1a jdk7-b98 +6a236384a379642b5a2398e2819db9ab4e711e9b jdk7-b99 +ad1977f08c4d69162a0775fe3f9576b9fd521d10 jdk7-b100 +6c3a919105b68c15b7db923ec9a00006e9560910 jdk7-b101 +ad1977f08c4d69162a0775fe3f9576b9fd521d10 hs19-b03 +c5cadf1a07717955cf60dbaec16e35b529fd2cb0 jdk7-b102
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java Thu Jul 29 22:04:41 2010 -0700 @@ -35,7 +35,6 @@ public class NMethod extends CodeBlob { private static long pcDescSize; - private static CIntegerField zombieInstructionSizeField; private static sun.jvm.hotspot.types.OopField methodField; /** != InvocationEntryBci if this nmethod is an on-stack replacement method */ private static CIntegerField entryBCIField; @@ -88,7 +87,6 @@ private static void initialize(TypeDataBase db) { Type type = db.lookupType("nmethod"); - zombieInstructionSizeField = type.getCIntegerField("_zombie_instruction_size"); methodField = type.getOopField("_method"); entryBCIField = type.getCIntegerField("_entry_bci"); osrLinkField = type.getAddressField("_osr_link");
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java Thu Jul 29 22:04:41 2010 -0700 @@ -72,6 +72,7 @@ addBytecodeClass(Bytecodes._invokestatic, BytecodeInvoke.class); addBytecodeClass(Bytecodes._invokespecial, BytecodeInvoke.class); addBytecodeClass(Bytecodes._invokeinterface, BytecodeInvoke.class); + addBytecodeClass(Bytecodes._invokedynamic, BytecodeInvoke.class); addBytecodeClass(Bytecodes._jsr, BytecodeJsr.class); addBytecodeClass(Bytecodes._jsr_w, BytecodeJsrW.class); addBytecodeClass(Bytecodes._iload, BytecodeLoad.class);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java Thu Jul 29 22:04:41 2010 -0700 @@ -54,15 +54,31 @@ // returns the name of the invoked method public Symbol name() { ConstantPool cp = method().getConstants(); + if (isInvokedynamic()) { + int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod()); + return cp.getSymbolAt(nt[0]); + } return cp.getNameRefAt(index()); } // returns the signature of the invoked method public Symbol signature() { ConstantPool cp = method().getConstants(); + if (isInvokedynamic()) { + int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod()); + return cp.getSymbolAt(nt[1]); + } return cp.getSignatureRefAt(index()); } + public int getSecondaryIndex() { + if (isInvokedynamic()) { + // change byte-ordering of 4-byte integer + return VM.getVM().getBytes().swapInt(javaSignedWordAt(1)); + } + return super.getSecondaryIndex(); // throw an error + } + public Method getInvokedMethod() { return method().getConstants().getMethodRefAt(index()); } @@ -87,6 +103,7 @@ public boolean isInvokevirtual() { return adjustedInvokeCode() == Bytecodes._invokevirtual; } public boolean isInvokestatic() { return adjustedInvokeCode() == Bytecodes._invokestatic; } public boolean isInvokespecial() { return adjustedInvokeCode() == Bytecodes._invokespecial; } + public boolean isInvokedynamic() { return adjustedInvokeCode() == Bytecodes._invokedynamic; } public boolean isValid() { return isInvokeinterface() || isInvokevirtual() || @@ -104,6 +121,11 @@ buf.append(spaces); buf.append('#'); buf.append(Integer.toString(indexForFieldOrMethod())); + if (isInvokedynamic()) { + buf.append('('); + buf.append(Integer.toString(getSecondaryIndex())); + buf.append(')'); + } buf.append(" [Method "); StringBuffer sigBuf = new StringBuffer(); new SignatureConverter(signature(), sigBuf).iterateReturntype();
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java Thu Jul 29 22:04:41 2010 -0700 @@ -25,6 +25,7 @@ package sun.jvm.hotspot.interpreter; import sun.jvm.hotspot.oops.*; +import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.utilities.*; public class BytecodeLoadConstant extends BytecodeWithCPIndex { @@ -32,10 +33,47 @@ super(method, bci); } + public boolean hasCacheIndex() { + // normal ldc uses CP index, but fast_aldc uses swapped CP cache index + return javaCode() != code(); + } + public int index() { - return javaCode() == Bytecodes._ldc ? + int i = javaCode() == Bytecodes._ldc ? (int) (0xFF & javaByteAt(1)) : (int) (0xFFFF & javaShortAt(1)); + if (hasCacheIndex()) { + return (0xFFFF & VM.getVM().getBytes().swapShort((short) i)); + } else { + return i; + } + } + + public int poolIndex() { + int i = index(); + if (hasCacheIndex()) { + ConstantPoolCache cpCache = method().getConstants().getCache(); + return cpCache.getEntryAt(i).getConstantPoolIndex(); + } else { + return i; + } + } + + public int cacheIndex() { + if (hasCacheIndex()) { + return index(); + } else { + return -1; // no cache index + } + } + + private Oop getCachedConstant() { + int i = cacheIndex(); + if (i >= 0) { + ConstantPoolCache cpCache = method().getConstants().getCache(); + return cpCache.getEntryAt(i).getF1(); + } + return null; } public void verify() { @@ -58,6 +96,7 @@ // has to be int or float or String or Klass return (ctag.isUnresolvedString() || ctag.isString() || ctag.isUnresolvedKlass() || ctag.isKlass() + || ctag.isMethodHandle() || ctag.isMethodType() || ctag.isInt() || ctag.isFloat())? true: false; } } @@ -112,7 +151,7 @@ public String getConstantValue() { ConstantPool cpool = method().getConstants(); - int cpIndex = index(); + int cpIndex = poolIndex(); ConstantTag ctag = cpool.getTagAt(cpIndex); if (ctag.isInt()) { return "<int " + Integer.toString(cpool.getIntAt(cpIndex)) +">"; @@ -149,6 +188,18 @@ } else { throw new RuntimeException("should not reach here"); } + } else if (ctag.isMethodHandle() || ctag.isMethodType()) { + Oop x = getCachedConstant(); + int refidx = cpool.getMethodHandleIndexAt(cpIndex); + int refkind = cpool.getMethodHandleRefKindAt(cpIndex); + return "<MethodHandle kind=" + Integer.toString(refkind) + + " ref=" + Integer.toString(refidx) + + (x == null ? "" : " @" + x.getHandle()) + ">"; + } else if (ctag.isMethodType()) { + Oop x = getCachedConstant(); + int refidx = cpool.getMethodTypeIndexAt(cpIndex); + return "<MethodType " + cpool.getSymbolAt(refidx).asString() + + (x == null ? "" : " @" + x.getHandle()) + ">"; } else { if (Assert.ASSERTS_ENABLED) { Assert.that(false, "invalid load constant type"); @@ -162,7 +213,12 @@ buf.append(getJavaBytecodeName()); buf.append(spaces); buf.append('#'); - buf.append(Integer.toString(index())); + buf.append(Integer.toString(poolIndex())); + if (hasCacheIndex()) { + buf.append('('); + buf.append(Integer.toString(cacheIndex())); + buf.append(')'); + } buf.append(spaces); buf.append(getConstantValue()); if (code() != javaCode()) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java Thu Jul 29 22:04:41 2010 -0700 @@ -37,12 +37,19 @@ // the constant pool index for this bytecode public int index() { return 0xFFFF & javaShortAt(1); } + public int getSecondaryIndex() { + throw new IllegalArgumentException("must be invokedynamic"); + } + protected int indexForFieldOrMethod() { ConstantPoolCache cpCache = method().getConstants().getCache(); // get ConstantPool index from ConstantPoolCacheIndex at given bci int cpCacheIndex = index(); if (cpCache == null) { return cpCacheIndex; + } else if (code() == Bytecodes._invokedynamic) { + int secondaryIndex = getSecondaryIndex(); + return cpCache.getMainEntryAt(secondaryIndex).getConstantPoolIndex(); } else { // change byte-ordering and go via cache return cpCache.getEntryAt((int) (0xFFFF & VM.getVM().getBytes().swapShort((short) cpCacheIndex))).getConstantPoolIndex();
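The lookups above repeatedly "change byte-ordering and go via cache": rewritten bytecodes store constant-pool-cache indices in native order, so the SA code byte-swaps the operand before indexing the cache, and for `invokedynamic` the swapped four-byte operand is a secondary index resolved through `getMainEntryAt`. A small self-contained illustration of the masking and swapping in plain Java, independent of the SA classes (the byte values are made up):

```java
public class CacheIndexSwapSketch {
    // Read two operand bytes as an unsigned big-endian value (what a javaShortAt-style helper returns).
    static int beShort(byte[] code, int off) {
        return ((code[off] & 0xFF) << 8) | (code[off + 1] & 0xFF);
    }

    public static void main(String[] args) {
        byte[] code = { (byte) 0xB6 /* invokevirtual */, 0x02, 0x00 }; // hypothetical rewritten operand
        int raw = beShort(code, 1);                                    // 0x0200 when read big-endian
        int cacheIndex = 0xFFFF & Short.reverseBytes((short) raw);     // 0x0002 after the byte swap
        System.out.printf("raw=0x%04x cacheIndex=%d%n", raw, cacheIndex);
    }
}
```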
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java Thu Jul 29 22:04:41 2010 -0700 @@ -222,7 +222,7 @@ public static final int _invokespecial = 183; // 0xb7 public static final int _invokestatic = 184; // 0xb8 public static final int _invokeinterface = 185; // 0xb9 - public static final int _xxxunusedxxx = 186; // 0xba + public static final int _invokedynamic = 186; // 0xba public static final int _new = 187; // 0xbb public static final int _newarray = 188; // 0xbc public static final int _anewarray = 189; // 0xbd @@ -269,9 +269,12 @@ public static final int _fast_invokevfinal = 226; public static final int _fast_linearswitch = 227; public static final int _fast_binaryswitch = 228; - public static final int _shouldnotreachhere = 229; // For debugging + public static final int _fast_aldc = 229; + public static final int _fast_aldc_w = 230; + public static final int _return_register_finalizer = 231; + public static final int _shouldnotreachhere = 232; // For debugging - public static final int number_of_codes = 230; + public static final int number_of_codes = 233; public static int specialLengthAt(Method method, int bci) { int code = codeAt(method, bci); @@ -458,9 +461,9 @@ def(_dconst_1 , "dconst_1" , "b" , null , BasicType.getTDouble() , 2, false); def(_bipush , "bipush" , "bc" , null , BasicType.getTInt() , 1, false); def(_sipush , "sipush" , "bcc" , null , BasicType.getTInt() , 1, false); - def(_ldc , "ldc" , "bi" , null , BasicType.getTIllegal(), 1, true ); - def(_ldc_w , "ldc_w" , "bii" , null , BasicType.getTIllegal(), 1, true ); - def(_ldc2_w , "ldc2_w" , "bii" , null , BasicType.getTIllegal(), 2, true ); + def(_ldc , "ldc" , "bk" , null , BasicType.getTIllegal(), 1, true ); + def(_ldc_w , "ldc_w" , "bkk" , null , BasicType.getTIllegal(), 1, true ); + def(_ldc2_w , "ldc2_w" , "bkk" , null , BasicType.getTIllegal(), 2, true ); def(_iload , "iload" , "bi" , "wbii" , BasicType.getTInt() , 1, false); def(_lload , "lload" , "bi" , "wbii" , BasicType.getTLong() , 2, false); def(_fload , "fload" , "bi" , "wbii" , BasicType.getTFloat() , 1, false); @@ -618,26 +621,26 @@ def(_dreturn , "dreturn" , "b" , null , BasicType.getTDouble() , -2, true ); def(_areturn , "areturn" , "b" , null , BasicType.getTObject() , -1, true ); def(_return , "return" , "b" , null , BasicType.getTVoid() , 0, true ); - def(_getstatic , "getstatic" , "bjj" , null , BasicType.getTIllegal(), 1, true ); - def(_putstatic , "putstatic" , "bjj" , null , BasicType.getTIllegal(), -1, true ); - def(_getfield , "getfield" , "bjj" , null , BasicType.getTIllegal(), 0, true ); - def(_putfield , "putfield" , "bjj" , null , BasicType.getTIllegal(), -2, true ); - def(_invokevirtual , "invokevirtual" , "bjj" , null , BasicType.getTIllegal(), -1, true ); - def(_invokespecial , "invokespecial" , "bjj" , null , BasicType.getTIllegal(), -1, true ); - def(_invokestatic , "invokestatic" , "bjj" , null , BasicType.getTIllegal(), 0, true ); - def(_invokeinterface , "invokeinterface" , "bjj__", null , BasicType.getTIllegal(), -1, true ); - def(_xxxunusedxxx , "xxxunusedxxx" , null , null , BasicType.getTVoid() , 0, false); - def(_new , "new" , "bii" , null , BasicType.getTObject() , 1, true ); + def(_getstatic , "getstatic" , "bJJ" , null , BasicType.getTIllegal(), 1, true ); + def(_putstatic , "putstatic" , "bJJ" , null , BasicType.getTIllegal(), -1, true ); + def(_getfield , "getfield" , "bJJ" , null , BasicType.getTIllegal(), 0, true ); + def(_putfield , "putfield" , "bJJ" , null , BasicType.getTIllegal(), -2, true ); + def(_invokevirtual , "invokevirtual" , "bJJ" , null , BasicType.getTIllegal(), -1, true ); + def(_invokespecial , "invokespecial" , "bJJ" , null , BasicType.getTIllegal(), -1, true ); + def(_invokestatic , "invokestatic" , "bJJ" , null , BasicType.getTIllegal(), 0, true ); + def(_invokeinterface , "invokeinterface" , "bJJ__", null , BasicType.getTIllegal(), -1, true ); + def(_invokedynamic , "invokedynamic" , "bJJJJ", null , BasicType.getTIllegal(), -1, true ); + def(_new , "new" , "bkk" , null , BasicType.getTObject() , 1, true ); def(_newarray , "newarray" , "bc" , null , BasicType.getTObject() , 0, true ); - def(_anewarray , "anewarray" , "bii" , null , BasicType.getTObject() , 0, true ); + def(_anewarray , "anewarray" , "bkk" , null , BasicType.getTObject() , 0, true ); def(_arraylength , "arraylength" , "b" , null , BasicType.getTVoid() , 0, true ); def(_athrow , "athrow" , "b" , null , BasicType.getTVoid() , -1, true ); - def(_checkcast , "checkcast" , "bii" , null , BasicType.getTObject() , 0, true ); - def(_instanceof , "instanceof" , "bii" , null , BasicType.getTInt() , 0, true ); + def(_checkcast , "checkcast" , "bkk" , null , BasicType.getTObject() , 0, true ); + def(_instanceof , "instanceof" , "bkk" , null , BasicType.getTInt() , 0, true ); def(_monitorenter , "monitorenter" , "b" , null , BasicType.getTVoid() , -1, true ); def(_monitorexit , "monitorexit" , "b" , null , BasicType.getTVoid() , -1, true ); def(_wide , "wide" , "" , null , BasicType.getTVoid() , 0, false); - def(_multianewarray , "multianewarray" , "biic" , null , BasicType.getTObject() , 1, true ); + def(_multianewarray , "multianewarray" , "bkkc" , null , BasicType.getTObject() , 1, true ); def(_ifnull , "ifnull" , "boo" , null , BasicType.getTVoid() , -1, false); def(_ifnonnull , "ifnonnull" , "boo" , null , BasicType.getTVoid() , -1, false); def(_goto_w , "goto_w" , "boooo", null , BasicType.getTVoid() , 0, false); @@ -646,38 +649,44 @@ // JVM bytecodes // bytecode bytecode name format wide f. result tp stk traps std code - def(_fast_agetfield , "fast_agetfield" , "bjj" , null , BasicType.getTObject() , 0, true , _getfield ); - def(_fast_bgetfield , "fast_bgetfield" , "bjj" , null , BasicType.getTInt() , 0, true , _getfield ); - def(_fast_cgetfield , "fast_cgetfield" , "bjj" , null , BasicType.getTChar() , 0, true , _getfield ); - def(_fast_dgetfield , "fast_dgetfield" , "bjj" , null , BasicType.getTDouble() , 0, true , _getfield ); - def(_fast_fgetfield , "fast_fgetfield" , "bjj" , null , BasicType.getTFloat() , 0, true , _getfield ); - def(_fast_igetfield , "fast_igetfield" , "bjj" , null , BasicType.getTInt() , 0, true , _getfield ); - def(_fast_lgetfield , "fast_lgetfield" , "bjj" , null , BasicType.getTLong() , 0, true , _getfield ); - def(_fast_sgetfield , "fast_sgetfield" , "bjj" , null , BasicType.getTShort() , 0, true , _getfield ); + def(_fast_agetfield , "fast_agetfield" , "bJJ" , null , BasicType.getTObject() , 0, true , _getfield ); + def(_fast_bgetfield , "fast_bgetfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _getfield ); + def(_fast_cgetfield , "fast_cgetfield" , "bJJ" , null , BasicType.getTChar() , 0, true , _getfield ); + def(_fast_dgetfield , "fast_dgetfield" , "bJJ" , null , BasicType.getTDouble() , 0, true , _getfield ); + def(_fast_fgetfield , "fast_fgetfield" , "bJJ" , null , BasicType.getTFloat() , 0, true , _getfield ); + def(_fast_igetfield , "fast_igetfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _getfield ); + def(_fast_lgetfield , "fast_lgetfield" , "bJJ" , null , BasicType.getTLong() , 0, true , _getfield ); + def(_fast_sgetfield , "fast_sgetfield" , "bJJ" , null , BasicType.getTShort() , 0, true , _getfield ); - def(_fast_aputfield , "fast_aputfield" , "bjj" , null , BasicType.getTObject() , 0, true , _putfield ); - def(_fast_bputfield , "fast_bputfield" , "bjj" , null , BasicType.getTInt() , 0, true , _putfield ); - def(_fast_cputfield , "fast_cputfield" , "bjj" , null , BasicType.getTChar() , 0, true , _putfield ); - def(_fast_dputfield , "fast_dputfield" , "bjj" , null , BasicType.getTDouble() , 0, true , _putfield ); - def(_fast_fputfield , "fast_fputfield" , "bjj" , null , BasicType.getTFloat() , 0, true , _putfield ); - def(_fast_iputfield , "fast_iputfield" , "bjj" , null , BasicType.getTInt() , 0, true , _putfield ); - def(_fast_lputfield , "fast_lputfield" , "bjj" , null , BasicType.getTLong() , 0, true , _putfield ); - def(_fast_sputfield , "fast_sputfield" , "bjj" , null , BasicType.getTShort() , 0, true , _putfield ); + def(_fast_aputfield , "fast_aputfield" , "bJJ" , null , BasicType.getTObject() , 0, true , _putfield ); + def(_fast_bputfield , "fast_bputfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _putfield ); + def(_fast_cputfield , "fast_cputfield" , "bJJ" , null , BasicType.getTChar() , 0, true , _putfield ); + def(_fast_dputfield , "fast_dputfield" , "bJJ" , null , BasicType.getTDouble() , 0, true , _putfield ); + def(_fast_fputfield , "fast_fputfield" , "bJJ" , null , BasicType.getTFloat() , 0, true , _putfield ); + def(_fast_iputfield , "fast_iputfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _putfield ); + def(_fast_lputfield , "fast_lputfield" , "bJJ" , null , BasicType.getTLong() , 0, true , _putfield ); + def(_fast_sputfield , "fast_sputfield" , "bJJ" , null , BasicType.getTShort() , 0, true , _putfield ); def(_fast_aload_0 , "fast_aload_0" , "b" , null , BasicType.getTObject() , 1, true , _aload_0 ); - def(_fast_iaccess_0 , "fast_iaccess_0" , "b_jj" , null , BasicType.getTInt() , 1, true , _aload_0 ); - def(_fast_aaccess_0 , "fast_aaccess_0" , "b_jj" , null , BasicType.getTObject() , 1, true , _aload_0 ); - def(_fast_faccess_0 , "fast_faccess_0" , "b_jj" , null , BasicType.getTObject() , 1, true , _aload_0 ); + def(_fast_iaccess_0 , "fast_iaccess_0" , "b_JJ" , null , BasicType.getTInt() , 1, true , _aload_0 ); + def(_fast_aaccess_0 , "fast_aaccess_0" , "b_JJ" , null , BasicType.getTObject() , 1, true , _aload_0 ); + def(_fast_faccess_0 , "fast_faccess_0" , "b_JJ" , null , BasicType.getTObject() , 1, true , _aload_0 ); def(_fast_iload , "fast_iload" , "bi" , null , BasicType.getTInt() , 1, false, _iload); def(_fast_iload2 , "fast_iload2" , "bi_i" , null , BasicType.getTInt() , 2, false, _iload); def(_fast_icaload , "fast_icaload" , "bi_" , null , BasicType.getTInt() , 0, false, _iload); // Faster method invocation. - def(_fast_invokevfinal , "fast_invokevfinal" , "bjj" , null , BasicType.getTIllegal(), -1, true, _invokevirtual); + def(_fast_invokevfinal , "fast_invokevfinal" , "bJJ" , null , BasicType.getTIllegal(), -1, true, _invokevirtual); def(_fast_linearswitch , "fast_linearswitch" , "" , null , BasicType.getTVoid() , -1, false, _lookupswitch ); def(_fast_binaryswitch , "fast_binaryswitch" , "" , null , BasicType.getTVoid() , -1, false, _lookupswitch ); + + def(_return_register_finalizer, "return_register_finalizer", "b" , null , BasicType.getTVoid() , 0, true, _return ); + + def(_fast_aldc , "fast_aldc" , "bj" , null , BasicType.getTObject(), 1, true, _ldc ); + def(_fast_aldc_w , "fast_aldc_w" , "bJJ" , null , BasicType.getTObject(), 1, true, _ldc_w ); + def(_shouldnotreachhere , "_shouldnotreachhere" , "b" , null , BasicType.getTVoid() , 0, false); if (Assert.ASSERTS_ENABLED) {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Thu Jul 29 22:04:41 2010 -0700 @@ -152,7 +152,7 @@ return res; } - public int getNameAndTypeAt(int which) { + public int[] getNameAndTypeAt(int which) { if (Assert.ASSERTS_ENABLED) { Assert.that(getTagAt(which).isNameAndType(), "Corrupted constant pool"); } @@ -160,18 +160,16 @@ if (DEBUG) { System.err.println("ConstantPool.getNameAndTypeAt(" + which + "): result = " + i); } - return i; + return new int[] { extractLowShortFromInt(i), extractHighShortFromInt(i) }; } public Symbol getNameRefAt(int which) { - int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which)); - int nameIndex = extractLowShortFromInt(refIndex); + int nameIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[0]; return getSymbolAt(nameIndex); } public Symbol getSignatureRefAt(int which) { - int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which)); - int sigIndex = extractHighShortFromInt(refIndex); + int sigIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[1]; return getSymbolAt(sigIndex); } @@ -220,11 +218,11 @@ /** Lookup for entries consisting of (name_index, signature_index) */ public int getNameRefIndexAt(int index) { - int refIndex = getNameAndTypeAt(index); + int[] refIndex = getNameAndTypeAt(index); if (DEBUG) { - System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex); + System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]); } - int i = extractLowShortFromInt(refIndex); + int i = refIndex[0]; if (DEBUG) { System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): result = " + i); } @@ -233,17 +231,53 @@ /** Lookup for entries consisting of (name_index, signature_index) */ public int getSignatureRefIndexAt(int index) { - int refIndex = getNameAndTypeAt(index); + int[] refIndex = getNameAndTypeAt(index); if (DEBUG) { - System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex); + System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]); } - int i = extractHighShortFromInt(refIndex); + int i = refIndex[1]; if (DEBUG) { System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): result = " + i); } return i; } + /** Lookup for MethodHandle entries. */ + public int getMethodHandleIndexAt(int i) { + if (Assert.ASSERTS_ENABLED) { + Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool"); + } + int res = extractHighShortFromInt(getIntAt(i)); + if (DEBUG) { + System.err.println("ConstantPool.getMethodHandleIndexAt(" + i + "): result = " + res); + } + return res; + } + + /** Lookup for MethodHandle entries. */ + public int getMethodHandleRefKindAt(int i) { + if (Assert.ASSERTS_ENABLED) { + Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool"); + } + int res = extractLowShortFromInt(getIntAt(i)); + if (DEBUG) { + System.err.println("ConstantPool.getMethodHandleRefKindAt(" + i + "): result = " + res); + } + return res; + } + + /** Lookup for MethodType entries. */ + public int getMethodTypeIndexAt(int i) { + if (Assert.ASSERTS_ENABLED) { + Assert.that(getTagAt(i).isMethodType(), "Corrupted constant pool"); + } + int res = getIntAt(i); + if (DEBUG) { + System.err.println("ConstantPool.getMethodHandleTypeAt(" + i + "): result = " + res); + } + return res; + } + final private static String[] nameForTag = new String[] { }; @@ -261,6 +295,8 @@ case JVM_CONSTANT_Methodref: return "JVM_CONSTANT_Methodref"; case JVM_CONSTANT_InterfaceMethodref: return "JVM_CONSTANT_InterfaceMethodref"; case JVM_CONSTANT_NameAndType: return "JVM_CONSTANT_NameAndType"; + case JVM_CONSTANT_MethodHandle: return "JVM_CONSTANT_MethodHandle"; + case JVM_CONSTANT_MethodType: return "JVM_CONSTANT_MethodType"; case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid"; case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass"; case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError"; @@ -317,6 +353,8 @@ case JVM_CONSTANT_Methodref: case JVM_CONSTANT_InterfaceMethodref: case JVM_CONSTANT_NameAndType: + case JVM_CONSTANT_MethodHandle: + case JVM_CONSTANT_MethodType: visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true); break; } @@ -467,6 +505,18 @@ + ", type = " + signatureIndex); break; } + + case JVM_CONSTANT_MethodHandle: { + dos.writeByte(cpConstType); + int value = getIntAt(ci); + short nameIndex = (short) extractLowShortFromInt(value); + short signatureIndex = (short) extractHighShortFromInt(value); + dos.writeShort(nameIndex); + dos.writeShort(signatureIndex); + if (DEBUG) debugMessage("CP[" + ci + "] = N&T name = " + nameIndex + + ", type = " + signatureIndex); + break; + } default: throw new InternalError("unknown tag: " + cpConstType); } // switch @@ -488,10 +538,12 @@ // private static int extractHighShortFromInt(int val) { + // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc. return (val >> 16) & 0xFFFF; } private static int extractLowShortFromInt(int val) { + // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc. return val & 0xFFFF; } }
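A `JVM_CONSTANT_MethodHandle` slot packs a reference kind and a reference index into a single int, which the accessors above split with `extractLowShortFromInt` (kind) and `extractHighShortFromInt` (index). A tiny stand-alone sketch of that packing convention, assuming the same low/high layout (the example values are placeholders):

```java
public class MethodHandleSlotSketch {
    static int pack(int refKind, int refIndex) { return (refIndex << 16) | (refKind & 0xFFFF); }
    static int lowShort(int v)  { return v & 0xFFFF; }         // reference kind
    static int highShort(int v) { return (v >> 16) & 0xFFFF; } // reference index

    public static void main(String[] args) {
        int slot = pack(6 /* REF_invokeStatic */, 31 /* hypothetical Methodref index */);
        System.out.println("kind=" + lowShort(slot) + " index=" + highShort(slot)); // kind=6 index=31
    }
}
```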
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java Thu Jul 29 22:04:41 2010 -0700 @@ -78,6 +78,31 @@ return new ConstantPoolCacheEntry(this, i); } + public static boolean isSecondaryIndex(int i) { return (i < 0); } + public static int decodeSecondaryIndex(int i) { return isSecondaryIndex(i) ? ~i : i; } + public static int encodeSecondaryIndex(int i) { return !isSecondaryIndex(i) ? ~i : i; } + + // secondary entries hold invokedynamic call site bindings + public ConstantPoolCacheEntry getSecondaryEntryAt(int i) { + ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, decodeSecondaryIndex(i)); + if (Assert.ASSERTS_ENABLED) { + Assert.that(e.isSecondaryEntry(), "must be a secondary entry"); + } + return e; + } + + public ConstantPoolCacheEntry getMainEntryAt(int i) { + if (isSecondaryIndex(i)) { + // run through an extra level of indirection: + i = getSecondaryEntryAt(i).getMainEntryIndex(); + } + ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, i); + if (Assert.ASSERTS_ENABLED) { + Assert.that(!e.isSecondaryEntry(), "must not be a secondary entry"); + } + return e; + } + public int getIntAt(int entry, int fld) { //alignObjectSize ? long offset = baseOffset + /*alignObjectSize*/entry * elementSize + fld* getHeap().getIntSize();
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java Thu Jul 29 22:04:41 2010 -0700 @@ -28,6 +28,7 @@ import sun.jvm.hotspot.debugger.*; import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.types.*; +import sun.jvm.hotspot.utilities.*; public class ConstantPoolCacheEntry { private static long size; @@ -67,9 +68,23 @@ } public int getConstantPoolIndex() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(!isSecondaryEntry(), "must not be a secondary CP entry"); + } return (int) (getIndices() & 0xFFFF); } + public boolean isSecondaryEntry() { + return (getIndices() & 0xFFFF) == 0; + } + + public int getMainEntryIndex() { + if (Assert.ASSERTS_ENABLED) { + Assert.that(isSecondaryEntry(), "must be a secondary CP entry"); + } + return (int) (getIndices() >>> 16); + } + private long getIndices() { return cp.getHandle().getCIntegerAt(indices.getOffset() + offset, indices.getSize(), indices.isUnsigned()); }
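Secondary (invokedynamic) cache indices are distinguished by sign: a secondary index is the bitwise complement of the underlying value, so it is always negative and `~i` serves as both encoder and decoder. A quick round-trip check of that convention:

```java
public class SecondaryIndexSketch {
    static boolean isSecondaryIndex(int i) { return i < 0; }
    static int encodeSecondaryIndex(int i) { return ~i; } //  5 -> -6
    static int decodeSecondaryIndex(int i) { return ~i; } // -6 ->  5

    public static void main(String[] args) {
        int enc = encodeSecondaryIndex(5);
        System.out.println(enc + " secondary=" + isSecondaryIndex(enc)
                + " decoded=" + decodeSecondaryIndex(enc)); // -6 secondary=true decoded=5
    }
}
```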
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java Thu Jul 29 22:04:41 2010 -0700 @@ -566,6 +566,7 @@ case Bytecodes._invokespecial: case Bytecodes._invokestatic: case Bytecodes._invokeinterface: + case Bytecodes._invokedynamic: // FIXME: print signature of referenced method (need more // accessors in ConstantPool and ConstantPoolCache) int idx = currentBC.getIndexBig(); @@ -605,6 +606,7 @@ case Bytecodes._invokespecial: case Bytecodes._invokestatic: case Bytecodes._invokeinterface: + case Bytecodes._invokedynamic: // FIXME: print signature of referenced method (need more // accessors in ConstantPool and ConstantPoolCache) int idx = currentBC.getIndexBig(); @@ -1134,6 +1136,7 @@ case Bytecodes._invokespecial: case Bytecodes._invokestatic: case Bytecodes._invokeinterface: + case Bytecodes._invokedynamic: _itr_send = itr; _report_result_for_send = true; break; @@ -1379,6 +1382,7 @@ case Bytecodes._invokevirtual: case Bytecodes._invokespecial: doMethod(false, false, itr.getIndexBig(), itr.bci()); break; case Bytecodes._invokestatic: doMethod(true, false, itr.getIndexBig(), itr.bci()); break; + case Bytecodes._invokedynamic: doMethod(false, true, itr.getIndexBig(), itr.bci()); break; case Bytecodes._invokeinterface: doMethod(false, true, itr.getIndexBig(), itr.bci()); break; case Bytecodes._newarray: case Bytecodes._anewarray: ppNewRef(vCTS, itr.bci()); break; @@ -1725,7 +1729,7 @@ void doMethod (boolean is_static, boolean is_interface, int idx, int bci) { // Dig up signature for field in constant pool ConstantPool cp = _method.getConstants(); - int nameAndTypeIdx = cp.getNameAndTypeRefIndexAt(idx); + int nameAndTypeIdx = cp.getTagAt(idx).isNameAndType() ? idx : cp.getNameAndTypeRefIndexAt(idx); int signatureIdx = cp.getSignatureRefIndexAt(nameAndTypeIdx); Symbol signature = cp.getSymbolAt(signatureIdx);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java Thu Jul 29 22:04:41 2010 -0700 @@ -40,6 +40,19 @@ public static final int JVM_CONSTANT_Methodref = 10; public static final int JVM_CONSTANT_InterfaceMethodref = 11; public static final int JVM_CONSTANT_NameAndType = 12; + public static final int JVM_CONSTANT_MethodHandle = 15; + public static final int JVM_CONSTANT_MethodType = 16; + + // JVM_CONSTANT_MethodHandle subtypes + public static final int JVM_REF_getField = 1; + public static final int JVM_REF_getStatic = 2; + public static final int JVM_REF_putField = 3; + public static final int JVM_REF_putStatic = 4; + public static final int JVM_REF_invokeVirtual = 5; + public static final int JVM_REF_invokeStatic = 6; + public static final int JVM_REF_invokeSpecial = 7; + public static final int JVM_REF_newInvokeSpecial = 8; + public static final int JVM_REF_invokeInterface = 9; // HotSpot specific constant pool constant types.
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Thu Jul 29 22:04:41 2010 -0700 @@ -54,14 +54,34 @@ } - protected short getConstantPoolIndex(int bci) { + protected short getConstantPoolIndex(int rawcode, int bci) { // get ConstantPool index from ConstantPoolCacheIndex at given bci - short cpCacheIndex = method.getBytecodeShortArg(bci); + String fmt = Bytecodes.format(rawcode); + int cpCacheIndex; + switch (fmt.length()) { + case 2: cpCacheIndex = method.getBytecodeByteArg(bci); break; + case 3: cpCacheIndex = method.getBytecodeShortArg(bci); break; + case 5: + if (fmt.indexOf("__") >= 0) + cpCacheIndex = method.getBytecodeShortArg(bci); + else + cpCacheIndex = method.getBytecodeIntArg(bci); + break; + default: throw new IllegalArgumentException(); + } if (cpCache == null) { - return cpCacheIndex; + return (short) cpCacheIndex; + } else if (fmt.indexOf("JJJJ") >= 0) { + // change byte-ordering and go via secondary cache entry + return (short) cpCache.getMainEntryAt(bytes.swapInt(cpCacheIndex)).getConstantPoolIndex(); + } else if (fmt.indexOf("JJ") >= 0) { + // change byte-ordering and go via cache + return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex(); + } else if (fmt.indexOf("j") >= 0) { + // go via cache + return (short) cpCache.getEntryAt((int) (0xFF & cpCacheIndex)).getConstantPoolIndex(); } else { - // change byte-ordering and go via cache - return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort(cpCacheIndex))).getConstantPoolIndex(); + return (short) cpCacheIndex; } } @@ -100,10 +120,31 @@ case Bytecodes._invokespecial: case Bytecodes._invokestatic: case Bytecodes._invokeinterface: { - cpoolIndex = getConstantPoolIndex(bci + 1); + cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1); writeShort(code, bci + 1, cpoolIndex); break; } + + case Bytecodes._invokedynamic: + cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1); + writeShort(code, bci + 1, cpoolIndex); + writeShort(code, bci + 3, (short)0); // clear out trailing bytes + break; + + case Bytecodes._ldc_w: + if (hotspotcode != bytecode) { + // fast_aldc_w puts constant in CP cache + cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1); + writeShort(code, bci + 1, cpoolIndex); + } + break; + case Bytecodes._ldc: + if (hotspotcode != bytecode) { + // fast_aldc puts constant in CP cache + cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1); + code[bci + 1] = (byte)(cpoolIndex); + } + break; } len = Bytecodes.lengthFor(bytecode);
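`getConstantPoolIndex` now keys off the bytecode's format-string length to decide whether the operand is one, two, or four bytes wide before any cache translation. A standalone sketch of reading those operand widths from a code array, big-endian as class files store them (the array contents are made up):

```java
public class OperandWidthSketch {
    static int readU1(byte[] code, int off) { return code[off] & 0xFF; }
    static int readU2(byte[] code, int off) {
        return ((code[off] & 0xFF) << 8) | (code[off + 1] & 0xFF);
    }
    static int readU4(byte[] code, int off) {
        return ((code[off] & 0xFF) << 24) | ((code[off + 1] & 0xFF) << 16)
             | ((code[off + 2] & 0xFF) << 8) | (code[off + 3] & 0xFF);
    }

    public static void main(String[] args) {
        byte[] ldc  = { (byte) 0x12, 0x07 };                        // ldc: one operand byte
        byte[] indy = { (byte) 0xBA, 0x00, 0x09, 0x00, 0x00 };      // invokedynamic: four operand bytes
        System.out.println("ldc operand = " + readU1(ldc, 1));
        System.out.println("invokedynamic raw operand = " + readU4(indy, 1));
    }
}
```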
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Thu Jul 29 22:04:41 2010 -0700 @@ -61,10 +61,12 @@ protected short _signatureIndex; protected static int extractHighShortFromInt(int val) { + // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc. return (val >> 16) & 0xFFFF; } protected static int extractLowShortFromInt(int val) { + // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc. return val & 0xFFFF; } @@ -297,6 +299,28 @@ + ", type = " + signatureIndex); break; } + + case JVM_CONSTANT_MethodHandle: { + dos.writeByte(cpConstType); + int value = cpool.getIntAt(ci); + short refIndex = (short) extractHighShortFromInt(value); + byte refKind = (byte) extractLowShortFromInt(value); + dos.writeByte(refKind); + dos.writeShort(refIndex); + if (DEBUG) debugMessage("CP[" + ci + "] = MH index = " + refIndex + + ", kind = " + refKind); + break; + } + + case JVM_CONSTANT_MethodType: { + dos.writeByte(cpConstType); + int value = cpool.getIntAt(ci); + short refIndex = (short) value; + dos.writeShort(refIndex); + if (DEBUG) debugMessage("CP[" + ci + "] = MT index = " + refIndex); + break; + } + default: throw new InternalError("Unknown tag: " + cpConstType); } // switch
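The entries emitted above correspond to the class-file forms CONSTANT_MethodHandle_info (tag 15: u1 reference_kind, u2 reference_index) and CONSTANT_MethodType_info (tag 16: u2 descriptor_index). A minimal sketch of writing them with DataOutputStream in the same order ClassWriter uses; the index values are placeholders:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class CpEntryWriteSketch {
    static void writeMethodHandle(DataOutputStream dos, int refKind, int refIndex) throws IOException {
        dos.writeByte(15);        // JVM_CONSTANT_MethodHandle
        dos.writeByte(refKind);   // u1 reference_kind, e.g. 6 = REF_invokeStatic
        dos.writeShort(refIndex); // u2 reference_index into the constant pool
    }

    static void writeMethodType(DataOutputStream dos, int descriptorIndex) throws IOException {
        dos.writeByte(16);               // JVM_CONSTANT_MethodType
        dos.writeShort(descriptorIndex); // u2 descriptor_index of a CONSTANT_Utf8 entry
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(buf);
        writeMethodHandle(dos, 6, 31);
        writeMethodType(dos, 32);
        dos.flush();
        System.out.println("bytes written: " + buf.size()); // 4 + 3 = 7
    }
}
```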
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Thu Jul 29 22:04:41 2010 -0700 @@ -572,6 +572,16 @@ buf.cell(Integer.toString(cpool.getIntAt(index))); break; + case JVM_CONSTANT_MethodHandle: + buf.cell("JVM_CONSTANT_MethodHandle"); + buf.cell(genLowHighShort(cpool.getIntAt(index))); + break; + + case JVM_CONSTANT_MethodType: + buf.cell("JVM_CONSTANT_MethodType"); + buf.cell(Integer.toString(cpool.getIntAt(index))); + break; + default: throw new InternalError("unknown tag: " + ctag); }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java Thu Jul 29 22:04:41 2010 -0700 @@ -38,12 +38,26 @@ private static int JVM_CONSTANT_Methodref = 10; private static int JVM_CONSTANT_InterfaceMethodref = 11; private static int JVM_CONSTANT_NameAndType = 12; + private static int JVM_CONSTANT_MethodHandle = 15; // JSR 292 + private static int JVM_CONSTANT_MethodType = 16; // JSR 292 private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use private static int JVM_CONSTANT_ClassIndex = 101; // Temporary tag while constructing constant pool private static int JVM_CONSTANT_UnresolvedString = 102; // Temporary tag until actual use private static int JVM_CONSTANT_StringIndex = 103; // Temporary tag while constructing constant pool private static int JVM_CONSTANT_UnresolvedClassInError = 104; // Resolution failed + private static int JVM_CONSTANT_Object = 105; // Required for BoundMethodHandle arguments. + + // JVM_CONSTANT_MethodHandle subtypes //FIXME: connect these to data structure + private static int JVM_REF_getField = 1; + private static int JVM_REF_getStatic = 2; + private static int JVM_REF_putField = 3; + private static int JVM_REF_putStatic = 4; + private static int JVM_REF_invokeVirtual = 5; + private static int JVM_REF_invokeStatic = 6; + private static int JVM_REF_invokeSpecial = 7; + private static int JVM_REF_newInvokeSpecial = 8; + private static int JVM_REF_invokeInterface = 9; private byte tag; @@ -62,6 +76,8 @@ public boolean isDouble() { return tag == JVM_CONSTANT_Double; } public boolean isNameAndType() { return tag == JVM_CONSTANT_NameAndType; } public boolean isUtf8() { return tag == JVM_CONSTANT_Utf8; } + public boolean isMethodHandle() { return tag == JVM_CONSTANT_MethodHandle; } + public boolean isMethodType() { return tag == JVM_CONSTANT_MethodType; } public boolean isInvalid() { return tag == JVM_CONSTANT_Invalid; } @@ -73,6 +89,8 @@ public boolean isUnresolvedString() { return tag == JVM_CONSTANT_UnresolvedString; } public boolean isStringIndex() { return tag == JVM_CONSTANT_StringIndex; } + public boolean isObject() { return tag == JVM_CONSTANT_Object; } + public boolean isKlassReference() { return isKlassIndex() || isUnresolvedKlass(); } public boolean isFieldOrMethod() { return isField() || isMethod() || isInterfaceMethod(); } public boolean isSymbol() { return isUtf8(); }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Thu Jul 29 22:04:41 2010 -0700 @@ -825,6 +825,8 @@ } writeln(""); disAsm.decode(new sapkg.interpreter.BytecodeVisitor() { + prologue: function(method) { }, + epilogue: function() { }, visit: function(bytecode) { if (hasLines) { var line = method.getLineNumberFromBCI(bci);
--- a/hotspot/make/hotspot_version Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/make/hotspot_version Thu Jul 29 22:04:41 2010 -0700 @@ -35,7 +35,7 @@ HS_MAJOR_VER=19 HS_MINOR_VER=0 -HS_BUILD_NUMBER=03 +HS_BUILD_NUMBER=04 JDK_MAJOR_VER=1 JDK_MINOR_VER=7
--- a/hotspot/make/linux/makefiles/adlc.make Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/make/linux/makefiles/adlc.make Thu Jul 29 22:04:41 2010 -0700 @@ -138,7 +138,11 @@ # Normally, debugging is done directly on the ad_<arch>*.cpp files. # But -g will put #line directives in those files pointing back to <arch>.ad. +# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives +# so skip it for 3.2 and ealier. +ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0" ADLCFLAGS += -g +endif ifdef LP64 ADLCFLAGS += -D_LP64
--- a/hotspot/make/linux/makefiles/sa.make Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/make/linux/makefiles/sa.make Thu Jul 29 22:04:41 2010 -0700 @@ -40,6 +40,9 @@ # tools.jar is needed by the JDI - SA binding SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar +# TODO: if it's a modules image, check if SA module is installed. +MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules + # gnumake 3.78.1 does not accept the *s that # are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1)) @@ -65,7 +68,7 @@ echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ exit 1; \ fi - $(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \ + $(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \ echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\ echo ""; \ exit 1; \
--- a/hotspot/make/solaris/makefiles/sa.make Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/make/solaris/makefiles/sa.make Thu Jul 29 22:04:41 2010 -0700 @@ -36,6 +36,9 @@ # tools.jar is needed by the JDI - SA binding SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar +# TODO: if it's a modules image, check if SA module is installed. +MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules + # gnumake 3.78.1 does not accept the *s that # are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1)) @@ -59,7 +62,7 @@ echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \ exit 1; \ fi - $(QUIETLY) if [ ! -f $(SA_CLASSPATH) ] ; then \ + $(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \ echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\ echo ""; \ exit 1; \
--- a/hotspot/make/windows/makefiles/defs.make Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/make/windows/makefiles/defs.make Thu Jul 29 22:04:41 2010 -0700 @@ -32,6 +32,17 @@ PATH_SEP = ; # Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name +ifeq ($(ARCH_DATA_MODEL),32) + ARCH_DATA_MODEL=32 + PLATFORM=windows-i586 + VM_PLATFORM=windows_i486 + HS_ARCH=x86 + MAKE_ARGS += ARCH=x86 + MAKE_ARGS += BUILDARCH=i486 + MAKE_ARGS += Platform_arch=x86 + MAKE_ARGS += Platform_arch_model=x86_32 +endif + ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) x86),) ARCH_DATA_MODEL=32 PLATFORM=windows-i586 @@ -43,55 +54,57 @@ MAKE_ARGS += Platform_arch_model=x86_32 endif -ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) ia64),) - ARCH_DATA_MODEL=64 - PLATFORM=windows-ia64 - VM_PLATFORM=windows_ia64 - HS_ARCH=ia64 - MAKE_ARGS += LP64=1 - MAKE_ARGS += ARCH=ia64 - MAKE_ARGS += BUILDARCH=ia64 - MAKE_ARGS += Platform_arch=ia64 - MAKE_ARGS += Platform_arch_model=ia64 -endif +ifneq ($(ARCH_DATA_MODEL),32) + ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) ia64),) + ARCH_DATA_MODEL=64 + PLATFORM=windows-ia64 + VM_PLATFORM=windows_ia64 + HS_ARCH=ia64 + MAKE_ARGS += LP64=1 + MAKE_ARGS += ARCH=ia64 + MAKE_ARGS += BUILDARCH=ia64 + MAKE_ARGS += Platform_arch=ia64 + MAKE_ARGS += Platform_arch_model=ia64 + endif # http://support.microsoft.com/kb/888731 : this can be either # AMD64 for AMD, or EM64T for Intel chips. -ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) AMD64),) - ARCH_DATA_MODEL=64 - PLATFORM=windows-amd64 - VM_PLATFORM=windows_amd64 - HS_ARCH=x86 - MAKE_ARGS += LP64=1 - MAKE_ARGS += ARCH=x86 - MAKE_ARGS += BUILDARCH=amd64 - MAKE_ARGS += Platform_arch=x86 - MAKE_ARGS += Platform_arch_model=x86_64 -endif + ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) AMD64),) + ARCH_DATA_MODEL=64 + PLATFORM=windows-amd64 + VM_PLATFORM=windows_amd64 + HS_ARCH=x86 + MAKE_ARGS += LP64=1 + MAKE_ARGS += ARCH=x86 + MAKE_ARGS += BUILDARCH=amd64 + MAKE_ARGS += Platform_arch=x86 + MAKE_ARGS += Platform_arch_model=x86_64 + endif ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) EM64T),) - ARCH_DATA_MODEL=64 - PLATFORM=windows-amd64 - VM_PLATFORM=windows_amd64 - HS_ARCH=x86 - MAKE_ARGS += LP64=1 - MAKE_ARGS += ARCH=x86 - MAKE_ARGS += BUILDARCH=amd64 - MAKE_ARGS += Platform_arch=x86 - MAKE_ARGS += Platform_arch_model=x86_64 -endif + ARCH_DATA_MODEL=64 + PLATFORM=windows-amd64 + VM_PLATFORM=windows_amd64 + HS_ARCH=x86 + MAKE_ARGS += LP64=1 + MAKE_ARGS += ARCH=x86 + MAKE_ARGS += BUILDARCH=amd64 + MAKE_ARGS += Platform_arch=x86 + MAKE_ARGS += Platform_arch_model=x86_64 + endif # NB later OS versions than 2003 may report "Intel64" -ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) Intel64),) - ARCH_DATA_MODEL=64 - PLATFORM=windows-amd64 - VM_PLATFORM=windows_amd64 - HS_ARCH=x86 - MAKE_ARGS += LP64=1 - MAKE_ARGS += ARCH=x86 - MAKE_ARGS += BUILDARCH=amd64 - MAKE_ARGS += Platform_arch=x86 - MAKE_ARGS += Platform_arch_model=x86_64 + ifneq ($(shell $(ECHO) $(PROCESSOR_IDENTIFIER) | $(GREP) Intel64),) + ARCH_DATA_MODEL=64 + PLATFORM=windows-amd64 + VM_PLATFORM=windows_amd64 + HS_ARCH=x86 + MAKE_ARGS += LP64=1 + MAKE_ARGS += ARCH=x86 + MAKE_ARGS += BUILDARCH=amd64 + MAKE_ARGS += Platform_arch=x86 + MAKE_ARGS += Platform_arch_model=x86_64 + endif endif JDK_INCLUDE_SUBDIR=win32
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -318,6 +318,31 @@ __ bind(exit); } +// Fast path for caching oop constants. +// %%% We should use this to handle Class and String constants also. +// %%% It will simplify the ldc/primitive path considerably. +void TemplateTable::fast_aldc(bool wide) { + transition(vtos, atos); + + if (!EnableMethodHandles) { + // We should not encounter this bytecode if !EnableMethodHandles. + // The verifier will stop it. However, if we get past the verifier, + // this will stop the thread in a reasonable way, without crashing the JVM. + __ call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_IncompatibleClassChangeError)); + // the call_VM checks for exception, so we should never return here. + __ should_not_reach_here(); + return; + } + + Register Rcache = G3_scratch; + Register Rscratch = G4_scratch; + + resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1)); + + __ verify_oop(Otos_i); +} + void TemplateTable::ldc2_w() { transition(vtos, vtos); Label retry, resolved, Long, exit; @@ -1994,6 +2019,8 @@ case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; + case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; + case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; default : ShouldNotReachHere(); break; } // first time invocation - must resolve first
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -375,6 +375,32 @@ __ bind(Done); } +// Fast path for caching oop constants. +// %%% We should use this to handle Class and String constants also. +// %%% It will simplify the ldc/primitive path considerably. +void TemplateTable::fast_aldc(bool wide) { + transition(vtos, atos); + + if (!EnableMethodHandles) { + // We should not encounter this bytecode if !EnableMethodHandles. + // The verifier will stop it. However, if we get past the verifier, + // this will stop the thread in a reasonable way, without crashing the JVM. + __ call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_IncompatibleClassChangeError)); + // the call_VM checks for exception, so we should never return here. + __ should_not_reach_here(); + return; + } + + const Register cache = rcx; + const Register index = rdx; + + resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); + if (VerifyOops) { + __ verify_oop(rax); + } +} + void TemplateTable::ldc2_w() { transition(vtos, vtos); Label Long, Done; @@ -2055,6 +2081,8 @@ case Bytecodes::_invokestatic : // fall through case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; + case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; + case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; default : ShouldNotReachHere(); break; } __ movl(temp, (int)bytecode());
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -389,6 +389,32 @@ __ bind(Done); } +// Fast path for caching oop constants. +// %%% We should use this to handle Class and String constants also. +// %%% It will simplify the ldc/primitive path considerably. +void TemplateTable::fast_aldc(bool wide) { + transition(vtos, atos); + + if (!EnableMethodHandles) { + // We should not encounter this bytecode if !EnableMethodHandles. + // The verifier will stop it. However, if we get past the verifier, + // this will stop the thread in a reasonable way, without crashing the JVM. + __ call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_IncompatibleClassChangeError)); + // the call_VM checks for exception, so we should never return here. + __ should_not_reach_here(); + return; + } + + const Register cache = rcx; + const Register index = rdx; + + resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); + if (VerifyOops) { + __ verify_oop(rax); + } +} + void TemplateTable::ldc2_w() { transition(vtos, vtos); Label Long, Done; @@ -2063,6 +2089,12 @@ case Bytecodes::_invokedynamic: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; + case Bytecodes::_fast_aldc: + entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); + break; + case Bytecodes::_fast_aldc_w: + entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); + break; default: ShouldNotReachHere(); break;
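On all three ports above, `fast_aldc` / `fast_aldc_w` send the first execution through `InterpreterRuntime::resolve_ldc` and afterwards read the already-resolved oop out of the constant-pool cache entry. As a rough analogy only (plain Java, not VM code, with made-up names), the control flow is a resolve-once memo cache:

```java
import java.util.HashMap;
import java.util.Map;

public class ResolveOnceCacheSketch {
    private final Map<Integer, Object> cache = new HashMap<Integer, Object>();

    // Stand-in for the slow resolution path; the real thing resolves a constant-pool entry.
    private Object resolve(int cpIndex) {
        System.out.println("resolving #" + cpIndex);
        return "constant-" + cpIndex;
    }

    Object constantAt(int cpIndex) {
        Object v = cache.get(cpIndex);
        if (v == null) {          // first execution: resolve and remember
            v = resolve(cpIndex);
            cache.put(cpIndex, v);
        }
        return v;                 // later executions: plain cache read, like fast_aldc
    }

    public static void main(String[] args) {
        ResolveOnceCacheSketch c = new ResolveOnceCacheSketch();
        System.out.println(c.constantAt(7)); // triggers resolve
        System.out.println(c.constantAt(7)); // served from the cache
    }
}
```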
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -34,7 +34,7 @@ VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, }; static BufferBlob* stub_blob; -static const int stub_size = 300; +static const int stub_size = 400; extern "C" { typedef void (*getPsrInfo_stub_t)(void*); @@ -56,7 +56,7 @@ const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT); const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT); - Label detect_486, cpu486, detect_586, std_cpuid1; + Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4; Label ext_cpuid1, ext_cpuid5, done; StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub"); @@ -131,13 +131,62 @@ __ movl(Address(rsi, 8), rcx); __ movl(Address(rsi,12), rdx); - __ cmpl(rax, 3); // Is cpuid(0x4) supported? - __ jccb(Assembler::belowEqual, std_cpuid1); + __ cmpl(rax, 0xa); // Is cpuid(0xB) supported? + __ jccb(Assembler::belowEqual, std_cpuid4); + + // + // cpuid(0xB) Processor Topology + // + __ movl(rax, 0xb); + __ xorl(rcx, rcx); // Threads level + __ cpuid(); + + __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset()))); + __ movl(Address(rsi, 0), rax); + __ movl(Address(rsi, 4), rbx); + __ movl(Address(rsi, 8), rcx); + __ movl(Address(rsi,12), rdx); + + __ movl(rax, 0xb); + __ movl(rcx, 1); // Cores level + __ cpuid(); + __ push(rax); + __ andl(rax, 0x1f); // Determine if valid topology level + __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level + __ andl(rax, 0xffff); + __ pop(rax); + __ jccb(Assembler::equal, std_cpuid4); + + __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset()))); + __ movl(Address(rsi, 0), rax); + __ movl(Address(rsi, 4), rbx); + __ movl(Address(rsi, 8), rcx); + __ movl(Address(rsi,12), rdx); + + __ movl(rax, 0xb); + __ movl(rcx, 2); // Packages level + __ cpuid(); + __ push(rax); + __ andl(rax, 0x1f); // Determine if valid topology level + __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level + __ andl(rax, 0xffff); + __ pop(rax); + __ jccb(Assembler::equal, std_cpuid4); + + __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset()))); + __ movl(Address(rsi, 0), rax); + __ movl(Address(rsi, 4), rbx); + __ movl(Address(rsi, 8), rcx); + __ movl(Address(rsi,12), rdx); // // cpuid(0x4) Deterministic cache params // + __ bind(std_cpuid4); __ movl(rax, 4); + __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported? 
+ __ jccb(Assembler::greater, std_cpuid1); + __ xorl(rcx, rcx); // L1 cache __ cpuid(); __ push(rax); @@ -460,13 +509,18 @@ AllocatePrefetchDistance = allocate_prefetch_distance(); AllocatePrefetchStyle = allocate_prefetch_style(); - if( AllocatePrefetchStyle == 2 && is_intel() && - cpu_family() == 6 && supports_sse3() ) { // watermark prefetching on Core + if( is_intel() && cpu_family() == 6 && supports_sse3() ) { + if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core #ifdef _LP64 - AllocatePrefetchDistance = 384; + AllocatePrefetchDistance = 384; #else - AllocatePrefetchDistance = 320; + AllocatePrefetchDistance = 320; #endif + } + if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus + AllocatePrefetchDistance = 192; + AllocatePrefetchLines = 4; + } } assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
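The enlarged getPsrInfo stub now probes cpuid leaf 0xB with ECX sub-leaves 0, 1 and 2 and treats a sub-leaf as invalid when (EAX[4:0] | EBX[15:0]) == 0, the test written out in the assembly above. A stand-alone sketch of the same probe using GCC/Clang inline assembly (x86 only; it assumes the maximum standard leaf is at least 0xB, which the stub checks via cpuid(0)):

    #include <cstdint>
    #include <cstdio>

    // Query one cpuid leaf/sub-leaf pair (GCC/Clang inline asm, x86 only).
    static void cpuid_count(uint32_t leaf, uint32_t subleaf,
                            uint32_t* a, uint32_t* b, uint32_t* c, uint32_t* d) {
      __asm__ volatile("cpuid"
                       : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
                       : "a"(leaf), "c"(subleaf));
    }

    int main() {
      for (uint32_t sub = 0; sub <= 2; sub++) {          // thread, core, package levels
        uint32_t eax, ebx, ecx, edx;
        cpuid_count(0xB, sub, &eax, &ebx, &ecx, &edx);
        // Invalid-level test used in the stub: eax[4:0] | ebx[15:0] == 0.
        bool valid = ((eax & 0x1f) | (ebx & 0xffff)) != 0;
        std::printf("leaf 0xB ecx=%u: logical cpus at level=%u valid=%d\n",
                    sub, ebx & 0xffff, (int)valid);
      }
      return 0;
    }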
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/cpu/x86/vm/vm_version_x86.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -114,6 +114,14 @@ } bits; }; + union TplCpuidBEbx { + uint32_t value; + struct { + uint32_t logical_cpus : 16, + : 16; + } bits; + }; + union ExtCpuid1Ecx { uint32_t value; struct { @@ -211,6 +219,25 @@ uint32_t dcp_cpuid4_ecx; // unused currently uint32_t dcp_cpuid4_edx; // unused currently + // cpuid function 0xB (processor topology) + // ecx = 0 + uint32_t tpl_cpuidB0_eax; + TplCpuidBEbx tpl_cpuidB0_ebx; + uint32_t tpl_cpuidB0_ecx; // unused currently + uint32_t tpl_cpuidB0_edx; // unused currently + + // ecx = 1 + uint32_t tpl_cpuidB1_eax; + TplCpuidBEbx tpl_cpuidB1_ebx; + uint32_t tpl_cpuidB1_ecx; // unused currently + uint32_t tpl_cpuidB1_edx; // unused currently + + // ecx = 2 + uint32_t tpl_cpuidB2_eax; + TplCpuidBEbx tpl_cpuidB2_ebx; + uint32_t tpl_cpuidB2_ecx; // unused currently + uint32_t tpl_cpuidB2_edx; // unused currently + // cpuid function 0x80000000 // example, unused uint32_t ext_max_function; uint32_t ext_vendor_name_0; @@ -316,6 +343,9 @@ static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); } static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); } static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); } + static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); } + static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); } + static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); } // Initialization static void initialize(); @@ -346,10 +376,22 @@ static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA' static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG' + static bool supports_processor_topology() { + return (_cpuid_info.std_max_function >= 0xB) && + // eax[4:0] | ebx[0:15] == 0 indicates invalid topology level. + // Some cpus have max cpuid >= 0xB but do not support processor topology. + ((_cpuid_info.tpl_cpuidB0_eax & 0x1f | _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus) != 0); + } + static uint cores_per_cpu() { uint result = 1; if (is_intel()) { - result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1); + if (supports_processor_topology()) { + result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus / + _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus; + } else { + result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1); + } } else if (is_amd()) { result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1); } @@ -358,7 +400,9 @@ static uint threads_per_core() { uint result = 1; - if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) { + if (is_intel() && supports_processor_topology()) { + result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus; + } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) { result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu / cores_per_cpu(); }
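With the three topology sub-leaves cached in CpuidInfo, cores_per_cpu() and threads_per_core() reduce to ratios of the logical-CPU counts reported at the thread and core levels. A worked example of that arithmetic, with illustrative values for a four-core part with HyperThreading (field names follow the new CpuidInfo layout):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t tpl_cpuidB0_logical = 2;  // leaf 0xB, ecx=0: logical cpus per core (illustrative)
      uint32_t tpl_cpuidB1_logical = 8;  // leaf 0xB, ecx=1: logical cpus per package (illustrative)

      uint32_t threads_per_core = tpl_cpuidB0_logical;                        // 2
      uint32_t cores_per_cpu    = tpl_cpuidB1_logical / tpl_cpuidB0_logical;  // 8 / 2 = 4

      std::printf("cores=%u threads/core=%u\n", cores_per_cpu, threads_per_core);
      return 0;
    }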
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -820,7 +820,7 @@ bool is_top_frame) { assert(popframe_extra_args == 0, "what to do?"); assert(!is_top_frame || (!callee_locals && !callee_param_count), - "top frame should have no caller") + "top frame should have no caller"); // This code must exactly match what InterpreterFrame::build // does (the full InterpreterFrame::build, that is, not the
--- a/hotspot/src/os/linux/vm/os_linux.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/os/linux/vm/os_linux.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -2079,9 +2079,9 @@ static char saved_jvm_path[MAXPATHLEN] = {0}; // Find the full path to the current module, libjvm.so or libjvm_g.so -void os::jvm_path(char *buf, jint len) { +void os::jvm_path(char *buf, jint buflen) { // Error checking. - if (len < MAXPATHLEN) { + if (buflen < MAXPATHLEN) { assert(false, "must use a large-enough buffer"); buf[0] = '\0'; return; @@ -2117,6 +2117,9 @@ // Look for JAVA_HOME in the environment. char* java_home_var = ::getenv("JAVA_HOME"); if (java_home_var != NULL && java_home_var[0] != 0) { + char* jrelib_p; + int len; + // Check the current module name "libjvm.so" or "libjvm_g.so". p = strrchr(buf, '/'); assert(strstr(p, "/libjvm") == p, "invalid library name"); @@ -2124,14 +2127,24 @@ if (realpath(java_home_var, buf) == NULL) return; - sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch); + + // determine if this is a legacy image or modules image + // modules image doesn't have "jre" subdirectory + len = strlen(buf); + jrelib_p = buf + len; + snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); + if (0 != access(buf, F_OK)) { + snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch); + } + if (0 == access(buf, F_OK)) { // Use current module name "libjvm[_g].so" instead of // "libjvm"debug_only("_g")".so" since for fastdebug version // we should have "libjvm.so" but debug_only("_g") adds "_g"! // It is used when we are choosing the HPI library's name // "libhpi[_g].so" in hpi::initialize_get_interface(). - sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p); + len = strlen(buf); + snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p); } else { // Go back to path of .so if (realpath(dli_fname, buf) == NULL)
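The os::jvm_path() change replaces the unbounded sprintf calls with snprintf into the caller-supplied buffer and uses access() to distinguish the legacy image layout (jre/lib/<arch>) from the modules image layout (lib/<arch>). A minimal stand-alone sketch of that probing pattern; the JAVA_HOME path and architecture string are illustrative, not taken from the changeset:

    #include <cstdio>
    #include <cstring>
    #include <unistd.h>

    int main() {
      char buf[4096];
      const char* cpu_arch = "amd64";                    // illustrative
      std::snprintf(buf, sizeof(buf), "%s", "/opt/jdk"); // stands in for realpath(JAVA_HOME)

      // Try the legacy layout first; fall back to the modules layout, always
      // bounding each write by the space remaining in the buffer.
      size_t len = std::strlen(buf);
      std::snprintf(buf + len, sizeof(buf) - len, "/jre/lib/%s", cpu_arch);
      if (access(buf, F_OK) != 0) {
        std::snprintf(buf + len, sizeof(buf) - len, "/lib/%s", cpu_arch);
      }
      std::printf("probed library directory: %s\n", buf);
      return 0;
    }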
--- a/hotspot/src/os/linux/vm/vtune_linux.cpp Thu Jul 29 19:30:35 2010 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,45 +0,0 @@ -/* - * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_vtune_linux.cpp.incl" - -// empty implementation - -void VTune::start_GC() {} -void VTune::end_GC() {} -void VTune::start_class_load() {} -void VTune::end_class_load() {} -void VTune::exit() {} -void VTune::register_stub(const char* name, address start, address end) {} - -void VTune::create_nmethod(nmethod* nm) {} -void VTune::delete_nmethod(nmethod* nm) {} - -void vtune_init() {} - - -// Reconciliation History -// vtune_solaris.cpp 1.8 99/07/12 23:54:21 -// End
--- a/hotspot/src/os/solaris/vm/osThread_solaris.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/os/solaris/vm/osThread_solaris.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -123,7 +123,7 @@ int set_interrupt_callback (Sync_Interrupt_Callback * cb); void remove_interrupt_callback(Sync_Interrupt_Callback * cb); - void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args); + void do_interrupt_callbacks_at_interrupt(InterruptArguments *args); // *************************************************************** // java.lang.Thread.interrupt state.
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -2435,6 +2435,8 @@ char* java_home_var = ::getenv("JAVA_HOME"); if (java_home_var != NULL && java_home_var[0] != 0) { char cpu_arch[12]; + char* jrelib_p; + int len; sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch)); #ifdef _LP64 // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9. @@ -2450,14 +2452,23 @@ p = strstr(p, "_g") ? "_g" : ""; realpath(java_home_var, buf); - sprintf(buf + strlen(buf), "/jre/lib/%s", cpu_arch); + // determine if this is a legacy image or modules image + // modules image doesn't have "jre" subdirectory + len = strlen(buf); + jrelib_p = buf + len; + snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch); + if (0 != access(buf, F_OK)) { + snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch); + } + if (0 == access(buf, F_OK)) { // Use current module name "libjvm[_g].so" instead of // "libjvm"debug_only("_g")".so" since for fastdebug version // we should have "libjvm.so" but debug_only("_g") adds "_g"! // It is used when we are choosing the HPI library's name // "libhpi[_g].so" in hpi::initialize_get_interface(). - sprintf(buf + strlen(buf), "/hotspot/libjvm%s.so", p); + len = strlen(buf); + snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p); } else { // Go back to path of .so realpath((char *)dlinfo.dli_fname, buf);
--- a/hotspot/src/os/solaris/vm/vtune_solaris.cpp Thu Jul 29 19:30:35 2010 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,40 +0,0 @@ -/* - * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_vtune_solaris.cpp.incl" - -// empty implementation - -void VTune::start_GC() {} -void VTune::end_GC() {} -void VTune::start_class_load() {} -void VTune::end_class_load() {} -void VTune::exit() {} -void VTune::register_stub(const char* name, address start, address end) {} - -void VTune::create_nmethod(nmethod* nm) {} -void VTune::delete_nmethod(nmethod* nm) {} - -void vtune_init() {}
--- a/hotspot/src/os/windows/vm/vtune_windows.cpp Thu Jul 29 19:30:35 2010 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,290 +0,0 @@ -/* - * Copyright (c) 1998, 2007, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "incls/_precompiled.incl" -#include "incls/_vtune_windows.cpp.incl" - -static int current_method_ID = 0; - -// ------------- iJITProf.h ------------------- -// defined by Intel -- do not change - -#include "windows.h" - -extern "C" { - enum iJITP_Event { - ExceptionOccurred_S, // Java exception - ExceptionOccurred_IDS, - - Shutdown, // VM exit - - ThreadCreate, // threads - ThreadDestroy, - ThreadSwitch, - - ClassLoadStart, // class loading - ClassLoadEnd, - - GCStart, // GC - GCEnd, - - NMethodCreate = 13, // nmethod creation - NMethodDelete - - // rest of event types omitted (call profiling not supported yet) - }; - - // version number -- 0 if VTune not installed - int WINAPI iJitP_VersionNumber(); - - enum iJITP_ModeFlags { - NoNotification = 0x0, // don't call vtune - NotifyNMethodCreate = 0x1, // notify NMethod_Create - NotifyNMethodDelete = 0x2, // notify NMethod_Create - NotifyMethodEnter = 0x4, // method entry - NotifyMethodExit = 0x8, // method exit - NotifyShutdown = 0x10, // VM exit - NotifyGC = 0x20, // GC - }; - - // call back function type - typedef void (WINAPI *ModeChangedFn)(iJITP_ModeFlags flags); - - // ------------- VTune method interfaces ---------------------- - typedef void (WINAPI *RegisterCallbackFn)(ModeChangedFn fn); // register callback - typedef int (WINAPI *NotifyEventFn)(iJITP_Event, void* event_data); - - // specific event data structures - - // data for NMethodCreate - - struct VTuneObj { // base class for allocation - // (can't use CHeapObj -- has vtable ptr) - void* operator new(size_t size) { return os::malloc(size); } - void operator delete(void* p) { fatal("never delete VTune data"); } - }; - - struct LineNumberInfo : VTuneObj { // PC-to-line number mapping - unsigned long offset; // byte offset from start of method - unsigned long line_num; // corresponding line number - }; - - struct MethodLoadInfo : VTuneObj { - unsigned long methodID; // unique method ID - const char* name; // method name - unsigned long instr_start; // start address - unsigned long instr_size; // length in bytes - unsigned long line_number_size; // size of line number table - LineNumberInfo* line_number_table; // line number mapping - unsigned long classID; // unique class ID - char* class_file_name; // fully qualified class file name - char* source_file_name; // 
fully qualified source file name - - MethodLoadInfo(nmethod* nm); // for real nmethods - MethodLoadInfo(const char* vm_name, address start, address end); - // for "nmethods" like stubs, interpreter, etc - - }; - - // data for NMethodDelete - struct MethodInfo : VTuneObj { - unsigned long methodID; // unique method ID - unsigned long classID; // (added for convenience -- not part of Intel interface) - - MethodInfo(methodOop m); - }; -}; - -MethodInfo::MethodInfo(methodOop m) { - // just give it a new ID -- we're not compiling methods twice (usually) - // (and even if we did, one might want to see the two versions separately) - methodID = ++current_method_ID; -} - -MethodLoadInfo::MethodLoadInfo(const char* vm_name, address start, address end) { - classID = 0; - methodID = ++current_method_ID; - name = vm_name; - instr_start = (unsigned long)start; - instr_size = end - start; - line_number_size = 0; - line_number_table = NULL; - class_file_name = source_file_name = "HotSpot JVM"; -} - -MethodLoadInfo::MethodLoadInfo(nmethod* nm) { - methodOop m = nm->method(); - MethodInfo info(m); - classID = info.classID; - methodID = info.methodID; - name = strdup(m->name()->as_C_string()); - instr_start = (unsigned long)nm->instructions_begin(); - instr_size = nm->code_size(); - line_number_size = 0; - line_number_table = NULL; - klassOop kl = m->method_holder(); - char* class_name = Klass::cast(kl)->name()->as_C_string(); - char* file_name = NEW_C_HEAP_ARRAY(char, strlen(class_name) + 1); - strcpy(file_name, class_name); - class_file_name = file_name; - char* src_name = NEW_C_HEAP_ARRAY(char, strlen(class_name) + strlen(".java") + 1); - strcpy(src_name, class_name); - strcat(src_name, ".java"); - source_file_name = src_name; -} - -// --------------------- DLL loading functions ------------------------ - -#define DLLNAME "iJitProf.dll" - -static HINSTANCE load_lib(char* name) { - HINSTANCE lib = NULL; - HKEY hk; - - // try to get VTune directory from the registry - if (RegOpenKey(HKEY_CURRENT_USER, "Software\\VB and VBA Program Settings\\VTune\\StartUp", &hk) == ERROR_SUCCESS) { - for (int i = 0; true; i++) { - char szName[MAX_PATH + 1]; - char szVal [MAX_PATH + 1]; - DWORD cbName, cbVal; - - cbName = cbVal = MAX_PATH + 1; - if (RegEnumValue(hk, i, szName, &cbName, NULL, NULL, (LPBYTE)szVal, &cbVal) == ERROR_SUCCESS) { - // get VTune directory - if (!strcmp(szName, name)) { - char*p = szVal; - while (*p == ' ') p++; // trim - char* q = p + strlen(p) - 1; - while (*q == ' ') *(q--) = '\0'; - - // chdir to the VTune dir - GetCurrentDirectory(MAX_PATH + 1, szName); - SetCurrentDirectory(p); - // load lib - lib = LoadLibrary(strcat(strcat(p, "\\"), DLLNAME)); - if (lib != NULL && WizardMode) tty->print_cr("*loaded VTune DLL %s", p); - // restore current dir - SetCurrentDirectory(szName); - break; - } - } else { - break; - } - } - } - return lib; -} - -static RegisterCallbackFn iJIT_RegisterCallback = NULL; -static NotifyEventFn iJIT_NotifyEvent = NULL; - -static bool load_iJIT_funcs() { - // first try to load from PATH - HINSTANCE lib = LoadLibrary(DLLNAME); - if (lib != NULL && WizardMode) tty->print_cr("*loaded VTune DLL %s via PATH", DLLNAME); - - // if not successful, try to look in the VTUNE directory - if (lib == NULL) lib = load_lib("VTUNEDIR30"); - if (lib == NULL) lib = load_lib("VTUNEDIR25"); - if (lib == NULL) lib = load_lib("VTUNEDIR"); - - if (lib == NULL) return false; // unsuccessful - - // try to load the functions - iJIT_RegisterCallback = (RegisterCallbackFn)GetProcAddress(lib, 
"iJIT_RegisterCallback"); - iJIT_NotifyEvent = (NotifyEventFn) GetProcAddress(lib, "iJIT_NotifyEvent"); - - if (!iJIT_RegisterCallback) tty->print_cr("*couldn't find VTune entry point iJIT_RegisterCallback"); - if (!iJIT_NotifyEvent) tty->print_cr("*couldn't find VTune entry point iJIT_NotifyEvent"); - return iJIT_RegisterCallback != NULL && iJIT_NotifyEvent != NULL; -} - -// --------------------- VTune class ------------------------ - -static bool active = false; -static int flags = 0; - -void VTune::start_GC() { - if (active && (flags & NotifyGC)) iJIT_NotifyEvent(GCStart, NULL); -} - -void VTune::end_GC() { - if (active && (flags & NotifyGC)) iJIT_NotifyEvent(GCEnd, NULL); -} - -void VTune::start_class_load() { - // not yet implemented in VTune -} - -void VTune::end_class_load() { - // not yet implemented in VTune -} - -void VTune::exit() { - if (active && (flags & NotifyShutdown)) iJIT_NotifyEvent(Shutdown, NULL); -} - -void VTune::register_stub(const char* name, address start, address end) { - if (flags & NotifyNMethodCreate) { - MethodLoadInfo* info = new MethodLoadInfo(name, start, end); - if (PrintMiscellaneous && WizardMode && Verbose) { - tty->print_cr("NMethodCreate %s (%d): %#x..%#x", info->name, info->methodID, - info->instr_start, info->instr_start + info->instr_size); - } - iJIT_NotifyEvent(NMethodCreate, info); - } -} - -void VTune::create_nmethod(nmethod* nm) { - if (flags & NotifyNMethodCreate) { - MethodLoadInfo* info = new MethodLoadInfo(nm); - if (PrintMiscellaneous && WizardMode && Verbose) { - tty->print_cr("NMethodCreate %s (%d): %#x..%#x", info->name, info->methodID, - info->instr_start, info->instr_start + info->instr_size); - } - iJIT_NotifyEvent(NMethodCreate, info); - } -} - -void VTune::delete_nmethod(nmethod* nm) { - if (flags & NotifyNMethodDelete) { - MethodInfo* info = new MethodInfo(nm->method()); - iJIT_NotifyEvent(NMethodDelete, info); - } -} - -static void set_flags(int new_flags) { - flags = new_flags; - // if (WizardMode) tty->print_cr("*new VTune flags: %#x", flags); -} - -void vtune_init() { - if (!UseVTune) return; - active = load_iJIT_funcs(); - if (active) { - iJIT_RegisterCallback((ModeChangedFn)set_flags); - } else { - assert(flags == 0, "flags shouldn't be set"); - } -}
--- a/hotspot/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -26,7 +26,7 @@ #ifdef AMD64 (void)memmove(to, from, count * HeapWordSize); #else - // Same as pd_aligned_conjoint_words, except includes a zero-count check. + // Includes a zero-count check. intx temp; __asm__ volatile(" testl %6,%6 ;" " jz 7f ;" @@ -84,7 +84,7 @@ break; } #else - // Same as pd_aligned_disjoint_words, except includes a zero-count check. + // Includes a zero-count check. intx temp; __asm__ volatile(" testl %6,%6 ;" " jz 3f ;" @@ -130,75 +130,18 @@ } static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { -#ifdef AMD64 - (void)memmove(to, from, count * HeapWordSize); -#else - // Same as pd_conjoint_words, except no zero-count check. - intx temp; - __asm__ volatile(" cmpl %4,%5 ;" - " leal -4(%4,%6,4),%3;" - " jbe 1f ;" - " cmpl %7,%5 ;" - " jbe 4f ;" - "1: cmpl $32,%6 ;" - " ja 3f ;" - " subl %4,%1 ;" - "2: movl (%4),%3 ;" - " movl %7,(%5,%4,1) ;" - " addl $4,%0 ;" - " subl $1,%2 ;" - " jnz 2b ;" - " jmp 7f ;" - "3: rep; smovl ;" - " jmp 7f ;" - "4: cmpl $32,%2 ;" - " movl %7,%0 ;" - " leal -4(%5,%6,4),%1;" - " ja 6f ;" - " subl %4,%1 ;" - "5: movl (%4),%3 ;" - " movl %7,(%5,%4,1) ;" - " subl $4,%0 ;" - " subl $1,%2 ;" - " jnz 5b ;" - " jmp 7f ;" - "6: std ;" - " rep; smovl ;" - " cld ;" - "7: nop " - : "=S" (from), "=D" (to), "=c" (count), "=r" (temp) - : "0" (from), "1" (to), "2" (count), "3" (temp) - : "memory", "flags"); -#endif // AMD64 + pd_conjoint_words(from, to, count); } static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { -#ifdef AMD64 pd_disjoint_words(from, to, count); -#else - // Same as pd_disjoint_words, except no zero-count check. - intx temp; - __asm__ volatile(" cmpl $32,%6 ;" - " ja 2f ;" - " subl %4,%1 ;" - "1: movl (%4),%3 ;" - " movl %7,(%5,%4,1);" - " addl $4,%0 ;" - " subl $1,%2 ;" - " jnz 1b ;" - " jmp 3f ;" - "2: rep; smovl ;" - "3: nop " - : "=S" (from), "=D" (to), "=c" (count), "=r" (temp) - : "0" (from), "1" (to), "2" (count), "3" (temp) - : "memory", "cc"); -#endif // AMD64 } static void pd_conjoint_bytes(void* from, void* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count); #else + // Includes a zero-count check. intx temp; __asm__ volatile(" testl %6,%6 ;" " jz 13f ;"
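The 32-bit aligned copy variants no longer carry their own unchecked inline-assembly loops; they now delegate to the conjoint/disjoint versions, whose comments simply note that a zero-count check is included (the .s files in the following hunks add the matching guards). The sketch below, a plain C++ stand-in for the assembly and not the actual HotSpot code, shows why that check matters:

    #include <cstddef>

    // A copy loop of the "subl $1,%ecx; jnz" shape executes its body before
    // testing, so a count of 0 wraps around and copies (size_t)-1 elements.
    static void unchecked_copy(const int* from, int* to, size_t count) {
      do {
        *to++ = *from++;        // runs at least once -- wrong when count == 0
      } while (--count != 0);
    }

    static void checked_copy(const int* from, int* to, size_t count) {
      if (count == 0) return;   // the guard the retained code paths include
      do {
        *to++ = *from++;
      } while (--count != 0);
    }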
--- a/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/os_cpu/linux_x86/vm/linux_x86_32.s Thu Jul 29 22:04:41 2010 -0700 @@ -121,10 +121,10 @@ jnz 3b addl %esi,%edi 4: movl %eax,%ecx # byte count less prefix - andl $3,%ecx # suffix byte count +5: andl $3,%ecx # suffix byte count jz 7f # no suffix # copy suffix -5: xorl %eax,%eax + xorl %eax,%eax 6: movb (%esi,%eax,1),%dl movb %dl,(%edi,%eax,1) addl $1,%eax @@ -159,10 +159,10 @@ # copy dwords, aligned or not 3: rep; smovl 4: movl %eax,%ecx # byte count - andl $3,%ecx # suffix byte count +5: andl $3,%ecx # suffix byte count jz 7f # no suffix # copy suffix -5: subl %esi,%edi + subl %esi,%edi addl $3,%esi 6: movb (%esi),%dl movb %dl,(%edi,%esi,1) @@ -214,10 +214,10 @@ # copy aligned dwords 3: rep; smovl 4: movl %eax,%ecx - andl $3,%ecx +5: andl $3,%ecx jz 7f # copy suffix -5: xorl %eax,%eax + xorl %eax,%eax 6: movb (%esi,%eax,1),%dl movb %dl,(%edi,%eax,1) addl $1,%eax @@ -250,9 +250,9 @@ jnz 3b addl %esi,%edi 4: movl %eax,%ecx - andl $3,%ecx +5: andl $3,%ecx jz 7f -5: subl %esi,%edi + subl %esi,%edi addl $3,%esi 6: movb (%esi),%dl movb %dl,(%edi,%esi,1) @@ -287,11 +287,12 @@ andl $3,%eax # either 0 or 2 jz 1f # no prefix # copy prefix + subl $1,%ecx + jl 5f # zero count movw (%esi),%dx movw %dx,(%edi) addl %eax,%esi # %eax == 2 addl %eax,%edi - subl $1,%ecx 1: movl %ecx,%eax # word count less prefix sarl %ecx # dword count jz 4f # no dwords to move @@ -454,12 +455,13 @@ ret .=.+10 2: subl %esi,%edi + jmp 4f .p2align 4,,15 3: movl (%esi),%edx movl %edx,(%edi,%esi,1) addl $4,%esi - subl $1,%ecx - jnz 3b +4: subl $1,%ecx + jge 3b popl %edi popl %esi ret @@ -467,19 +469,20 @@ std leal -4(%edi,%ecx,4),%edi # to + count*4 - 4 cmpl $32,%ecx - ja 3f # > 32 dwords + ja 4f # > 32 dwords subl %eax,%edi # eax == from + count*4 - 4 + jmp 3f .p2align 4,,15 2: movl (%eax),%edx movl %edx,(%edi,%eax,1) subl $4,%eax - subl $1,%ecx - jnz 2b +3: subl $1,%ecx + jge 2b cld popl %edi popl %esi ret -3: movl %eax,%esi # from + count*4 - 4 +4: movl %eax,%esi # from + count*4 - 4 rep; smovl cld popl %edi
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -861,7 +861,7 @@ cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap; add_func_t* os::atomic_add_func = os::atomic_add_bootstrap; -extern "C" _solaris_raw_setup_fpu(address ptr); +extern "C" void _solaris_raw_setup_fpu(address ptr); void os::setup_fpu() { address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std(); _solaris_raw_setup_fpu(fpu_cntrl);
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.s Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.s Thu Jul 29 22:04:41 2010 -0700 @@ -154,10 +154,10 @@ jnz 3b addl %esi,%edi 4: movl %eax,%ecx / byte count less prefix - andl $3,%ecx / suffix byte count +5: andl $3,%ecx / suffix byte count jz 7f / no suffix / copy suffix -5: xorl %eax,%eax + xorl %eax,%eax 6: movb (%esi,%eax,1),%dl movb %dl,(%edi,%eax,1) addl $1,%eax @@ -192,10 +192,10 @@ / copy dwords, aligned or not 3: rep; smovl 4: movl %eax,%ecx / byte count - andl $3,%ecx / suffix byte count +5: andl $3,%ecx / suffix byte count jz 7f / no suffix / copy suffix -5: subl %esi,%edi + subl %esi,%edi addl $3,%esi 6: movb (%esi),%dl movb %dl,(%edi,%esi,1) @@ -246,10 +246,10 @@ / copy aligned dwords 3: rep; smovl 4: movl %eax,%ecx - andl $3,%ecx +5: andl $3,%ecx jz 7f / copy suffix -5: xorl %eax,%eax + xorl %eax,%eax 6: movb (%esi,%eax,1),%dl movb %dl,(%edi,%eax,1) addl $1,%eax @@ -282,9 +282,9 @@ jnz 3b addl %esi,%edi 4: movl %eax,%ecx - andl $3,%ecx +5: andl $3,%ecx jz 7f -5: subl %esi,%edi + subl %esi,%edi addl $3,%esi 6: movb (%esi),%dl movb %dl,(%edi,%esi,1) @@ -318,11 +318,12 @@ andl $3,%eax / either 0 or 2 jz 1f / no prefix / copy prefix + subl $1,%ecx + jl 5f / zero count movw (%esi),%dx movw %dx,(%edi) addl %eax,%esi / %eax == 2 addl %eax,%edi - subl $1,%ecx 1: movl %ecx,%eax / word count less prefix sarl %ecx / dword count jz 4f / no dwords to move @@ -482,12 +483,13 @@ ret .=.+10 2: subl %esi,%edi + jmp 4f .align 16 3: movl (%esi),%edx movl %edx,(%edi,%esi,1) addl $4,%esi - subl $1,%ecx - jnz 3b +4: subl $1,%ecx + jge 3b popl %edi popl %esi ret @@ -495,19 +497,20 @@ std leal -4(%edi,%ecx,4),%edi / to + count*4 - 4 cmpl $32,%ecx - ja 3f / > 32 dwords + ja 4f / > 32 dwords subl %eax,%edi / eax == from + count*4 - 4 + jmp 3f .align 16 2: movl (%eax),%edx movl %edx,(%edi,%eax,1) subl $4,%eax - subl $1,%ecx - jnz 2b +3: subl $1,%ecx + jge 2b cld popl %edi popl %esi ret -3: movl %eax,%esi / from + count*4 - 4 +4: movl %eax,%esi / from + count*4 - 4 rep; smovl cld popl %edi
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/asm/codeBuffer.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -404,7 +404,7 @@ locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity); } else { locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity); - Copy::conjoint_bytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo)); + Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo)); _locs_own = true; } _locs_start = locs_start; @@ -581,7 +581,7 @@ (HeapWord*)(buf+buf_offset), (lsize + HeapWordSize-1) / HeapWordSize); } else { - Copy::conjoint_bytes(lstart, buf+buf_offset, lsize); + Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize); } } buf_offset += lsize;
--- a/hotspot/src/share/vm/c1/c1_Compilation.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/c1/c1_Compilation.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -242,10 +242,10 @@ code->insts()->initialize_shared_locs((relocInfo*)locs_buffer, locs_buffer_size / sizeof(relocInfo)); code->initialize_consts_size(Compilation::desired_max_constant_size()); - // Call stubs + deopt/exception handler + // Call stubs + two deopt handlers (regular and MH) + exception handler code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) + LIR_Assembler::exception_handler_size + - LIR_Assembler::deopt_handler_size); + 2 * LIR_Assembler::deopt_handler_size); }
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -878,15 +878,12 @@ case T_OBJECT : { ciObject* obj = con.as_object(); - if (obj->is_klass()) { - ciKlass* klass = obj->as_klass(); - if (!klass->is_loaded() || PatchALot) { - patch_state = state()->copy(); - t = new ObjectConstant(obj); - } else { - t = new InstanceConstant(klass->java_mirror()); - } + if (!obj->is_loaded() + || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) { + patch_state = state()->copy(); + t = new ObjectConstant(obj); } else { + assert(!obj->is_klass(), "must be java_mirror of klass"); t = new InstanceConstant(obj->as_instance()); } break;
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -601,7 +601,7 @@ static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { - Bytecode_field* field_access = Bytecode_field_at(caller(), caller->bcp_from(bci)); + Bytecode_field* field_access = Bytecode_field_at(caller, bci); // This can be static or non-static field access Bytecodes::Code code = field_access->code(); @@ -721,7 +721,7 @@ Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code if (stub_id == Runtime1::access_field_patching_id) { - Bytecode_field* field_access = Bytecode_field_at(caller_method(), caller_method->bcp_from(bci)); + Bytecode_field* field_access = Bytecode_field_at(caller_method, bci); FieldAccessInfo result; // initialize class if needed Bytecodes::Code code = field_access->code(); constantPoolHandle constants(THREAD, caller_method->constants()); @@ -781,11 +781,9 @@ case Bytecodes::_ldc: case Bytecodes::_ldc_w: { - Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method(), - caller_method->bcp_from(bci)); - klassOop resolved = caller_method->constants()->klass_at(cc->index(), CHECK); - // ldc wants the java mirror. - k = resolved->klass_part()->java_mirror(); + Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method, bci); + k = cc->resolve_constant(CHECK); + assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant"); } break; default: Unimplemented(); @@ -816,6 +814,15 @@ // Return to the now deoptimized frame. } + // If we are patching in a non-perm oop, make sure the nmethod + // is on the right list. + if (ScavengeRootsInCode && load_klass.not_null() && load_klass->is_scavengable()) { + MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag); + nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); + guarantee(nm != NULL, "only nmethods can contain non-perm oops"); + if (!nm->on_scavenge_root_list()) + CodeCache::add_scavenge_root_nmethod(nm); + } // Now copy code back @@ -1115,7 +1122,7 @@ if (length == 0) return; // Not guaranteed to be word atomic, but that doesn't matter // for anything but an oop array, which is covered by oop_arraycopy. - Copy::conjoint_bytes(src, dst, length); + Copy::conjoint_jbytes(src, dst, length); JRT_END JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -106,7 +106,7 @@ void BCEscapeAnalyzer::set_returned(ArgumentMap vars) { for (int i = 0; i < _arg_size; i++) { if (vars.contains(i)) - _arg_returned.set_bit(i); + _arg_returned.set(i); } _return_local = _return_local && !(vars.contains_unknown() || vars.contains_allocated()); _return_allocated = _return_allocated && vars.contains_allocated() && !(vars.contains_unknown() || vars.contains_vars()); @@ -126,16 +126,16 @@ if (_conservative) return true; for (int i = 0; i < _arg_size; i++) { - if (vars.contains(i) && _arg_stack.at(i)) + if (vars.contains(i) && _arg_stack.test(i)) return true; } return false; } -void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, BitMap &bm) { +void BCEscapeAnalyzer::clear_bits(ArgumentMap vars, VectorSet &bm) { for (int i = 0; i < _arg_size; i++) { if (vars.contains(i)) { - bm.clear_bit(i); + bm >>= i; } } } @@ -1157,15 +1157,15 @@ ciSignature* sig = method()->signature(); int j = 0; if (!method()->is_static()) { - _arg_local.set_bit(0); - _arg_stack.set_bit(0); + _arg_local.set(0); + _arg_stack.set(0); j++; } for (i = 0; i < sig->count(); i++) { ciType* t = sig->type_at(i); if (!t->is_primitive_type()) { - _arg_local.set_bit(j); - _arg_stack.set_bit(j); + _arg_local.set(j); + _arg_stack.set(j); } j += t->size(); } @@ -1198,9 +1198,9 @@ set_modified(var, OFFSET_ANY, 4); set_global_escape(var); } - _arg_local.clear(); - _arg_stack.clear(); - _arg_returned.clear(); + _arg_local.Clear(); + _arg_stack.Clear(); + _arg_returned.Clear(); _return_local = false; _return_allocated = false; _allocated_escapes = true; @@ -1254,7 +1254,7 @@ // Do not scan method if it has no object parameters and // does not returns an object (_return_allocated is set in initialize()). - if (_arg_local.is_empty() && !_return_allocated) { + if (_arg_local.Size() == 0 && !_return_allocated) { // Clear all info since method's bytecode was not analysed and // set pessimistic escape information. clear_escape_info(); @@ -1275,14 +1275,14 @@ // if (!has_dependencies() && !methodData()->is_empty()) { for (i = 0; i < _arg_size; i++) { - if (_arg_local.at(i)) { - assert(_arg_stack.at(i), "inconsistent escape info"); + if (_arg_local.test(i)) { + assert(_arg_stack.test(i), "inconsistent escape info"); methodData()->set_arg_local(i); methodData()->set_arg_stack(i); - } else if (_arg_stack.at(i)) { + } else if (_arg_stack.test(i)) { methodData()->set_arg_stack(i); } - if (_arg_returned.at(i)) { + if (_arg_returned.test(i)) { methodData()->set_arg_returned(i); } methodData()->set_arg_modified(i, _arg_modified[i]); @@ -1308,9 +1308,12 @@ // read escape information from method descriptor for (int i = 0; i < _arg_size; i++) { - _arg_local.at_put(i, methodData()->is_arg_local(i)); - _arg_stack.at_put(i, methodData()->is_arg_stack(i)); - _arg_returned.at_put(i, methodData()->is_arg_returned(i)); + if (methodData()->is_arg_local(i)) + _arg_local.set(i); + if (methodData()->is_arg_stack(i)) + _arg_stack.set(i); + if (methodData()->is_arg_returned(i)) + _arg_returned.set(i); _arg_modified[i] = methodData()->arg_modified(i); } _return_local = methodData()->eflag_set(methodDataOopDesc::return_local); @@ -1358,26 +1361,26 @@ BCEscapeAnalyzer::BCEscapeAnalyzer(ciMethod* method, BCEscapeAnalyzer* parent) : _conservative(method == NULL || !EstimateArgEscape) + , _arena(CURRENT_ENV->arena()) , _method(method) , _methodData(method ? 
method->method_data() : NULL) , _arg_size(method ? method->arg_size() : 0) - , _stack() - , _arg_local(_arg_size) - , _arg_stack(_arg_size) - , _arg_returned(_arg_size) - , _dirty(_arg_size) + , _arg_local(_arena) + , _arg_stack(_arena) + , _arg_returned(_arena) + , _dirty(_arena) , _return_local(false) , _return_allocated(false) , _allocated_escapes(false) , _unknown_modified(false) - , _dependencies() + , _dependencies(_arena, 4, 0, NULL) , _parent(parent) , _level(parent == NULL ? 0 : parent->level() + 1) { if (!_conservative) { - _arg_local.clear(); - _arg_stack.clear(); - _arg_returned.clear(); - _dirty.clear(); + _arg_local.Clear(); + _arg_stack.Clear(); + _arg_returned.Clear(); + _dirty.Clear(); Arena* arena = CURRENT_ENV->arena(); _arg_modified = (uint *) arena->Amalloc(_arg_size * sizeof(uint)); Copy::zero_to_bytes(_arg_modified, _arg_size * sizeof(uint)); @@ -1414,8 +1417,8 @@ deps->assert_evol_method(method()); } for (int i = 0; i < _dependencies.length(); i+=2) { - ciKlass *k = _dependencies[i]->as_klass(); - ciMethod *m = _dependencies[i+1]->as_method(); + ciKlass *k = _dependencies.at(i)->as_klass(); + ciMethod *m = _dependencies.at(i+1)->as_method(); deps->assert_unique_concrete_method(k, m); } }
--- a/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/bcEscapeAnalyzer.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -22,9 +22,6 @@ * */ -define_array(ciObjectArray, ciObject*); -define_stack(ciObjectList, ciObjectArray); - // This class implements a fast, conservative analysis of effect of methods // on the escape state of their arguments. The analysis is at the bytecode // level. @@ -34,18 +31,17 @@ class BCEscapeAnalyzer : public ResourceObj { private: + Arena* _arena; // ciEnv arena + bool _conservative; // If true, return maximally // conservative results. ciMethod* _method; ciMethodData* _methodData; int _arg_size; - - intStack _stack; - - BitMap _arg_local; - BitMap _arg_stack; - BitMap _arg_returned; - BitMap _dirty; + VectorSet _arg_local; + VectorSet _arg_stack; + VectorSet _arg_returned; + VectorSet _dirty; enum{ ARG_OFFSET_MAX = 31}; uint *_arg_modified; @@ -54,7 +50,7 @@ bool _allocated_escapes; bool _unknown_modified; - ciObjectList _dependencies; + GrowableArray<ciObject *> _dependencies; ciMethodBlocks *_methodBlocks; @@ -68,20 +64,10 @@ private: // helper functions bool is_argument(int i) { return i >= 0 && i < _arg_size; } - - void raw_push(int i) { _stack.push(i); } - int raw_pop() { return _stack.is_empty() ? -1 : _stack.pop(); } - void apush(int i) { raw_push(i); } - void spush() { raw_push(-1); } - void lpush() { spush(); spush(); } - int apop() { return raw_pop(); } - void spop() { assert(_stack.is_empty() || _stack.top() == -1, ""); raw_pop(); } - void lpop() { spop(); spop(); } - void set_returned(ArgumentMap vars); bool is_argument(ArgumentMap vars); bool is_arg_stack(ArgumentMap vars); - void clear_bits(ArgumentMap vars, BitMap &bs); + void clear_bits(ArgumentMap vars, VectorSet &bs); void set_method_escape(ArgumentMap vars); void set_global_escape(ArgumentMap vars); void set_dirty(ArgumentMap vars); @@ -116,25 +102,25 @@ ciMethodData* methodData() const { return _methodData; } BCEscapeAnalyzer* parent() const { return _parent; } int level() const { return _level; } - ciObjectList* dependencies() { return &_dependencies; } + GrowableArray<ciObject *>* dependencies() { return &_dependencies; } bool has_dependencies() const { return !_dependencies.is_empty(); } // retrieval of interprocedural escape information // The given argument does not escape the callee. bool is_arg_local(int i) const { - return !_conservative && _arg_local.at(i); + return !_conservative && _arg_local.test(i); } // The given argument escapes the callee, but does not become globally // reachable. bool is_arg_stack(int i) const { - return !_conservative && _arg_stack.at(i); + return !_conservative && _arg_stack.test(i); } // The given argument does not escape globally, and may be returned. bool is_arg_returned(int i) const { - return !_conservative && _arg_returned.at(i); } + return !_conservative && _arg_returned.test(i); } // True iff only input arguments are returned. bool is_return_local() const {
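BCEscapeAnalyzer's per-argument flags move from BitMap to arena-allocated VectorSet, and the dependency list becomes a GrowableArray<ciObject*>. The fragment below (HotSpot-internal types, not stand-alone) collects the VectorSet operations the rewritten code relies on, with the BitMap calls they replace noted in comments:

    // Usage sketch only; all names are as they appear in the diff above.
    static void vectorset_usage_sketch() {
      Arena* arena = CURRENT_ENV->arena();     // as in the new initializer list
      VectorSet arg_local(arena);

      arg_local.set(0);                        // was BitMap::set_bit(i)
      bool local = arg_local.test(0);          // was BitMap::at(i)
      arg_local >>= 0;                         // removes one element (was clear_bit(i))
      arg_local.Clear();                       // was BitMap::clear()
      bool empty = (arg_local.Size() == 0);    // was BitMap::is_empty()
      (void)local; (void)empty;
    }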
--- a/hotspot/src/share/vm/ci/ciCPCache.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciCPCache.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -44,13 +44,23 @@ // ciCPCache::is_f1_null_at bool ciCPCache::is_f1_null_at(int index) { VM_ENTRY_MARK; - constantPoolCacheOop cpcache = (constantPoolCacheOop) get_oop(); - oop f1 = cpcache->secondary_entry_at(index)->f1(); + oop f1 = entry_at(index)->f1(); return (f1 == NULL); } // ------------------------------------------------------------------ +// ciCPCache::get_pool_index +int ciCPCache::get_pool_index(int index) { + VM_ENTRY_MARK; + ConstantPoolCacheEntry* e = entry_at(index); + if (e->is_secondary_entry()) + e = entry_at(e->main_entry_index()); + return e->constant_pool_index(); +} + + +// ------------------------------------------------------------------ // ciCPCache::print // // Print debugging information about the cache.
--- a/hotspot/src/share/vm/ci/ciCPCache.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciCPCache.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -29,6 +29,18 @@ // Note: This class is called ciCPCache as ciConstantPoolCache is used // for something different. class ciCPCache : public ciObject { +private: + constantPoolCacheOop get_cpCacheOop() { // must be called inside a VM_ENTRY_MARK + return (constantPoolCacheOop) get_oop(); + } + + ConstantPoolCacheEntry* entry_at(int i) { + int raw_index = i; + if (constantPoolCacheOopDesc::is_secondary_index(i)) + raw_index = constantPoolCacheOopDesc::decode_secondary_index(i); + return get_cpCacheOop()->entry_at(raw_index); + } + public: ciCPCache(constantPoolCacheHandle cpcache) : ciObject(cpcache) {} @@ -41,5 +53,7 @@ bool is_f1_null_at(int index); + int get_pool_index(int index); + void print(); };
--- a/hotspot/src/share/vm/ci/ciClassList.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciClassList.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -85,6 +85,7 @@ friend class ciConstantPoolCache; \ friend class ciField; \ friend class ciConstant; \ +friend class ciCPCache; \ friend class ciFlags; \ friend class ciExceptionHandler; \ friend class ciCallProfile; \
--- a/hotspot/src/share/vm/ci/ciEnv.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciEnv.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -511,9 +511,22 @@ // // Implementation of get_constant_by_index(). ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool, - int index, + int pool_index, int cache_index, ciInstanceKlass* accessor) { + bool ignore_will_link; EXCEPTION_CONTEXT; + int index = pool_index; + if (cache_index >= 0) { + assert(index < 0, "only one kind of index at a time"); + ConstantPoolCacheEntry* cpc_entry = cpool->cache()->entry_at(cache_index); + index = cpc_entry->constant_pool_index(); + oop obj = cpc_entry->f1(); + if (obj != NULL) { + assert(obj->is_instance(), "must be an instance"); + ciObject* ciobj = get_object(obj); + return ciConstant(T_OBJECT, ciobj); + } + } constantTag tag = cpool->tag_at(index); if (tag.is_int()) { return ciConstant(T_INT, (jint)cpool->int_at(index)); @@ -540,8 +553,7 @@ return ciConstant(T_OBJECT, constant); } else if (tag.is_klass() || tag.is_unresolved_klass()) { // 4881222: allow ldc to take a class type - bool ignore; - ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore, accessor); + ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore_will_link, accessor); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; record_out_of_memory_failure(); @@ -549,12 +561,26 @@ } assert (klass->is_instance_klass() || klass->is_array_klass(), "must be an instance or array klass "); - return ciConstant(T_OBJECT, klass); + return ciConstant(T_OBJECT, klass->java_mirror()); } else if (tag.is_object()) { oop obj = cpool->object_at(index); assert(obj->is_instance(), "must be an instance"); ciObject* ciobj = get_object(obj); return ciConstant(T_OBJECT, ciobj); + } else if (tag.is_method_type()) { + // must execute Java code to link this CP entry into cache[i].f1 + ciSymbol* signature = get_object(cpool->method_type_signature_at(index))->as_symbol(); + ciObject* ciobj = get_unloaded_method_type_constant(signature); + return ciConstant(T_OBJECT, ciobj); + } else if (tag.is_method_handle()) { + // must execute Java code to link this CP entry into cache[i].f1 + int ref_kind = cpool->method_handle_ref_kind_at(index); + int callee_index = cpool->method_handle_klass_index_at(index); + ciKlass* callee = get_klass_by_index_impl(cpool, callee_index, ignore_will_link, accessor); + ciSymbol* name = get_object(cpool->method_handle_name_ref_at(index))->as_symbol(); + ciSymbol* signature = get_object(cpool->method_handle_signature_ref_at(index))->as_symbol(); + ciObject* ciobj = get_unloaded_method_handle_constant(callee, name, signature, ref_kind); + return ciConstant(T_OBJECT, ciobj); } else { ShouldNotReachHere(); return ciConstant(); @@ -562,61 +588,15 @@ } // ------------------------------------------------------------------ -// ciEnv::is_unresolved_string_impl -// -// Implementation of is_unresolved_string(). -bool ciEnv::is_unresolved_string_impl(instanceKlass* accessor, int index) const { - EXCEPTION_CONTEXT; - assert(accessor->is_linked(), "must be linked before accessing constant pool"); - constantPoolOop cpool = accessor->constants(); - constantTag tag = cpool->tag_at(index); - return tag.is_unresolved_string(); -} - -// ------------------------------------------------------------------ -// ciEnv::is_unresolved_klass_impl -// -// Implementation of is_unresolved_klass(). 
-bool ciEnv::is_unresolved_klass_impl(instanceKlass* accessor, int index) const { - EXCEPTION_CONTEXT; - assert(accessor->is_linked(), "must be linked before accessing constant pool"); - constantPoolOop cpool = accessor->constants(); - constantTag tag = cpool->tag_at(index); - return tag.is_unresolved_klass(); -} - -// ------------------------------------------------------------------ // ciEnv::get_constant_by_index // // Pull a constant out of the constant pool. How appropriate. // // Implementation note: this query is currently in no way cached. ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool, - int index, + int pool_index, int cache_index, ciInstanceKlass* accessor) { - GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, index, accessor);) -} - -// ------------------------------------------------------------------ -// ciEnv::is_unresolved_string -// -// Check constant pool -// -// Implementation note: this query is currently in no way cached. -bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor, - int index) const { - GUARDED_VM_ENTRY(return is_unresolved_string_impl(accessor->get_instanceKlass(), index); ) -} - -// ------------------------------------------------------------------ -// ciEnv::is_unresolved_klass -// -// Check constant pool -// -// Implementation note: this query is currently in no way cached. -bool ciEnv::is_unresolved_klass(ciInstanceKlass* accessor, - int index) const { - GUARDED_VM_ENTRY(return is_unresolved_klass_impl(accessor->get_instanceKlass(), index); ) + GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, pool_index, cache_index, accessor);) } // ------------------------------------------------------------------
--- a/hotspot/src/share/vm/ci/ciEnv.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciEnv.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -116,12 +116,8 @@ bool& is_accessible, ciInstanceKlass* loading_klass); ciConstant get_constant_by_index(constantPoolHandle cpool, - int constant_index, + int pool_index, int cache_index, ciInstanceKlass* accessor); - bool is_unresolved_string(ciInstanceKlass* loading_klass, - int constant_index) const; - bool is_unresolved_klass(ciInstanceKlass* loading_klass, - int constant_index) const; ciField* get_field_by_index(ciInstanceKlass* loading_klass, int field_index); ciMethod* get_method_by_index(constantPoolHandle cpool, @@ -137,12 +133,8 @@ bool& is_accessible, ciInstanceKlass* loading_klass); ciConstant get_constant_by_index_impl(constantPoolHandle cpool, - int constant_index, + int pool_index, int cache_index, ciInstanceKlass* loading_klass); - bool is_unresolved_string_impl (instanceKlass* loading_klass, - int constant_index) const; - bool is_unresolved_klass_impl (instanceKlass* loading_klass, - int constant_index) const; ciField* get_field_by_index_impl(ciInstanceKlass* loading_klass, int field_index); ciMethod* get_method_by_index_impl(constantPoolHandle cpool, @@ -190,6 +182,25 @@ return _factory->get_unloaded_klass(accessing_klass, name, true); } + // Get a ciKlass representing an unloaded klass mirror. + // Result is not necessarily unique, but will be unloaded. + ciInstance* get_unloaded_klass_mirror(ciKlass* type) { + return _factory->get_unloaded_klass_mirror(type); + } + + // Get a ciInstance representing an unresolved method handle constant. + ciInstance* get_unloaded_method_handle_constant(ciKlass* holder, + ciSymbol* name, + ciSymbol* signature, + int ref_kind) { + return _factory->get_unloaded_method_handle_constant(holder, name, signature, ref_kind); + } + + // Get a ciInstance representing an unresolved method type constant. + ciInstance* get_unloaded_method_type_constant(ciSymbol* signature) { + return _factory->get_unloaded_method_type_constant(signature); + } + // See if we already have an unloaded klass for the given name // or return NULL if not. ciKlass *check_get_unloaded_klass(ciKlass* accessing_klass, ciSymbol* name) {
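get_constant_by_index() now carries both a raw constant-pool index and a constant-pool-cache index, and the assert in the implementation ("only one kind of index at a time") implies callers pass a negative value for whichever index they do not have. A hedged illustration of that calling convention; the wrapper function below is hypothetical, and only the sign convention comes from the diff:

    // Hypothetical caller-side helper, HotSpot-internal types, not stand-alone.
    ciConstant load_constant_sketch(ciEnv* env, constantPoolHandle cpool,
                                    ciInstanceKlass* accessor,
                                    int pool_index, int cache_index) {
      if (cache_index >= 0) {
        // _fast_aldc case: the bytecode carries a constant-pool-cache index.
        return env->get_constant_by_index(cpool, -1, cache_index, accessor);
      } else {
        // Plain ldc/ldc_w case: the bytecode carries a raw pool index.
        return env->get_constant_by_index(cpool, pool_index, -1, accessor);
      }
    }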
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,9 +44,7 @@ _flags = ciFlags(access_flags); _has_finalizer = access_flags.has_finalizer(); _has_subklass = ik->subklass() != NULL; - _is_initialized = ik->is_initialized(); - // Next line must follow and use the result of the previous line: - _is_linked = _is_initialized || ik->is_linked(); + _init_state = (instanceKlass::ClassState)ik->get_init_state(); _nonstatic_field_size = ik->nonstatic_field_size(); _has_nonstatic_fields = ik->has_nonstatic_fields(); _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields: @@ -91,8 +89,7 @@ : ciKlass(name, ciInstanceKlassKlass::make()) { assert(name->byte_at(0) != '[', "not an instance klass"); - _is_initialized = false; - _is_linked = false; + _init_state = (instanceKlass::ClassState)0; _nonstatic_field_size = -1; _has_nonstatic_fields = false; _nonstatic_fields = NULL; @@ -109,21 +106,10 @@ // ------------------------------------------------------------------ // ciInstanceKlass::compute_shared_is_initialized -bool ciInstanceKlass::compute_shared_is_initialized() { +void ciInstanceKlass::compute_shared_init_state() { GUARDED_VM_ENTRY( instanceKlass* ik = get_instanceKlass(); - _is_initialized = ik->is_initialized(); - return _is_initialized; - ) -} - -// ------------------------------------------------------------------ -// ciInstanceKlass::compute_shared_is_linked -bool ciInstanceKlass::compute_shared_is_linked() { - GUARDED_VM_ENTRY( - instanceKlass* ik = get_instanceKlass(); - _is_linked = ik->is_linked(); - return _is_linked; + _init_state = (instanceKlass::ClassState)ik->get_init_state(); ) } @@ -323,8 +309,8 @@ // ciInstanceKlass::java_mirror // // Get the instance of java.lang.Class corresponding to this klass. +// Cache it on this->_java_mirror. ciInstance* ciInstanceKlass::java_mirror() { - assert(is_loaded(), "must be loaded"); if (_java_mirror == NULL) { _java_mirror = ciKlass::java_mirror(); }
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,9 +39,8 @@ jobject _loader; jobject _protection_domain; + instanceKlass::ClassState _init_state; // state of class bool _is_shared; - bool _is_initialized; - bool _is_linked; bool _has_finalizer; bool _has_subklass; bool _has_nonstatic_fields; @@ -87,27 +86,34 @@ bool is_shared() { return _is_shared; } - bool compute_shared_is_initialized(); - bool compute_shared_is_linked(); + void compute_shared_init_state(); bool compute_shared_has_subklass(); int compute_shared_nof_implementors(); int compute_nonstatic_fields(); GrowableArray<ciField*>* compute_nonstatic_fields_impl(GrowableArray<ciField*>* super_fields); + // Update the init_state for shared klasses + void update_if_shared(instanceKlass::ClassState expected) { + if (_is_shared && _init_state != expected) { + if (is_loaded()) compute_shared_init_state(); + } + } + public: // Has this klass been initialized? bool is_initialized() { - if (_is_shared && !_is_initialized) { - return is_loaded() && compute_shared_is_initialized(); - } - return _is_initialized; + update_if_shared(instanceKlass::fully_initialized); + return _init_state == instanceKlass::fully_initialized; + } + // Is this klass being initialized? + bool is_being_initialized() { + update_if_shared(instanceKlass::being_initialized); + return _init_state == instanceKlass::being_initialized; } // Has this klass been linked? bool is_linked() { - if (_is_shared && !_is_linked) { - return is_loaded() && compute_shared_is_linked(); - } - return _is_linked; + update_if_shared(instanceKlass::linked); + return _init_state >= instanceKlass::linked; } // General klass information.
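Collapsing _is_initialized/_is_linked into a single instanceKlass::ClassState lets is_linked() be a single ordering comparison, which assumes the ClassState enumerators are declared in the order a class progresses through them. A tiny stand-alone sketch of that idea (the enumerator list below is illustrative of the progression, not copied from instanceKlass.hpp):

    enum ClassStateSketch { allocated, loaded, linked, being_initialized, fully_initialized };

    // "At least linked" and "fully initialized" become simple comparisons.
    static bool is_linked_sketch(ClassStateSketch s)      { return s >= linked; }
    static bool is_initialized_sketch(ClassStateSketch s) { return s == fully_initialized; }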
--- a/hotspot/src/share/vm/ci/ciKlass.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciKlass.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -192,8 +192,14 @@ // ------------------------------------------------------------------ // ciKlass::java_mirror +// +// Get the instance of java.lang.Class corresponding to this klass. +// If it is an unloaded instance or array klass, return an unloaded +// mirror object of type Class. ciInstance* ciKlass::java_mirror() { GUARDED_VM_ENTRY( + if (!is_loaded()) + return ciEnv::current()->get_unloaded_klass_mirror(this); oop java_mirror = get_Klass()->java_mirror(); return CURRENT_ENV->get_object(java_mirror)->as_instance(); )
--- a/hotspot/src/share/vm/ci/ciMethod.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciMethod.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -54,10 +54,10 @@ _code = NULL; _exception_handlers = NULL; _liveness = NULL; - _bcea = NULL; _method_blocks = NULL; #ifdef COMPILER2 _flow = NULL; + _bcea = NULL; #endif // COMPILER2 ciEnv *env = CURRENT_ENV; @@ -121,11 +121,11 @@ _intrinsic_id = vmIntrinsics::_none; _liveness = NULL; _can_be_statically_bound = false; - _bcea = NULL; _method_blocks = NULL; _method_data = NULL; #ifdef COMPILER2 _flow = NULL; + _bcea = NULL; #endif // COMPILER2 } @@ -1033,10 +1033,15 @@ bool ciMethod::is_initializer () const { FETCH_FLAG_FROM_VM(is_initializer); } BCEscapeAnalyzer *ciMethod::get_bcea() { +#ifdef COMPILER2 if (_bcea == NULL) { _bcea = new (CURRENT_ENV->arena()) BCEscapeAnalyzer(this, NULL); } return _bcea; +#else // COMPILER2 + ShouldNotReachHere(); + return NULL; +#endif // COMPILER2 } ciMethodBlocks *ciMethod::get_method_blocks() {
--- a/hotspot/src/share/vm/ci/ciMethod.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciMethod.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -48,7 +48,6 @@ ciInstanceKlass* _holder; ciSignature* _signature; ciMethodData* _method_data; - BCEscapeAnalyzer* _bcea; ciMethodBlocks* _method_blocks; // Code attributes. @@ -72,7 +71,8 @@ // Optional liveness analyzer. MethodLiveness* _liveness; #ifdef COMPILER2 - ciTypeFlow* _flow; + ciTypeFlow* _flow; + BCEscapeAnalyzer* _bcea; #endif ciMethod(methodHandle h_m);
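The ciMethod hunks above move the _bcea field (the cached BCEscapeAnalyzer) under #ifdef COMPILER2 and make get_bcea() fail fast in builds without the C2 compiler. A small self-contained sketch of that shape, with illustrative names; the real code allocates from an arena rather than the heap:

  #include <cstdlib>
  #include <iostream>

  #define COMPILER2 1   // comment out to exercise the "unsupported" path

  struct EscapeInfo { bool all_args_escape = true; };

  class MethodInfo {
  #ifdef COMPILER2
    EscapeInfo* _bcea = nullptr;   // computed lazily, only present in C2 builds
  #endif
  public:
    EscapeInfo* get_bcea() {
  #ifdef COMPILER2
      if (_bcea == nullptr) _bcea = new EscapeInfo();   // lazy init (leaked here for brevity)
      return _bcea;
  #else
      // Callers must not ask for escape analysis in a build without C2.
      std::abort();
      return nullptr;
  #endif
    }
  };

  int main() {
    MethodInfo m;
    std::cout << "all_args_escape=" << m.get_bcea()->all_args_escape << "\n";
  }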
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -70,6 +70,7 @@ _unloaded_methods = new (arena) GrowableArray<ciMethod*>(arena, 4, 0, NULL); _unloaded_klasses = new (arena) GrowableArray<ciKlass*>(arena, 8, 0, NULL); + _unloaded_instances = new (arena) GrowableArray<ciInstance*>(arena, 4, 0, NULL); _return_addresses = new (arena) GrowableArray<ciReturnAddress*>(arena, 8, 0, NULL); } @@ -443,6 +444,74 @@ return new_klass; } + +//------------------------------------------------------------------ +// ciObjectFactory::get_unloaded_instance +// +// Get a ciInstance representing an as-yet undetermined instance of a given class. +// +ciInstance* ciObjectFactory::get_unloaded_instance(ciInstanceKlass* instance_klass) { + for (int i=0; i<_unloaded_instances->length(); i++) { + ciInstance* entry = _unloaded_instances->at(i); + if (entry->klass()->equals(instance_klass)) { + // We've found a match. + return entry; + } + } + + // This is a new unloaded instance. Create it and stick it in + // the cache. + ciInstance* new_instance = new (arena()) ciInstance(instance_klass); + + init_ident_of(new_instance); + _unloaded_instances->append(new_instance); + + // make sure it looks the way we want: + assert(!new_instance->is_loaded(), ""); + assert(new_instance->klass() == instance_klass, ""); + + return new_instance; +} + + +//------------------------------------------------------------------ +// ciObjectFactory::get_unloaded_klass_mirror +// +// Get a ciInstance representing an unresolved klass mirror. +// +// Currently, this ignores the parameters and returns a unique unloaded instance. +ciInstance* ciObjectFactory::get_unloaded_klass_mirror(ciKlass* type) { + assert(ciEnv::_Class_klass != NULL, ""); + return get_unloaded_instance(ciEnv::_Class_klass->as_instance_klass()); +} + +//------------------------------------------------------------------ +// ciObjectFactory::get_unloaded_method_handle_constant +// +// Get a ciInstance representing an unresolved method handle constant. +// +// Currently, this ignores the parameters and returns a unique unloaded instance. +ciInstance* ciObjectFactory::get_unloaded_method_handle_constant(ciKlass* holder, + ciSymbol* name, + ciSymbol* signature, + int ref_kind) { + if (ciEnv::_MethodHandle_klass == NULL) return NULL; + return get_unloaded_instance(ciEnv::_MethodHandle_klass->as_instance_klass()); +} + +//------------------------------------------------------------------ +// ciObjectFactory::get_unloaded_method_type_constant +// +// Get a ciInstance representing an unresolved method type constant. +// +// Currently, this ignores the parameters and returns a unique unloaded instance. +ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signature) { + if (ciEnv::_MethodType_klass == NULL) return NULL; + return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass()); +} + + + //------------------------------------------------------------------ // ciObjectFactory::get_empty_methodData // @@ -637,7 +706,8 @@ // // Print debugging information about the object factory void ciObjectFactory::print() { - tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_klasses=%d>", + tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_instances=%d unloaded_klasses=%d>", _ci_objects->length(), _unloaded_methods->length(), + _unloaded_instances->length(), _unloaded_klasses->length()); }
--- a/hotspot/src/share/vm/ci/ciObjectFactory.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciObjectFactory.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -39,6 +39,7 @@ GrowableArray<ciObject*>* _ci_objects; GrowableArray<ciMethod*>* _unloaded_methods; GrowableArray<ciKlass*>* _unloaded_klasses; + GrowableArray<ciInstance*>* _unloaded_instances; GrowableArray<ciReturnAddress*>* _return_addresses; int _next_ident; @@ -73,6 +74,8 @@ void print_contents_impl(); + ciInstance* get_unloaded_instance(ciInstanceKlass* klass); + public: static bool is_initialized() { return _initialized; } @@ -98,6 +101,18 @@ ciSymbol* name, bool create_if_not_found); + // Get a ciInstance representing an unresolved klass mirror. + ciInstance* get_unloaded_klass_mirror(ciKlass* type); + + // Get a ciInstance representing an unresolved method handle constant. + ciInstance* get_unloaded_method_handle_constant(ciKlass* holder, + ciSymbol* name, + ciSymbol* signature, + int ref_kind); + + // Get a ciInstance representing an unresolved method type constant. + ciInstance* get_unloaded_method_type_constant(ciSymbol* signature); + // Get the ciMethodData representing the methodData for a method // with none.
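The new ciObjectFactory::get_unloaded_instance above keeps an append-only list of placeholder instances, one per klass, and the unloaded klass-mirror / MethodHandle / MethodType helpers all funnel into it. A simplified standalone sketch of that cache, with invented types and std::string standing in for the klass:

  #include <cassert>
  #include <string>
  #include <vector>

  struct UnloadedInstance { std::string klass_name; };

  class Factory {
    std::vector<UnloadedInstance*> _unloaded_instances;
  public:
    UnloadedInstance* get_unloaded_instance(const std::string& klass_name) {
      for (UnloadedInstance* e : _unloaded_instances)   // linear scan; the list stays tiny
        if (e->klass_name == klass_name) return e;
      UnloadedInstance* fresh = new UnloadedInstance{klass_name};   // new placeholder, cached
      _unloaded_instances.push_back(fresh);
      return fresh;
    }
  };

  int main() {
    Factory f;
    // The same klass always yields the same shared placeholder object.
    assert(f.get_unloaded_instance("java/lang/Class") == f.get_unloaded_instance("java/lang/Class"));
  }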
--- a/hotspot/src/share/vm/ci/ciStreams.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciStreams.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -186,12 +186,13 @@ } // ------------------------------------------------------------------ -// ciBytecodeStream::get_constant_index +// ciBytecodeStream::get_constant_raw_index // // If this bytecode is one of the ldc variants, get the index of the // referenced constant. -int ciBytecodeStream::get_constant_index() const { - switch(cur_bc()) { +int ciBytecodeStream::get_constant_raw_index() const { + // work-alike for Bytecode_loadconstant::raw_index() + switch (cur_bc()) { case Bytecodes::_ldc: return get_index_u1(); case Bytecodes::_ldc_w: @@ -202,25 +203,52 @@ return 0; } } + +// ------------------------------------------------------------------ +// ciBytecodeStream::get_constant_pool_index +// Decode any CP cache index into a regular pool index. +int ciBytecodeStream::get_constant_pool_index() const { + // work-alike for Bytecode_loadconstant::pool_index() + int index = get_constant_raw_index(); + if (has_cache_index()) { + return get_cpcache()->get_pool_index(index); + } + return index; +} + +// ------------------------------------------------------------------ +// ciBytecodeStream::get_constant_cache_index +// Return the CP cache index, or -1 if there isn't any. +int ciBytecodeStream::get_constant_cache_index() const { + // work-alike for Bytecode_loadconstant::cache_index() + return has_cache_index() ? get_constant_raw_index() : -1; +} + // ------------------------------------------------------------------ // ciBytecodeStream::get_constant // // If this bytecode is one of the ldc variants, get the referenced // constant. ciConstant ciBytecodeStream::get_constant() { + int pool_index = get_constant_raw_index(); + int cache_index = -1; + if (has_cache_index()) { + cache_index = pool_index; + pool_index = -1; + } VM_ENTRY_MARK; constantPoolHandle cpool(_method->get_methodOop()->constants()); - return CURRENT_ENV->get_constant_by_index(cpool, get_constant_index(), _holder); + return CURRENT_ENV->get_constant_by_index(cpool, pool_index, cache_index, _holder); } // ------------------------------------------------------------------ -bool ciBytecodeStream::is_unresolved_string() const { - return CURRENT_ENV->is_unresolved_string(_holder, get_constant_index()); -} - -// ------------------------------------------------------------------ -bool ciBytecodeStream::is_unresolved_klass() const { - return CURRENT_ENV->is_unresolved_klass(_holder, get_klass_index()); +// ciBytecodeStream::get_constant_pool_tag +// +// If this bytecode is one of the ldc variants, get the referenced +// constant. +constantTag ciBytecodeStream::get_constant_pool_tag(int index) const { + VM_ENTRY_MARK; + return _method->get_methodOop()->constants()->tag_at(index); } // ------------------------------------------------------------------ @@ -378,13 +406,16 @@ // ------------------------------------------------------------------ // ciBytecodeStream::get_cpcache -ciCPCache* ciBytecodeStream::get_cpcache() { - VM_ENTRY_MARK; - // Get the constant pool. - constantPoolOop cpool = _holder->get_instanceKlass()->constants(); - constantPoolCacheOop cpcache = cpool->cache(); +ciCPCache* ciBytecodeStream::get_cpcache() const { + if (_cpcache == NULL) { + VM_ENTRY_MARK; + // Get the constant pool. 
+ constantPoolOop cpool = _holder->get_instanceKlass()->constants(); + constantPoolCacheOop cpcache = cpool->cache(); - return CURRENT_ENV->get_object(cpcache)->as_cpcache(); + *(ciCPCache**)&_cpcache = CURRENT_ENV->get_object(cpcache)->as_cpcache(); + } + return _cpcache; } // ------------------------------------------------------------------
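The ciStreams.cpp changes above split ldc index handling into three views of the same operand: the raw index as encoded in the bytecode, the constant-pool index (decoded through the CP cache when the bytecode carries a cache index), and the cache index itself, or -1 when there is none. A toy model of that decoding, with made-up table contents:

  #include <cassert>
  #include <vector>

  struct CPCache {
    std::vector<int> pool_index_of_entry;   // cache slot -> constant pool index
    int get_pool_index(int cache_index) const { return pool_index_of_entry.at(cache_index); }
  };

  // Raw operand plus a flag saying whether it indexes the CP cache.
  int constant_pool_index(int raw_index, bool has_cache_index, const CPCache& cache) {
    return has_cache_index ? cache.get_pool_index(raw_index) : raw_index;
  }

  int constant_cache_index(int raw_index, bool has_cache_index) {
    return has_cache_index ? raw_index : -1;   // -1 means "no cache entry"
  }

  int main() {
    CPCache cache{{ /*slot 0*/ 17, /*slot 1*/ 42 }};
    assert(constant_pool_index(1, true,  cache) == 42);   // cache slot 1 maps to pool entry 42
    assert(constant_pool_index(7, false, cache) == 7);    // plain pool index passes through
    assert(constant_cache_index(7, false) == -1);
  }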
--- a/hotspot/src/share/vm/ci/ciStreams.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciStreams.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -46,6 +46,7 @@ ciMethod* _method; // the method ciInstanceKlass* _holder; + ciCPCache* _cpcache; address _bc_start; // Start of current bytecode for table address _was_wide; // Address past last wide bytecode jint* _table_base; // Aligned start of last table or switch @@ -58,7 +59,9 @@ void reset( address base, unsigned int size ) { _bc_start =_was_wide = 0; - _start = _pc = base; _end = base + size; } + _start = _pc = base; _end = base + size; + _cpcache = NULL; + } void assert_wide(bool require_wide) const { if (require_wide) @@ -136,15 +139,20 @@ bool is_wide() const { return ( _pc == _was_wide ); } // Does this instruction contain an index which refes into the CP cache? - bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); } + bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); } int get_index_u1() const { return bytecode()->get_index_u1(cur_bc_raw()); } + int get_index_u1_cpcache() const { + return bytecode()->get_index_u1_cpcache(cur_bc_raw()); + } + // Get a byte index following this bytecode. // If prefixed with a wide bytecode, get a wide index. int get_index() const { + assert(!has_cache_index(), "else use cpcache variant"); return (_pc == _was_wide) // was widened? ? get_index_u2(true) // yes, return wide index : get_index_u1(); // no, return narrow index @@ -207,7 +215,9 @@ return cur_bci() + get_int_table(index); } // --- Constant pool access --- - int get_constant_index() const; + int get_constant_raw_index() const; + int get_constant_pool_index() const; + int get_constant_cache_index() const; int get_field_index(); int get_method_index(); @@ -217,12 +227,17 @@ int get_klass_index() const; // If this bytecode is one of the ldc variants, get the referenced - // constant + // constant. Do not attempt to resolve it, since that would require + // execution of Java code. If it is not resolved, return an unloaded + // object (ciConstant.as_object()->is_loaded() == false). ciConstant get_constant(); - // True if the ldc variant points to an unresolved string - bool is_unresolved_string() const; - // True if the ldc variant points to an unresolved klass - bool is_unresolved_klass() const; + constantTag get_constant_pool_tag(int index) const; + + // True if the klass-using bytecode points to an unresolved klass + bool is_unresolved_klass() const { + constantTag tag = get_constant_pool_tag(get_klass_index()); + return tag.is_unresolved_klass(); + } // If this bytecode is one of get_field, get_static, put_field, // or put_static, get the referenced field. @@ -238,7 +253,7 @@ int get_method_holder_index(); int get_method_signature_index(); - ciCPCache* get_cpcache(); + ciCPCache* get_cpcache() const; ciCallSite* get_call_site(); };
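get_cpcache() in the hunks above also becomes a const accessor that fills in the new _cpcache field on first use; the diff writes through a pointer cast, which a standalone version would normally express with mutable. A minimal sketch of that lazy caching, illustrative types only:

  #include <cassert>

  struct CPCache { int dummy = 0; };

  class BytecodeStream {
    mutable CPCache* _cpcache = nullptr;   // lazily filled; not part of the observable state
  public:
    CPCache* get_cpcache() const {
      if (_cpcache == nullptr) _cpcache = new CPCache();   // fetched once, reused afterwards
      return _cpcache;
    }
  };

  int main() {
    BytecodeStream s;
    assert(s.get_cpcache() == s.get_cpcache());   // same object on every call
  }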
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -712,10 +712,8 @@ ciObject* obj = con.as_object(); if (obj->is_null_object()) { push_null(); - } else if (obj->is_klass()) { - // The type of ldc <class> is java.lang.Class - push_object(outer()->env()->Class_klass()); } else { + assert(!obj->is_klass(), "must be java_mirror of klass"); push_object(obj->klass()); } } else {
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -117,6 +117,29 @@ cp->string_index_at_put(index, string_index); } break; + case JVM_CONSTANT_MethodHandle : + case JVM_CONSTANT_MethodType : + if (!EnableMethodHandles || + _major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { + classfile_parse_error( + (!EnableInvokeDynamic ? + "This JVM does not support constant tag %u in class file %s" : + "Class file version does not support constant tag %u in class file %s"), + tag, CHECK); + } + if (tag == JVM_CONSTANT_MethodHandle) { + cfs->guarantee_more(4, CHECK); // ref_kind, method_index, tag/access_flags + u1 ref_kind = cfs->get_u1_fast(); + u2 method_index = cfs->get_u2_fast(); + cp->method_handle_index_at_put(index, ref_kind, method_index); + } else if (tag == JVM_CONSTANT_MethodType) { + cfs->guarantee_more(3, CHECK); // signature_index, tag/access_flags + u2 signature_index = cfs->get_u2_fast(); + cp->method_type_index_at_put(index, signature_index); + } else { + ShouldNotReachHere(); + } + break; case JVM_CONSTANT_Integer : { cfs->guarantee_more(5, CHECK); // bytes, tag/access_flags @@ -337,6 +360,60 @@ cp->unresolved_string_at_put(index, sym); } break; + case JVM_CONSTANT_MethodHandle : + { + int ref_index = cp->method_handle_index_at(index); + check_property( + valid_cp_range(ref_index, length) && + EnableMethodHandles, + "Invalid constant pool index %u in class file %s", + ref_index, CHECK_(nullHandle)); + constantTag tag = cp->tag_at(ref_index); + int ref_kind = cp->method_handle_ref_kind_at(index); + switch (ref_kind) { + case JVM_REF_getField: + case JVM_REF_getStatic: + case JVM_REF_putField: + case JVM_REF_putStatic: + check_property( + tag.is_field(), + "Invalid constant pool index %u in class file %s (not a field)", + ref_index, CHECK_(nullHandle)); + break; + case JVM_REF_invokeVirtual: + case JVM_REF_invokeStatic: + case JVM_REF_invokeSpecial: + case JVM_REF_newInvokeSpecial: + check_property( + tag.is_method(), + "Invalid constant pool index %u in class file %s (not a method)", + ref_index, CHECK_(nullHandle)); + break; + case JVM_REF_invokeInterface: + check_property( + tag.is_interface_method(), + "Invalid constant pool index %u in class file %s (not an interface method)", + ref_index, CHECK_(nullHandle)); + break; + default: + classfile_parse_error( + "Bad method handle kind at constant pool index %u in class file %s", + index, CHECK_(nullHandle)); + } + // Keep the ref_index unchanged. It will be indirected at link-time. 
+ } + break; + case JVM_CONSTANT_MethodType : + { + int ref_index = cp->method_type_index_at(index); + check_property( + valid_cp_range(ref_index, length) && + cp->tag_at(ref_index).is_utf8() && + EnableMethodHandles, + "Invalid constant pool index %u in class file %s", + ref_index, CHECK_(nullHandle)); + } + break; default: fatal(err_msg("bad constant pool tag value %u", cp->tag_at(index).value())); @@ -452,6 +529,43 @@ } break; } + case JVM_CONSTANT_MethodHandle: { + int ref_index = cp->method_handle_index_at(index); + int ref_kind = cp->method_handle_ref_kind_at(index); + switch (ref_kind) { + case JVM_REF_invokeVirtual: + case JVM_REF_invokeStatic: + case JVM_REF_invokeSpecial: + case JVM_REF_newInvokeSpecial: + { + int name_and_type_ref_index = cp->name_and_type_ref_index_at(ref_index); + int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index); + symbolHandle name(THREAD, cp->symbol_at(name_ref_index)); + if (ref_kind == JVM_REF_newInvokeSpecial) { + if (name() != vmSymbols::object_initializer_name()) { + classfile_parse_error( + "Bad constructor name at constant pool index %u in class file %s", + name_ref_index, CHECK_(nullHandle)); + } + } else { + if (name() == vmSymbols::object_initializer_name()) { + classfile_parse_error( + "Bad method name at constant pool index %u in class file %s", + name_ref_index, CHECK_(nullHandle)); + } + } + } + break; + // Other ref_kinds are already fully checked in previous pass. + } + break; + } + case JVM_CONSTANT_MethodType: { + symbolHandle no_name = vmSymbolHandles::type_name(); // place holder + symbolHandle signature(THREAD, cp->method_type_signature_at(index)); + verify_legal_method_signature(no_name, signature, CHECK_(nullHandle)); + break; + } } // end of switch } // end of for @@ -467,7 +581,7 @@ case JVM_CONSTANT_UnresolvedClass : // Patching a class means pre-resolving it. // The name in the constant pool is ignored. - if (patch->klass() == SystemDictionary::Class_klass()) { // %%% java_lang_Class::is_instance + if (java_lang_Class::is_instance(patch())) { guarantee_property(!java_lang_Class::is_primitive(patch()), "Illegal class patch at %d in class file %s", index, CHECK);
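The classFileParser additions above parse the new CONSTANT_MethodHandle and CONSTANT_MethodType entries and then check, in the validation pass, that each method handle's reference kind points at a compatible constant (field kinds at Fieldref entries, invoke kinds at Methodref entries, invokeInterface at InterfaceMethodref entries). A compact sketch of that compatibility check; the numbering follows the JVM specification's REF_* kinds, and the tag enum is simplified:

  #include <cassert>

  enum RefKind { REF_getField = 1, REF_getStatic, REF_putField, REF_putStatic,
                 REF_invokeVirtual, REF_invokeStatic, REF_invokeSpecial,
                 REF_newInvokeSpecial, REF_invokeInterface };
  enum TagKind { TAG_field, TAG_method, TAG_interface_method, TAG_other };

  bool ref_kind_matches_tag(RefKind kind, TagKind tag) {
    switch (kind) {
      case REF_getField:      case REF_getStatic:
      case REF_putField:      case REF_putStatic:        return tag == TAG_field;
      case REF_invokeVirtual: case REF_invokeStatic:
      case REF_invokeSpecial: case REF_newInvokeSpecial: return tag == TAG_method;
      case REF_invokeInterface:                          return tag == TAG_interface_method;
      default:                                           return false;   // unknown kind -> format error
    }
  }

  int main() {
    assert( ref_kind_matches_tag(REF_putStatic, TAG_field));
    assert(!ref_kind_matches_tag(REF_invokeInterface, TAG_method));
    assert(!ref_kind_matches_tag(REF_getField, TAG_other));
  }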
--- a/hotspot/src/share/vm/classfile/classLoader.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/classfile/classLoader.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -832,7 +832,6 @@ instanceKlassHandle ClassLoader::load_classfile(symbolHandle h_name, TRAPS) { - VTuneClassLoadMarker clm; ResourceMark rm(THREAD); EventMark m("loading class " INTPTR_FORMAT, (address)h_name()); ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -2454,6 +2454,48 @@ return Handle(THREAD, (oop) result.get_jobject()); } +// Ask Java code to find or construct a method handle constant. +Handle SystemDictionary::link_method_handle_constant(KlassHandle caller, + int ref_kind, //e.g., JVM_REF_invokeVirtual + KlassHandle callee, + symbolHandle name_sym, + symbolHandle signature, + TRAPS) { + Handle empty; + Handle name = java_lang_String::create_from_symbol(name_sym(), CHECK_(empty)); + Handle type; + if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') { + bool ignore_is_on_bcp = false; + type = find_method_handle_type(signature, caller, ignore_is_on_bcp, CHECK_(empty)); + } else { + SignatureStream ss(signature(), false); + if (!ss.is_done()) { + oop mirror = ss.as_java_mirror(caller->class_loader(), caller->protection_domain(), + SignatureStream::NCDFError, CHECK_(empty)); + type = Handle(THREAD, mirror); + ss.next(); + if (!ss.is_done()) type = Handle(); // error! + } + } + if (type.is_null()) { + THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad signature", empty); + } + + // call sun.dyn.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle + JavaCallArguments args; + args.push_oop(caller->java_mirror()); // the referring class + args.push_int(ref_kind); + args.push_oop(callee->java_mirror()); // the target class + args.push_oop(name()); + args.push_oop(type()); + JavaValue result(T_OBJECT); + JavaCalls::call_static(&result, + SystemDictionary::MethodHandleNatives_klass(), + vmSymbols::linkMethodHandleConstant_name(), + vmSymbols::linkMethodHandleConstant_signature(), + &args, CHECK_(empty)); + return Handle(THREAD, (oop) result.get_jobject()); +} // Ask Java code to find or construct a java.dyn.CallSite for the given // name and signature, as interpreted relative to the given class loader.
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -473,6 +473,13 @@ KlassHandle accessing_klass, bool& return_bcp_flag, TRAPS); + // ask Java to compute a java.dyn.MethodHandle object for a given CP entry + static Handle link_method_handle_constant(KlassHandle caller, + int ref_kind, //e.g., JVM_REF_invokeVirtual + KlassHandle callee, + symbolHandle name, + symbolHandle signature, + TRAPS); // ask Java to create a dynamic call site, while linking an invokedynamic op static Handle make_dynamic_call_site(Handle bootstrap_method, // Callee information:
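SystemDictionary::link_method_handle_constant above builds the arguments for an upcall into sun.dyn.MethodHandleNatives.linkMethodHandleConstant; before the call it classifies the signature, treating a descriptor that starts with '(' as a method type and anything else as a single field type resolved to a class mirror. A tiny standalone classifier of just that dispatch, names invented:

  #include <cassert>
  #include <string>

  enum class ConstantTypeKind { MethodType, FieldType, Invalid };

  ConstantTypeKind classify_mh_signature(const std::string& sig) {
    if (sig.empty())   return ConstantTypeKind::Invalid;      // real code throws LinkageError "bad signature"
    if (sig[0] == '(') return ConstantTypeKind::MethodType;   // e.g. "(I)V"
    return ConstantTypeKind::FieldType;                       // e.g. "Ljava/lang/String;" or "I"
  }

  int main() {
    assert(classify_mh_signature("(I)V") == ConstantTypeKind::MethodType);
    assert(classify_mh_signature("Ljava/lang/String;") == ConstantTypeKind::FieldType);
    assert(classify_mh_signature("") == ConstantTypeKind::Invalid);
  }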
--- a/hotspot/src/share/vm/classfile/verifier.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/classfile/verifier.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -1598,7 +1598,10 @@ if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) { if (!tag.is_unresolved_string() && !tag.is_unresolved_klass()) { types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float) - | (1 << JVM_CONSTANT_String) | (1 << JVM_CONSTANT_Class); + | (1 << JVM_CONSTANT_String) | (1 << JVM_CONSTANT_Class) + | (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType); + // Note: The class file parser already verified the legality of + // MethodHandle and MethodType constants. verify_cp_type(index, cp, types, CHECK_VERIFY(this)); } } else { @@ -1632,6 +1635,14 @@ current_frame->push_stack_2( VerificationType::long_type(), VerificationType::long2_type(), CHECK_VERIFY(this)); + } else if (tag.is_method_handle()) { + current_frame->push_stack( + VerificationType::reference_type( + vmSymbols::java_dyn_MethodHandle()), CHECK_VERIFY(this)); + } else if (tag.is_method_type()) { + current_frame->push_stack( + VerificationType::reference_type( + vmSymbols::java_dyn_MethodType()), CHECK_VERIFY(this)); } else { verify_error(bci, "Invalid index in ldc"); return; @@ -1920,9 +1931,12 @@ // Get referenced class type VerificationType ref_class_type; if (opcode == Bytecodes::_invokedynamic) { - if (!EnableInvokeDynamic) { + if (!EnableInvokeDynamic || + _klass->major_version() < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { class_format_error( - "invokedynamic instructions not enabled on this JVM", + (!EnableInvokeDynamic ? + "invokedynamic instructions not enabled in this JVM" : + "invokedynamic instructions not supported by this class file version"), _klass->external_name()); return; }
--- a/hotspot/src/share/vm/classfile/verifier.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/classfile/verifier.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -25,7 +25,10 @@ // The verifier class class Verifier : AllStatic { public: - enum { STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50 }; + enum { + STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50, + INVOKEDYNAMIC_MAJOR_VERSION = 51 + }; typedef enum { ThrowException, NoException } Mode; /**
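The verifier changes above accept MethodHandle and MethodType constants for ldc (pushing java.dyn.MethodHandle / java.dyn.MethodType reference types) and gate invokedynamic on two conditions: the EnableInvokeDynamic flag and a class file major version of at least INVOKEDYNAMIC_MAJOR_VERSION (51), choosing the error message by which condition failed. A small sketch of that double gate:

  #include <cassert>
  #include <string>

  const int INVOKEDYNAMIC_MAJOR_VERSION = 51;

  std::string check_invokedynamic(bool enable_flag, int class_major_version) {
    if (!enable_flag)
      return "invokedynamic instructions not enabled in this JVM";
    if (class_major_version < INVOKEDYNAMIC_MAJOR_VERSION)
      return "invokedynamic instructions not supported by this class file version";
    return "";   // empty string means: accepted
  }

  int main() {
    assert( check_invokedynamic(true, 51).empty());
    assert(!check_invokedynamic(false, 51).empty());   // flag off
    assert(!check_invokedynamic(true, 50).empty());    // class file too old
  }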
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -246,6 +246,8 @@ /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */ \ template(findMethodHandleType_name, "findMethodHandleType") \ template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \ + template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \ + template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \ template(makeDynamicCallSite_name, "makeDynamicCallSite") \ template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \ NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
--- a/hotspot/src/share/vm/code/codeBlob.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/code/codeBlob.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -210,6 +210,7 @@ { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); blob = new (size) AdapterBlob(size, cb); + CodeCache::commit(blob); } // Track memory usage statistic after releasing CodeCache_lock MemoryService::track_code_cache_memory_usage(); @@ -281,7 +282,6 @@ tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub); Disassembler::decode(stub->instructions_begin(), stub->instructions_end()); } - VTune::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end()); Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end()); if (JvmtiExport::should_post_dynamic_code_generated()) { @@ -356,7 +356,6 @@ tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); } - VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); if (JvmtiExport::should_post_dynamic_code_generated()) { @@ -414,7 +413,6 @@ tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); } - VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); if (JvmtiExport::should_post_dynamic_code_generated()) { @@ -474,7 +472,6 @@ tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); } - VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); if (JvmtiExport::should_post_dynamic_code_generated()) { @@ -533,7 +530,6 @@ tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); } - VTune::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); if (JvmtiExport::should_post_dynamic_code_generated()) {
--- a/hotspot/src/share/vm/code/codeCache.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/code/codeCache.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -93,6 +93,8 @@ CodeHeap * CodeCache::_heap = new CodeHeap(); int CodeCache::_number_of_blobs = 0; +int CodeCache::_number_of_adapters = 0; +int CodeCache::_number_of_nmethods = 0; int CodeCache::_number_of_nmethods_with_dependencies = 0; bool CodeCache::_needs_cache_clean = false; nmethod* CodeCache::_scavenge_root_nmethods = NULL; @@ -176,8 +178,14 @@ verify_if_often(); print_trace("free", cb); - if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) { - _number_of_nmethods_with_dependencies--; + if (cb->is_nmethod()) { + _number_of_nmethods--; + if (((nmethod *)cb)->has_dependencies()) { + _number_of_nmethods_with_dependencies--; + } + } + if (cb->is_adapter_blob()) { + _number_of_adapters--; } _number_of_blobs--; @@ -191,9 +199,16 @@ void CodeCache::commit(CodeBlob* cb) { // this is called by nmethod::nmethod, which must already own CodeCache_lock assert_locked_or_safepoint(CodeCache_lock); - if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) { - _number_of_nmethods_with_dependencies++; + if (cb->is_nmethod()) { + _number_of_nmethods++; + if (((nmethod *)cb)->has_dependencies()) { + _number_of_nmethods_with_dependencies++; + } } + if (cb->is_adapter_blob()) { + _number_of_adapters++; + } + // flush the hardware I-cache ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size()); }
--- a/hotspot/src/share/vm/code/codeCache.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/code/codeCache.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,8 @@ // 4422213 or 4436291 for details. static CodeHeap * _heap; static int _number_of_blobs; + static int _number_of_adapters; + static int _number_of_nmethods; static int _number_of_nmethods_with_dependencies; static bool _needs_cache_clean; static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() @@ -105,6 +107,8 @@ static nmethod* first_nmethod(); static nmethod* next_nmethod (CodeBlob* cb); static int nof_blobs() { return _number_of_blobs; } + static int nof_adapters() { return _number_of_adapters; } + static int nof_nmethods() { return _number_of_nmethods; } // GC support static void gc_epilogue();
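The CodeCache changes above add per-kind counters (_number_of_adapters, _number_of_nmethods) that are incremented in commit() and decremented in free(), so the cache can report how many adapters and nmethods it currently holds via nof_adapters()/nof_nmethods(). A simplified sketch of that counting, with invented types and no locking; the overall blob total is maintained elsewhere in the real code:

  #include <cassert>

  enum class BlobKind { nmethod, adapter, other };

  struct CodeCacheStats {
    int nmethods = 0, adapters = 0;     // mirrors _number_of_nmethods / _number_of_adapters
    void commit(BlobKind k) {           // blob becomes live in the cache
      if (k == BlobKind::nmethod) nmethods++;
      if (k == BlobKind::adapter) adapters++;
    }
    void free(BlobKind k) {             // blob is removed from the cache
      if (k == BlobKind::nmethod) nmethods--;
      if (k == BlobKind::adapter) adapters--;
    }
    int nof_nmethods() const { return nmethods; }
    int nof_adapters() const { return adapters; }
  };

  int main() {
    CodeCacheStats s;
    s.commit(BlobKind::nmethod);
    s.commit(BlobKind::adapter);
    s.free(BlobKind::adapter);
    assert(s.nof_nmethods() == 1 && s.nof_adapters() == 0);
  }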
--- a/hotspot/src/share/vm/code/nmethod.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/code/nmethod.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -397,11 +397,6 @@ //-------------end of code for ExceptionCache-------------- -void nmFlags::clear() { - assert(sizeof(nmFlags) == sizeof(int), "using more than one word for nmFlags"); - *(jint*)this = 0; -} - int nmethod::total_size() const { return code_size() + @@ -419,8 +414,32 @@ return NULL; } -// %%% This variable is no longer used? -int nmethod::_zombie_instruction_size = NativeJump::instruction_size; +// Fill in default values for various flag fields +void nmethod::init_defaults() { + _state = alive; + _marked_for_reclamation = 0; + _has_flushed_dependencies = 0; + _speculatively_disconnected = 0; + _has_unsafe_access = 0; + _has_method_handle_invokes = 0; + _marked_for_deoptimization = 0; + _lock_count = 0; + _stack_traversal_mark = 0; + _unload_reported = false; // jvmti state + + NOT_PRODUCT(_has_debug_info = false); + _oops_do_mark_link = NULL; + _jmethod_id = NULL; + _osr_link = NULL; + _scavenge_root_link = NULL; + _scavenge_root_state = 0; + _saved_nmethod_link = NULL; + _compiler = NULL; + +#ifdef HAVE_DTRACE_H + _trap_offset = 0; +#endif // def HAVE_DTRACE_H +} nmethod* nmethod::new_native_nmethod(methodHandle method, @@ -580,24 +599,16 @@ debug_only(No_Safepoint_Verifier nsv;) assert_locked_or_safepoint(CodeCache_lock); - NOT_PRODUCT(_has_debug_info = false); - _oops_do_mark_link = NULL; + init_defaults(); _method = method; _entry_bci = InvocationEntryBci; - _osr_link = NULL; - _scavenge_root_link = NULL; - _scavenge_root_state = 0; - _saved_nmethod_link = NULL; - _compiler = NULL; // We have no exception handler or deopt handler make the // values something that will never match a pc like the nmethod vtable entry _exception_offset = 0; _deoptimize_offset = 0; _deoptimize_mh_offset = 0; _orig_pc_offset = 0; -#ifdef HAVE_DTRACE_H - _trap_offset = 0; -#endif // def HAVE_DTRACE_H + _stub_offset = data_offset(); _consts_offset = data_offset(); _oops_offset = data_offset(); @@ -615,17 +626,9 @@ _exception_cache = NULL; _pc_desc_cache.reset_to(NULL); - flags.clear(); - flags.state = alive; - _markedForDeoptimization = 0; - - _lock_count = 0; - _stack_traversal_mark = 0; - code_buffer->copy_oops_to(this); debug_only(verify_scavenge_root_oops()); CodeCache::commit(this); - VTune::create_nmethod(this); } if (PrintNativeNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { @@ -673,14 +676,9 @@ debug_only(No_Safepoint_Verifier nsv;) assert_locked_or_safepoint(CodeCache_lock); - NOT_PRODUCT(_has_debug_info = false); - _oops_do_mark_link = NULL; + init_defaults(); _method = method; _entry_bci = InvocationEntryBci; - _osr_link = NULL; - _scavenge_root_link = NULL; - _scavenge_root_state = 0; - _compiler = NULL; // We have no exception handler or deopt handler make the // values something that will never match a pc like the nmethod vtable entry _exception_offset = 0; @@ -706,17 +704,9 @@ _exception_cache = NULL; _pc_desc_cache.reset_to(NULL); - flags.clear(); - flags.state = alive; - _markedForDeoptimization = 0; - - _lock_count = 0; - _stack_traversal_mark = 0; - code_buffer->copy_oops_to(this); debug_only(verify_scavenge_root_oops()); CodeCache::commit(this); - VTune::create_nmethod(this); } if (PrintNMethods || PrintDebugInfo || PrintRelocations || PrintDependencies) { @@ -781,20 +771,13 @@ debug_only(No_Safepoint_Verifier nsv;) assert_locked_or_safepoint(CodeCache_lock); - NOT_PRODUCT(_has_debug_info = false); - 
_oops_do_mark_link = NULL; + init_defaults(); _method = method; + _entry_bci = entry_bci; _compile_id = compile_id; _comp_level = comp_level; - _entry_bci = entry_bci; - _osr_link = NULL; - _scavenge_root_link = NULL; - _scavenge_root_state = 0; _compiler = compiler; _orig_pc_offset = orig_pc_offset; -#ifdef HAVE_DTRACE_H - _trap_offset = 0; -#endif // def HAVE_DTRACE_H _stub_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start()); // Exception handler and deopt handler are in the stub section @@ -821,15 +804,6 @@ _exception_cache = NULL; _pc_desc_cache.reset_to(scopes_pcs_begin()); - flags.clear(); - flags.state = alive; - _markedForDeoptimization = 0; - - _unload_reported = false; // jvmti state - - _lock_count = 0; - _stack_traversal_mark = 0; - // Copy contents of ScopeDescRecorder to nmethod code_buffer->copy_oops_to(this); debug_info->copy_to(this); @@ -841,8 +815,6 @@ CodeCache::commit(this); - VTune::create_nmethod(this); - // Copy contents of ExceptionHandlerTable to nmethod handler_table->copy_to(this); nul_chk_table->copy_to(this); @@ -988,11 +960,6 @@ } -void nmethod::set_version(int v) { - flags.version = v; -} - - // Promote one word from an assembly-time handle to a live embedded oop. inline void nmethod::initialize_immediate_oop(oop* dest, jobject handle) { if (handle == NULL || @@ -1139,6 +1106,8 @@ // This is a private interface with the sweeper. void nmethod::mark_as_seen_on_stack() { assert(is_not_entrant(), "must be a non-entrant method"); + // Set the traversal mark to ensure that the sweeper does 2 + // cleaning passes before moving to zombie. set_stack_traversal_mark(NMethodSweeper::traversal_count()); } @@ -1207,7 +1176,7 @@ // for later on. CodeCache::set_needs_cache_clean(true); } - flags.state = unloaded; + _state = unloaded; // Log the unloading. log_state_change(); @@ -1233,21 +1202,21 @@ if (LogCompilation) { if (xtty != NULL) { ttyLocker ttyl; // keep the following output all in one block - if (flags.state == unloaded) { + if (_state == unloaded) { xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'", os::current_thread_id()); } else { xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s", os::current_thread_id(), - (flags.state == zombie ? " zombie='1'" : "")); + (_state == zombie ? " zombie='1'" : "")); } log_identity(xtty); xtty->stamp(); xtty->end_elem(); } } - if (PrintCompilation && flags.state != unloaded) { - print_on(tty, flags.state == zombie ? "made zombie " : "made not entrant "); + if (PrintCompilation && _state != unloaded) { + print_on(tty, _state == zombie ? "made zombie " : "made not entrant "); tty->cr(); } } @@ -1258,8 +1227,9 @@ bool was_alive = false; - // Make sure the nmethod is not flushed in case of a safepoint in code below. + // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below. nmethodLocker nml(this); + methodHandle the_method(method()); { // If the method is already zombie there is nothing to do @@ -1279,7 +1249,7 @@ // Enter critical section. Does not block for safepoint. MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); - if (flags.state == state) { + if (_state == state) { // another thread already performed this transition so nothing // to do, but return false to indicate this. 
return false; @@ -1290,17 +1260,37 @@ if (!is_osr_method() && !is_not_entrant()) { NativeJump::patch_verified_entry(entry_point(), verified_entry_point(), SharedRuntime::get_handle_wrong_method_stub()); - assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, ""); } - was_alive = is_in_use(); // Read state under lock + if (is_in_use()) { + // It's a true state change, so mark the method as decompiled. + // Do it only for transition from alive. + inc_decompile_count(); + } // Change state - flags.state = state; + _state = state; // Log the transition once log_state_change(); + // Remove nmethod from method. + // We need to check if both the _code and _from_compiled_code_entry_point + // refer to this nmethod because there is a race in setting these two fields + // in methodOop as seen in bugid 4947125. + // If the vep() points to the zombie nmethod, the memory for the nmethod + // could be flushed and the compiler and vtable stubs could still call + // through it. + if (method() != NULL && (method()->code() == this || + method()->from_compiled_entry() == verified_entry_point())) { + HandleMark hm; + method()->clear_code(); + } + + if (state == not_entrant) { + mark_as_seen_on_stack(); + } + } // leave critical region under Patching_lock // When the nmethod becomes zombie it is no longer alive so the @@ -1308,18 +1298,17 @@ // state will be flushed later when the transition to zombie // happens or they get unloaded. if (state == zombie) { + // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event + // and it hasn't already been reported for this nmethod then report it now. + // (the event may have been reported earilier if the GC marked it for unloading). + post_compiled_method_unload(); + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); flush_dependencies(NULL); } else { assert(state == not_entrant, "other cases may need to be handled differently"); } - if (state == not_entrant) { - Events::log("Make nmethod not entrant " INTPTR_FORMAT, this); - } else { - Events::log("Make nmethod zombie " INTPTR_FORMAT, this); - } - if (TraceCreateZombies) { tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie"); } @@ -1327,47 +1316,6 @@ // Make sweeper aware that there is a zombie method that needs to be removed NMethodSweeper::notify(this); - // not_entrant only stuff - if (state == not_entrant) { - mark_as_seen_on_stack(); - } - - if (was_alive) { - // It's a true state change, so mark the method as decompiled. - // Do it only for transition from alive. - inc_decompile_count(); - } - - // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event - // and it hasn't already been reported for this nmethod then report it now. - // (the event may have been reported earilier if the GC marked it for unloading). - if (state == zombie) { - post_compiled_method_unload(); - } - - - // Zombie only stuff - if (state == zombie) { - VTune::delete_nmethod(this); - } - - // Check whether method got unloaded at a safepoint before this, - // if so we can skip the flushing steps below - if (method() == NULL) return true; - - // Remove nmethod from method. - // We need to check if both the _code and _from_compiled_code_entry_point - // refer to this nmethod because there is a race in setting these two fields - // in methodOop as seen in bugid 4947125. 
- // If the vep() points to the zombie nmethod, the memory for the nmethod - // could be flushed and the compiler and vtable stubs could still call - // through it. - if (method()->code() == this || - method()->from_compiled_entry() == verified_entry_point()) { - HandleMark hm; - method()->clear_code(); - } - return true; } @@ -1488,11 +1436,25 @@ moop->signature()->utf8_length(), code_begin(), code_size()); + if (JvmtiExport::should_post_compiled_method_load() || + JvmtiExport::should_post_compiled_method_unload()) { + get_and_cache_jmethod_id(); + } + if (JvmtiExport::should_post_compiled_method_load()) { JvmtiExport::post_compiled_method_load(this); } } +jmethodID nmethod::get_and_cache_jmethod_id() { + if (_jmethod_id == NULL) { + // Cache the jmethod_id since it can no longer be looked up once the + // method itself has been marked for unloading. + _jmethod_id = method()->jmethod_id(); + } + return _jmethod_id; +} + void nmethod::post_compiled_method_unload() { if (unload_reported()) { // During unloading we transition to unloaded and then to zombie @@ -1504,12 +1466,17 @@ DTRACE_METHOD_UNLOAD_PROBE(method()); // If a JVMTI agent has enabled the CompiledMethodUnload event then - // post the event. Sometime later this nmethod will be made a zombie by - // the sweeper but the methodOop will not be valid at that point. - if (JvmtiExport::should_post_compiled_method_unload()) { + // post the event. Sometime later this nmethod will be made a zombie + // by the sweeper but the methodOop will not be valid at that point. + // If the _jmethod_id is null then no load event was ever requested + // so don't bother posting the unload. The main reason for this is + // that the jmethodID is a weak reference to the methodOop so if + // it's being unloaded there's no way to look it up since the weak + // ref will have been cleared. + if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { assert(!unload_reported(), "already unloaded"); HandleMark hm; - JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin()); + JvmtiExport::post_compiled_method_unload(_jmethod_id, code_begin()); } // The JVMTI CompiledMethodUnload event can be enabled or disabled at @@ -2087,7 +2054,6 @@ void nmethod_init() { // make sure you didn't forget to adjust the filler fields - assert(sizeof(nmFlags) <= 4, "nmFlags occupies more than a word"); assert(sizeof(nmethod) % oopSize == 0, "nmethod size must be multiple of a word"); } @@ -2323,7 +2289,6 @@ tty->print("((nmethod*) "INTPTR_FORMAT ") ", this); tty->print(" for method " INTPTR_FORMAT , (address)method()); tty->print(" { "); - if (version()) tty->print("v%d ", version()); if (is_in_use()) tty->print("in_use "); if (is_not_entrant()) tty->print("not_entrant "); if (is_zombie()) tty->print("zombie "); @@ -2659,13 +2624,10 @@ case Bytecodes::_getstatic: case Bytecodes::_putstatic: { - methodHandle sdm = sd->method(); - Bytecode_field* field = Bytecode_field_at(sdm(), sdm->bcp_from(sd->bci())); - constantPoolOop sdmc = sdm->constants(); - symbolOop name = sdmc->name_ref_at(field->index()); + Bytecode_field* field = Bytecode_field_at(sd->method(), sd->bci()); st->print(" "); - if (name != NULL) - name->print_symbol_on(st); + if (field->name() != NULL) + field->name()->print_symbol_on(st); else st->print("<UNKNOWN>"); }
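Among the nmethod.cpp changes above, one recurring motif is the new _jmethod_id cache: the jmethodID is captured when the CompiledMethodLoad event is requested, because once the method is unloaded the weak reference behind the ID can no longer be looked up, and the later CompiledMethodUnload event would have nothing to report. A standalone sketch of that capture-early, report-late pattern, with invented types:

  #include <cassert>
  #include <string>

  struct Method { std::string id; };

  class CompiledMethod {
    Method* _method;
    std::string _cached_id;            // stands in for the cached jmethodID
  public:
    explicit CompiledMethod(Method* m) : _method(m) {}
    void post_load_event()  { if (_cached_id.empty()) _cached_id = _method->id; }  // capture while alive
    void unload_method()    { _method = nullptr; }                                 // method goes away first
    std::string post_unload_event() const {
      // Without the cache there would be nothing left to report here.
      return _cached_id.empty() ? "<unknown>" : _cached_id;
    }
  };

  int main() {
    Method m{"pkg.Foo::bar()V"};
    CompiledMethod nm(&m);
    nm.post_load_event();
    nm.unload_method();
    assert(nm.post_unload_event() == "pkg.Foo::bar()V");
  }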
--- a/hotspot/src/share/vm/code/nmethod.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/code/nmethod.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -78,29 +78,8 @@ // nmethods (native methods) are the compiled code versions of Java methods. - -struct nmFlags { - friend class VMStructs; - unsigned int version:8; // version number (0 = first version) - unsigned int age:4; // age (in # of sweep steps) - - unsigned int state:2; // {alive, zombie, unloaded) - - unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap? - unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures - unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies - unsigned int markedForReclamation:1; // Used by NMethodSweeper - - unsigned int has_unsafe_access:1; // May fault due to unsafe access. - unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes? - - unsigned int speculatively_disconnected:1; // Marked for potential unload - - void clear(); -}; - - -// A nmethod contains: +// +// An nmethod contains: // - header (the nmethod structure) // [Relocation] // - relocation information @@ -131,10 +110,9 @@ friend class CodeCache; // non-perm oops private: // Shared fields for all nmethod's - static int _zombie_instruction_size; - methodOop _method; int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method + jmethodID _jmethod_id; // Cache of method()->jmethod_id() // To support simple linked-list chaining of nmethods: nmethod* _osr_link; // from instanceKlass::osr_nmethods_head @@ -146,6 +124,11 @@ AbstractCompiler* _compiler; // The compiler which compiled this nmethod + // offsets for entry points + address _entry_point; // entry point with class check + address _verified_entry_point; // entry point without class check + address _osr_entry_point; // entry point for on stack replacement + // Offsets for different nmethod parts int _exception_offset; // All deoptee's will resume execution at this location described by @@ -174,23 +157,31 @@ // pc during a deopt. int _orig_pc_offset; - int _compile_id; // which compilation made this nmethod - int _comp_level; // compilation level + int _compile_id; // which compilation made this nmethod + int _comp_level; // compilation level + + // protected by CodeCache_lock + bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock) + bool _speculatively_disconnected; // Marked for potential unload + + bool _marked_for_reclamation; // Used by NMethodSweeper (set only by sweeper) + bool _marked_for_deoptimization; // Used for stack deoptimization - // offsets for entry points - address _entry_point; // entry point with class check - address _verified_entry_point; // entry point without class check - address _osr_entry_point; // entry point for on stack replacement + // used by jvmti to track if an unload event has been posted for this nmethod. + bool _unload_reported; - nmFlags flags; // various flags to keep track of nmethod state - bool _markedForDeoptimization; // Used for stack deoptimization + // set during construction + unsigned int _has_unsafe_access:1; // May fault due to unsafe access. + unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes? 
+ + // Protected by Patching_lock + unsigned char _state; // {alive, not_entrant, zombie, unloaded) + enum { alive = 0, not_entrant = 1, // uncommon trap has happened but activations may still exist zombie = 2, unloaded = 3 }; - // used by jvmti to track if an unload event has been posted for this nmethod. - bool _unload_reported; jbyte _scavenge_root_state; @@ -269,15 +260,15 @@ bool make_not_entrant_or_zombie(unsigned int state); void inc_decompile_count(); - // used to check that writes to nmFlags are done consistently. - static void check_safepoint() PRODUCT_RETURN; - // Used to manipulate the exception cache void add_exception_cache_entry(ExceptionCache* new_entry); ExceptionCache* exception_cache_entry_for_exception(Handle exception); // Inform external interfaces that a compiled method has been unloaded - inline void post_compiled_method_unload(); + void post_compiled_method_unload(); + + // Initailize fields to their default values + void init_defaults(); public: // create nmethod with entry_bci @@ -392,11 +383,11 @@ address verified_entry_point() const { return _verified_entry_point; } // if klass is correct // flag accessing and manipulation - bool is_in_use() const { return flags.state == alive; } - bool is_alive() const { return flags.state == alive || flags.state == not_entrant; } - bool is_not_entrant() const { return flags.state == not_entrant; } - bool is_zombie() const { return flags.state == zombie; } - bool is_unloaded() const { return flags.state == unloaded; } + bool is_in_use() const { return _state == alive; } + bool is_alive() const { return _state == alive || _state == not_entrant; } + bool is_not_entrant() const { return _state == not_entrant; } + bool is_zombie() const { return _state == zombie; } + bool is_unloaded() const { return _state == unloaded; } // Make the nmethod non entrant. The nmethod will continue to be // alive. It is used when an uncommon trap happens. 
Returns true @@ -409,37 +400,33 @@ bool unload_reported() { return _unload_reported; } void set_unload_reported() { _unload_reported = true; } - bool is_marked_for_deoptimization() const { return _markedForDeoptimization; } - void mark_for_deoptimization() { _markedForDeoptimization = true; } + bool is_marked_for_deoptimization() const { return _marked_for_deoptimization; } + void mark_for_deoptimization() { _marked_for_deoptimization = true; } void make_unloaded(BoolObjectClosure* is_alive, oop cause); bool has_dependencies() { return dependencies_size() != 0; } void flush_dependencies(BoolObjectClosure* is_alive); - bool has_flushed_dependencies() { return flags.hasFlushedDependencies; } - void set_has_flushed_dependencies() { + bool has_flushed_dependencies() { return _has_flushed_dependencies; } + void set_has_flushed_dependencies() { assert(!has_flushed_dependencies(), "should only happen once"); - flags.hasFlushedDependencies = 1; + _has_flushed_dependencies = 1; } - bool is_marked_for_reclamation() const { return flags.markedForReclamation; } - void mark_for_reclamation() { flags.markedForReclamation = 1; } - void unmark_for_reclamation() { flags.markedForReclamation = 0; } + bool is_marked_for_reclamation() const { return _marked_for_reclamation; } + void mark_for_reclamation() { _marked_for_reclamation = 1; } + + bool has_unsafe_access() const { return _has_unsafe_access; } + void set_has_unsafe_access(bool z) { _has_unsafe_access = z; } - bool has_unsafe_access() const { return flags.has_unsafe_access; } - void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; } + bool has_method_handle_invokes() const { return _has_method_handle_invokes; } + void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; } - bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; } - void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; } - - bool is_speculatively_disconnected() const { return flags.speculatively_disconnected; } - void set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; } + bool is_speculatively_disconnected() const { return _speculatively_disconnected; } + void set_speculatively_disconnected(bool z) { _speculatively_disconnected = z; } int comp_level() const { return _comp_level; } - int version() const { return flags.version; } - void set_version(int v); - // Support for oops in scopes and relocs: // Note: index 0 is reserved for null. oop oop_at(int index) const { return index == 0 ? (oop) NULL: *oop_addr_at(index); } @@ -599,6 +586,7 @@ // jvmti support: void post_compiled_method_load_event(); + jmethodID get_and_cache_jmethod_id(); // verify operations void verify();
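The nmethod.hpp rework above drops the packed nmFlags bitfield in favour of plain boolean fields plus a single _state byte (protected by Patching_lock in the real code), with the liveness predicates becoming direct comparisons on that state. A minimal sketch of the resulting shape, without any locking:

  #include <cassert>

  class CompiledCode {
  public:
    enum { alive = 0, not_entrant = 1, zombie = 2, unloaded = 3 };
  private:
    unsigned char _state = alive;            // guarded by a patching lock in the real code
    bool _marked_for_reclamation = false;    // formerly bits inside nmFlags
    bool _has_unsafe_access = false;
  public:
    bool is_in_use()      const { return _state == alive; }
    bool is_alive()       const { return _state == alive || _state == not_entrant; }
    bool is_not_entrant() const { return _state == not_entrant; }
    bool is_zombie()      const { return _state == zombie; }
    void make_not_entrant() { if (_state == alive) _state = not_entrant; }
  };

  int main() {
    CompiledCode nm;
    nm.make_not_entrant();
    assert(nm.is_alive() && !nm.is_in_use() && nm.is_not_entrant());
  }

Splitting the fields out also lets each one carry a comment naming the lock that guards it, which the hunks above add and which a packed bitfield could not express per-field.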
--- a/hotspot/src/share/vm/code/vtableStubs.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/code/vtableStubs.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,7 +50,6 @@ } _chunk = blob->instructions_begin(); _chunk_end = _chunk + bytes; - VTune::register_stub("vtable stub", _chunk, _chunk_end); Forte::register_stub("vtable stub", _chunk, _chunk_end); // Notify JVMTI about this stub. The event will be recorded by the enclosing // JvmtiDynamicCodeEventCollector and posted when this thread has released
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -3972,6 +3972,10 @@ void work(int i) { if (i >= _n_workers) return; // no work needed this round + + double start_time_ms = os::elapsedTime() * 1000.0; + _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); + ResourceMark rm; HandleMark hm; @@ -4019,7 +4023,7 @@ double elapsed_ms = (os::elapsedTime()-start)*1000.0; double term_ms = pss.term_time()*1000.0; _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); - _g1h->g1_policy()->record_termination_time(i, term_ms); + _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); } _g1h->g1_policy()->record_thread_age_table(pss.age_table()); _g1h->update_surviving_young_words(pss.surviving_young_words()+1); @@ -4043,7 +4047,8 @@ double term = pss.term_time(); gclog_or_tty->print(" Elapsed: %7.2f ms.\n" " Strong roots: %7.2f ms (%6.2f%%)\n" - " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", + " Termination: %7.2f ms (%6.2f%%) " + "(in "SIZE_FORMAT" entries)\n", elapsed * 1000.0, strong_roots * 1000.0, (strong_roots*100.0/elapsed), term * 1000.0, (term*100.0/elapsed), @@ -4059,6 +4064,8 @@ assert(pss.refs_to_scan() == 0, "Task queue should be empty"); assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); + double end_time_ms = os::elapsedTime() * 1000.0; + _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); } };
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -1549,7 +1549,7 @@ int _hash_seed; int _queue_num; - int _term_attempts; + size_t _term_attempts; #if G1_DETAILED_STATS int _pushes, _pops, _steals, _steal_attempts; int _overflow_pushes; @@ -1727,8 +1727,8 @@ int* hash_seed() { return &_hash_seed; } int queue_num() { return _queue_num; } - int term_attempts() { return _term_attempts; } - void note_term_attempt() { _term_attempts++; } + size_t term_attempts() { return _term_attempts; } + void note_term_attempt() { _term_attempts++; } #if G1_DETAILED_STATS int pushes() { return _pushes; }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -231,20 +231,21 @@ _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; + _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads]; _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads]; _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads]; - _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads]; _par_last_update_rs_times_ms = new double[_parallel_gc_threads]; _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads]; - _par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads]; _par_last_scan_rs_times_ms = new double[_parallel_gc_threads]; _par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads]; _par_last_obj_copy_times_ms = new double[_parallel_gc_threads]; _par_last_termination_times_ms = new double[_parallel_gc_threads]; + _par_last_termination_attempts = new double[_parallel_gc_threads]; + _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads]; // start conservatively _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis; @@ -274,10 +275,64 @@ // </NEW PREDICTION> - double time_slice = (double) GCPauseIntervalMillis / 1000.0; + // Below, we might need to calculate the pause time target based on + // the pause interval. When we do so we are going to give G1 maximum + // flexibility and allow it to do pauses when it needs to. So, we'll + // arrange that the pause interval to be pause time target + 1 to + // ensure that a) the pause time target is maximized with respect to + // the pause interval and b) we maintain the invariant that pause + // time target < pause interval. If the user does not want this + // maximum flexibility, they will have to set the pause interval + // explicitly. + + // First make sure that, if either parameter is set, its value is + // reasonable. + if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { + if (MaxGCPauseMillis < 1) { + vm_exit_during_initialization("MaxGCPauseMillis should be " + "greater than 0"); + } + } + if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { + if (GCPauseIntervalMillis < 1) { + vm_exit_during_initialization("GCPauseIntervalMillis should be " + "greater than 0"); + } + } + + // Then, if the pause time target parameter was not set, set it to + // the default value. + if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { + if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { + // The default pause time target in G1 is 200ms + FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); + } else { + // We do not allow the pause interval to be set without the + // pause time target + vm_exit_during_initialization("GCPauseIntervalMillis cannot be set " + "without setting MaxGCPauseMillis"); + } + } + + // Then, if the interval parameter was not set, set it according to + // the pause time target (this will also deal with the case when the + // pause time target is the default value). + if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { + FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1); + } + + // Finally, make sure that the two parameters are consistent. 
+ if (MaxGCPauseMillis >= GCPauseIntervalMillis) { + char buffer[256]; + jio_snprintf(buffer, 256, + "MaxGCPauseMillis (%u) should be less than " + "GCPauseIntervalMillis (%u)", + MaxGCPauseMillis, GCPauseIntervalMillis); + vm_exit_during_initialization(buffer); + } + double max_gc_time = (double) MaxGCPauseMillis / 1000.0; - guarantee(max_gc_time < time_slice, - "Max GC time should not be greater than the time slice"); + double time_slice = (double) GCPauseIntervalMillis / 1000.0; _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); _sigma = (double) G1ConfidencePercent / 100.0; @@ -782,16 +837,17 @@ // if they are not set properly for (int i = 0; i < _parallel_gc_threads; ++i) { - _par_last_ext_root_scan_times_ms[i] = -666.0; - _par_last_mark_stack_scan_times_ms[i] = -666.0; - _par_last_update_rs_start_times_ms[i] = -666.0; - _par_last_update_rs_times_ms[i] = -666.0; - _par_last_update_rs_processed_buffers[i] = -666.0; - _par_last_scan_rs_start_times_ms[i] = -666.0; - _par_last_scan_rs_times_ms[i] = -666.0; - _par_last_scan_new_refs_times_ms[i] = -666.0; - _par_last_obj_copy_times_ms[i] = -666.0; - _par_last_termination_times_ms[i] = -666.0; + _par_last_gc_worker_start_times_ms[i] = -1234.0; + _par_last_ext_root_scan_times_ms[i] = -1234.0; + _par_last_mark_stack_scan_times_ms[i] = -1234.0; + _par_last_update_rs_times_ms[i] = -1234.0; + _par_last_update_rs_processed_buffers[i] = -1234.0; + _par_last_scan_rs_times_ms[i] = -1234.0; + _par_last_scan_new_refs_times_ms[i] = -1234.0; + _par_last_obj_copy_times_ms[i] = -1234.0; + _par_last_termination_times_ms[i] = -1234.0; + _par_last_termination_attempts[i] = -1234.0; + _par_last_gc_worker_end_times_ms[i] = -1234.0; } #endif @@ -942,9 +998,9 @@ return sum; } -void G1CollectorPolicy::print_par_stats (int level, - const char* str, - double* data, +void G1CollectorPolicy::print_par_stats(int level, + const char* str, + double* data, bool summary) { double min = data[0], max = data[0]; double total = 0.0; @@ -973,10 +1029,10 @@ gclog_or_tty->print_cr("]"); } -void G1CollectorPolicy::print_par_buffers (int level, - const char* str, - double* data, - bool summary) { +void G1CollectorPolicy::print_par_sizes(int level, + const char* str, + double* data, + bool summary) { double min = data[0], max = data[0]; double total = 0.0; int j; @@ -1321,15 +1377,22 @@ } if (parallel) { print_stats(1, "Parallel Time", _cur_collection_par_time_ms); - print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false); + print_par_stats(2, "GC Worker Start Time", + _par_last_gc_worker_start_times_ms, false); print_par_stats(2, "Update RS", _par_last_update_rs_times_ms); - print_par_buffers(3, "Processed Buffers", - _par_last_update_rs_processed_buffers, true); - print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms); - print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms); + print_par_sizes(3, "Processed Buffers", + _par_last_update_rs_processed_buffers, true); + print_par_stats(2, "Ext Root Scanning", + _par_last_ext_root_scan_times_ms); + print_par_stats(2, "Mark Stack Scanning", + _par_last_mark_stack_scan_times_ms); print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); print_par_stats(2, "Termination", _par_last_termination_times_ms); + print_par_sizes(3, "Termination Attempts", + _par_last_termination_attempts, true); + print_par_stats(2, "GC Worker End Time", + _par_last_gc_worker_end_times_ms, false); print_stats(2, 
"Other", parallel_other_time); print_stats(1, "Clear CT", _cur_clear_ct_time_ms); } else {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -171,16 +171,17 @@ double* _cur_aux_times_ms; bool* _cur_aux_times_set; + double* _par_last_gc_worker_start_times_ms; double* _par_last_ext_root_scan_times_ms; double* _par_last_mark_stack_scan_times_ms; - double* _par_last_update_rs_start_times_ms; double* _par_last_update_rs_times_ms; double* _par_last_update_rs_processed_buffers; - double* _par_last_scan_rs_start_times_ms; double* _par_last_scan_rs_times_ms; double* _par_last_scan_new_refs_times_ms; double* _par_last_obj_copy_times_ms; double* _par_last_termination_times_ms; + double* _par_last_termination_attempts; + double* _par_last_gc_worker_end_times_ms; // indicates that we are in young GC mode bool _in_young_gc_mode; @@ -559,13 +560,14 @@ } protected: - void print_stats (int level, const char* str, double value); - void print_stats (int level, const char* str, int value); - void print_par_stats (int level, const char* str, double* data) { + void print_stats(int level, const char* str, double value); + void print_stats(int level, const char* str, int value); + + void print_par_stats(int level, const char* str, double* data) { print_par_stats(level, str, data, true); } - void print_par_stats (int level, const char* str, double* data, bool summary); - void print_par_buffers (int level, const char* str, double* data, bool summary); + void print_par_stats(int level, const char* str, double* data, bool summary); + void print_par_sizes(int level, const char* str, double* data, bool summary); void check_other_times(int level, NumberSeq* other_times_ms, @@ -891,6 +893,10 @@ virtual void record_full_collection_start(); virtual void record_full_collection_end(); + void record_gc_worker_start_time(int worker_i, double ms) { + _par_last_gc_worker_start_times_ms[worker_i] = ms; + } + void record_ext_root_scan_time(int worker_i, double ms) { _par_last_ext_root_scan_times_ms[worker_i] = ms; } @@ -912,10 +918,6 @@ _all_mod_union_times_ms->add(ms); } - void record_update_rs_start_time(int thread, double ms) { - _par_last_update_rs_start_times_ms[thread] = ms; - } - void record_update_rs_time(int thread, double ms) { _par_last_update_rs_times_ms[thread] = ms; } @@ -925,10 +927,6 @@ _par_last_update_rs_processed_buffers[thread] = processed_buffers; } - void record_scan_rs_start_time(int thread, double ms) { - _par_last_scan_rs_start_times_ms[thread] = ms; - } - void record_scan_rs_time(int thread, double ms) { _par_last_scan_rs_times_ms[thread] = ms; } @@ -953,16 +951,13 @@ _par_last_obj_copy_times_ms[thread] += ms; } - void record_obj_copy_time(double ms) { - record_obj_copy_time(0, ms); + void record_termination(int thread, double ms, size_t attempts) { + _par_last_termination_times_ms[thread] = ms; + _par_last_termination_attempts[thread] = (double) attempts; } - void record_termination_time(int thread, double ms) { - _par_last_termination_times_ms[thread] = ms; - } - - void record_termination_time(double ms) { - record_termination_time(0, ms); + void record_gc_worker_end_time(int worker_i, double ms) { + _par_last_gc_worker_end_times_ms[worker_i] = ms; } void record_pause_time_ms(double ms) {
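Illustrative aside (not part of the patch): the new G1CollectorPolicy initialization above ties MaxGCPauseMillis and GCPauseIntervalMillis together: explicitly set values must be positive, an unset pause target defaults to 200 ms, an unset interval is derived as target + 1, and the target must stay strictly below the interval. A minimal standalone sketch of that decision sequence, using hypothetical plain fields in place of HotSpot's FLAG_IS_DEFAULT / FLAG_SET_DEFAULT machinery:

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for the two G1 flags and their "still default" state.
    struct PauseFlags {
      unsigned target_ms;          // MaxGCPauseMillis
      unsigned interval_ms;        // GCPauseIntervalMillis
      bool     target_is_default;
      bool     interval_is_default;
    };

    static void fail(const char* msg) { fprintf(stderr, "%s\n", msg); exit(1); }

    // Same ordering of checks as the patch: validate explicit values, default
    // the target, derive the interval, then enforce target < interval.
    static void init_pause_flags(PauseFlags& f) {
      if (!f.target_is_default && f.target_ms < 1)
        fail("MaxGCPauseMillis should be greater than 0");
      if (!f.interval_is_default && f.interval_ms < 1)
        fail("GCPauseIntervalMillis should be greater than 0");
      if (f.target_is_default) {
        if (!f.interval_is_default)
          fail("GCPauseIntervalMillis cannot be set without setting MaxGCPauseMillis");
        f.target_ms = 200;                 // default pause time target
      }
      if (f.interval_is_default)
        f.interval_ms = f.target_ms + 1;   // give G1 maximum flexibility
      if (f.target_ms >= f.interval_ms)
        fail("MaxGCPauseMillis should be less than GCPauseIntervalMillis");
    }

    int main() {
      PauseFlags f = { 0, 0, true, true };  // user set neither flag
      init_pause_flags(f);
      printf("target=%u ms, interval=%u ms\n", f.target_ms, f.interval_ms);
      return 0;
    }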
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -303,7 +303,6 @@ assert( _cards_scanned != NULL, "invariant" ); _cards_scanned[worker_i] = scanRScl.cards_done(); - _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0); _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0); } @@ -311,8 +310,6 @@ ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); double start = os::elapsedTime(); - _g1p->record_update_rs_start_time(worker_i, start * 1000.0); - // Apply the appropriate closure to all remaining log entries. _g1->iterate_dirty_card_closure(false, worker_i); // Now there should be no dirty cards. @@ -471,7 +468,6 @@ updateRS(worker_i); scanNewRefsRS(oc, worker_i); } else { - _g1p->record_update_rs_start_time(worker_i, os::elapsedTime() * 1000.0); _g1p->record_update_rs_processed_buffers(worker_i, 0.0); _g1p->record_update_rs_time(worker_i, 0.0); _g1p->record_scan_new_refs_time(worker_i, 0.0); @@ -479,7 +475,6 @@ if (G1UseParallelRSetScanning || (worker_i == 0)) { scanRS(oc, worker_i); } else { - _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0); _g1p->record_scan_rs_time(worker_i, 0.0); } } else {
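Illustrative aside (not part of the patch): with per-worker start and end times now recorded directly (and the separate *_start_times_ms arrays plus their record_* calls removed from g1RemSet.cpp above), every per-worker array feeds the same reporting shape: reset each slot to a sentinel, let each GC worker fill in its own slot, then print an average with min and max. A hedged sketch of that summary step, with hypothetical names:

    #include <cstdio>
    #include <vector>

    // Hypothetical helper shaped like print_par_stats(): one value per
    // parallel GC worker, reported as average plus min and max.
    static void print_worker_summary(const char* label,
                                     const std::vector<double>& per_worker_ms) {
      double min = per_worker_ms[0], max = per_worker_ms[0], total = 0.0;
      for (double v : per_worker_ms) {
        if (v < min) min = v;
        if (v > max) max = v;
        total += v;
      }
      printf("  [%s (ms): Avg: %.1f Min: %.1f Max: %.1f]\n",
             label, total / per_worker_ms.size(), min, max);
    }

    int main() {
      // Each worker writes its own slot; the patch resets slots to -1234.0
      // so an entry that was never filled in stands out in the output.
      std::vector<double> termination_ms = { 1.2, 0.9, 1.5, 1.1 };
      print_worker_summary("Termination", termination_ms);
      return 0;
    }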
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge Thu Jul 29 22:04:41 2010 -0700 @@ -270,7 +270,7 @@ psParallelCompact.cpp pcTasks.hpp psParallelCompact.cpp psMarkSweep.hpp psParallelCompact.cpp psMarkSweepDecorator.hpp -psParallelCompact.cpp psCompactionManager.hpp +psParallelCompact.cpp psCompactionManager.inline.hpp psParallelCompact.cpp psPromotionManager.inline.hpp psParallelCompact.cpp psOldGen.hpp psParallelCompact.cpp psParallelCompact.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -566,14 +566,14 @@ #endif // Commit new or uncommit old pages, if necessary. - resize_commit_uncommit(changed_region, new_region); + if (resize_commit_uncommit(changed_region, new_region)) { + // Set the new start of the committed region + resize_update_committed_table(changed_region, new_region); + } // Update card table entries resize_update_card_table_entries(changed_region, new_region); - // Set the new start of the committed region - resize_update_committed_table(changed_region, new_region); - // Update the covered region resize_update_covered_table(changed_region, new_region); @@ -604,8 +604,9 @@ debug_only(verify_guard();) } -void CardTableExtension::resize_commit_uncommit(int changed_region, +bool CardTableExtension::resize_commit_uncommit(int changed_region, MemRegion new_region) { + bool result = false; // Commit new or uncommit old pages, if necessary. MemRegion cur_committed = _committed[changed_region]; assert(_covered[changed_region].end() == new_region.end(), @@ -675,20 +676,31 @@ "card table expansion"); } } + result = true; } else if (new_start_aligned > cur_committed.start()) { // Shrink the committed region +#if 0 // uncommitting space is currently unsafe because of the interactions + // of growing and shrinking regions. One region A can uncommit space + // that it owns but which is being used by another region B (maybe). + // Region B has not committed the space because it was already + // committed by region A. MemRegion uncommit_region = committed_unique_to_self(changed_region, MemRegion(cur_committed.start(), new_start_aligned)); if (!uncommit_region.is_empty()) { if (!os::uncommit_memory((char*)uncommit_region.start(), uncommit_region.byte_size())) { - vm_exit_out_of_memory(uncommit_region.byte_size(), - "card table contraction"); + // If the uncommit fails, ignore it. Let the + // committed table resizing go even though the committed + // table will over state the committed space. } } +#else + assert(!result, "Should be false with current workaround"); +#endif } assert(_committed[changed_region].end() == cur_committed.end(), "end should not change"); + return result; } void CardTableExtension::resize_update_committed_table(int changed_region,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,9 @@ class CardTableExtension : public CardTableModRefBS { private: // Support methods for resizing the card table. - void resize_commit_uncommit(int changed_region, MemRegion new_region); + // resize_commit_uncommit() returns true if the pages were committed or + // uncommitted + bool resize_commit_uncommit(int changed_region, MemRegion new_region); void resize_update_card_table_entries(int changed_region, MemRegion new_region); void resize_update_committed_table(int changed_region, MemRegion new_region);
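Illustrative aside (not part of the patch): the cardTableExtension change above reorders the resize steps: the committed-table bookkeeping is updated only when resize_commit_uncommit() actually committed pages, and the shrink path is disabled because one region could uncommit space that a neighbouring region still relies on. A minimal sketch of that control flow, with hypothetical types and with the real commit call left out:

    #include <cstdio>

    // Hypothetical stand-in for a memory region, just enough for the sketch.
    struct Region { char* start; char* end; };

    // Returns true only if pages were (notionally) committed; shrinking is
    // skipped entirely, matching the workaround in the patch.
    static bool resize_commit_uncommit(Region& committed, const Region& wanted) {
      if (wanted.start < committed.start) {
        committed.start = wanted.start;   // real code commits pages here
        return true;
      }
      // Shrink request: deliberately do nothing, another region may still be
      // using the space this region would uncommit.
      return false;
    }

    static void resize(Region& committed, const Region& wanted) {
      if (resize_commit_uncommit(committed, wanted)) {
        // Only now is it safe to record the new start of the committed region.
        printf("committed start moved to %p\n", (void*)committed.start);
      }
      // Card table entries and the covered table are updated unconditionally.
    }

    int main() {
      static char backing[64];
      Region committed = { backing + 32, backing + 64 };
      Region wanted    = { backing + 16, backing + 64 };
      resize(committed, wanted);
      return 0;
    }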
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ ParCompactionManager::_objarray_queues = NULL; ObjectStartArray* ParCompactionManager::_start_array = NULL; ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL; -RegionTaskQueueSet* ParCompactionManager::_region_array = NULL; +RegionTaskQueueSet* ParCompactionManager::_region_array = NULL; ParCompactionManager::ParCompactionManager() : _action(CopyAndUpdate) { @@ -43,25 +43,9 @@ _old_gen = heap->old_gen(); _start_array = old_gen()->start_array(); - marking_stack()->initialize(); - - // We want the overflow stack to be permanent - _overflow_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true); - - _objarray_queue.initialize(); - _objarray_overflow_stack = - new (ResourceObj::C_HEAP) ObjArrayOverflowStack(10, true); - -#ifdef USE_RegionTaskQueueWithOverflow + _objarray_stack.initialize(); region_stack()->initialize(); -#else - region_stack()->initialize(); - - // We want the overflow stack to be permanent - _region_overflow_stack = - new (ResourceObj::C_HEAP) GrowableArray<size_t>(10, true); -#endif // Note that _revisit_klass_stack is allocated out of the // C heap (as opposed to out of ResourceArena). @@ -71,12 +55,9 @@ // From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will // have to do for now until we are able to investigate a more optimal setting. 
_revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true); - } ParCompactionManager::~ParCompactionManager() { - delete _overflow_stack; - delete _objarray_overflow_stack; delete _revisit_klass_stack; delete _revisit_mdo_stack; // _manager_array and _stack_array are statics @@ -108,12 +89,8 @@ _manager_array[i] = new ParCompactionManager(); guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager"); stack_array()->register_queue(i, _manager_array[i]->marking_stack()); - _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_queue); -#ifdef USE_RegionTaskQueueWithOverflow - region_array()->register_queue(i, _manager_array[i]->region_stack()->task_queue()); -#else + _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack); region_array()->register_queue(i, _manager_array[i]->region_stack()); -#endif } // The VMThread gets its own ParCompactionManager, which is not available @@ -149,57 +126,6 @@ return action() == ParCompactionManager::ResetObjects; } -// For now save on a stack -void ParCompactionManager::save_for_scanning(oop m) { - stack_push(m); -} - -void ParCompactionManager::stack_push(oop obj) { - - if(!marking_stack()->push(obj)) { - overflow_stack()->push(obj); - } -} - -oop ParCompactionManager::retrieve_for_scanning() { - - // Should not be used in the parallel case - ShouldNotReachHere(); - return NULL; -} - -// Save region on a stack -void ParCompactionManager::save_for_processing(size_t region_index) { -#ifdef ASSERT - const ParallelCompactData& sd = PSParallelCompact::summary_data(); - ParallelCompactData::RegionData* const region_ptr = sd.region(region_index); - assert(region_ptr->claimed(), "must be claimed"); - assert(region_ptr->_pushed++ == 0, "should only be pushed once"); -#endif - region_stack_push(region_index); -} - -void ParCompactionManager::region_stack_push(size_t region_index) { - -#ifdef USE_RegionTaskQueueWithOverflow - region_stack()->save(region_index); -#else - if(!region_stack()->push(region_index)) { - region_overflow_stack()->push(region_index); - } -#endif -} - -bool ParCompactionManager::retrieve_for_processing(size_t& region_index) { -#ifdef USE_RegionTaskQueueWithOverflow - return region_stack()->retrieve(region_index); -#else - // Should not be used in the parallel case - ShouldNotReachHere(); - return false; -#endif -} - ParCompactionManager* ParCompactionManager::gc_thread_compaction_manager(int index) { assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range"); @@ -218,8 +144,8 @@ do { // Drain the overflow stack first, to allow stealing from the marking stack. oop obj; - while (!overflow_stack()->is_empty()) { - overflow_stack()->pop()->follow_contents(this); + while (marking_stack()->pop_overflow(obj)) { + obj->follow_contents(this); } while (marking_stack()->pop_local(obj)) { obj->follow_contents(this); @@ -227,11 +153,10 @@ // Process ObjArrays one at a time to avoid marking stack bloat. 
ObjArrayTask task; - if (!_objarray_overflow_stack->is_empty()) { - task = _objarray_overflow_stack->pop(); + if (_objarray_stack.pop_overflow(task)) { objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint(); k->oop_follow_contents(this, task.obj(), task.index()); - } else if (_objarray_queue.pop_local(task)) { + } else if (_objarray_stack.pop_local(task)) { objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint(); k->oop_follow_contents(this, task.obj(), task.index()); } @@ -240,68 +165,18 @@ assert(marking_stacks_empty(), "Sanity"); } -void ParCompactionManager::drain_region_overflow_stack() { - size_t region_index = (size_t) -1; - while(region_stack()->retrieve_from_overflow(region_index)) { - PSParallelCompact::fill_and_update_region(this, region_index); - } -} - void ParCompactionManager::drain_region_stacks() { -#ifdef ASSERT - ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); - MutableSpace* to_space = heap->young_gen()->to_space(); - MutableSpace* old_space = heap->old_gen()->object_space(); - MutableSpace* perm_space = heap->perm_gen()->object_space(); -#endif /* ASSERT */ - -#if 1 // def DO_PARALLEL - the serial code hasn't been updated do { - -#ifdef USE_RegionTaskQueueWithOverflow - // Drain overflow stack first, so other threads can steal from - // claimed stack while we work. - size_t region_index = (size_t) -1; - while(region_stack()->retrieve_from_overflow(region_index)) { + // Drain overflow stack first so other threads can steal. + size_t region_index; + while (region_stack()->pop_overflow(region_index)) { PSParallelCompact::fill_and_update_region(this, region_index); } - while (region_stack()->retrieve_from_stealable_queue(region_index)) { + while (region_stack()->pop_local(region_index)) { PSParallelCompact::fill_and_update_region(this, region_index); } } while (!region_stack()->is_empty()); -#else - // Drain overflow stack first, so other threads can steal from - // claimed stack while we work. - while(!region_overflow_stack()->is_empty()) { - size_t region_index = region_overflow_stack()->pop(); - PSParallelCompact::fill_and_update_region(this, region_index); - } - - size_t region_index = -1; - // obj is a reference!!! - while (region_stack()->pop_local(region_index)) { - // It would be nice to assert about the type of objects we might - // pop, but they can come from anywhere, unfortunately. - PSParallelCompact::fill_and_update_region(this, region_index); - } - } while((region_stack()->size() != 0) || - (region_overflow_stack()->length() != 0)); -#endif - -#ifdef USE_RegionTaskQueueWithOverflow - assert(region_stack()->is_empty(), "Sanity"); -#else - assert(region_stack()->size() == 0, "Sanity"); - assert(region_overflow_stack()->length() == 0, "Sanity"); -#endif -#else - oop obj; - while (obj = retrieve_for_scanning()) { - obj->follow_contents(this); - } -#endif } #ifdef ASSERT
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -59,10 +59,10 @@ private: // 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB - #define OBJARRAY_QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13)) - typedef GenericTaskQueue<ObjArrayTask, OBJARRAY_QUEUE_SIZE> ObjArrayTaskQueue; - typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet; - #undef OBJARRAY_QUEUE_SIZE + #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13)) + typedef OverflowTaskQueue<ObjArrayTask, QUEUE_SIZE> ObjArrayTaskQueue; + typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet; + #undef QUEUE_SIZE static ParCompactionManager** _manager_array; static OopTaskQueueSet* _stack_array; @@ -72,23 +72,13 @@ static PSOldGen* _old_gen; private: - OopTaskQueue _marking_stack; - GrowableArray<oop>* _overflow_stack; - - typedef GrowableArray<ObjArrayTask> ObjArrayOverflowStack; - ObjArrayTaskQueue _objarray_queue; - ObjArrayOverflowStack* _objarray_overflow_stack; + OverflowTaskQueue<oop> _marking_stack; + ObjArrayTaskQueue _objarray_stack; // Is there a way to reuse the _marking_stack for the // saving empty regions? For now just create a different // type of TaskQueue. - -#ifdef USE_RegionTaskQueueWithOverflow - RegionTaskQueueWithOverflow _region_stack; -#else RegionTaskQueue _region_stack; - GrowableArray<size_t>* _region_overflow_stack; -#endif #if 1 // does this happen enough to need a per thread stack? GrowableArray<Klass*>* _revisit_klass_stack; @@ -107,16 +97,8 @@ protected: // Array of tasks. Needed by the ParallelTaskTerminator. static RegionTaskQueueSet* region_array() { return _region_array; } - OopTaskQueue* marking_stack() { return &_marking_stack; } - GrowableArray<oop>* overflow_stack() { return _overflow_stack; } -#ifdef USE_RegionTaskQueueWithOverflow - RegionTaskQueueWithOverflow* region_stack() { return &_region_stack; } -#else - RegionTaskQueue* region_stack() { return &_region_stack; } - GrowableArray<size_t>* region_overflow_stack() { - return _region_overflow_stack; - } -#endif + OverflowTaskQueue<oop>* marking_stack() { return &_marking_stack; } + RegionTaskQueue* region_stack() { return &_region_stack; } // Pushes onto the marking stack. If the marking stack is full, // pushes onto the overflow stack. @@ -124,11 +106,7 @@ // Do not implement an equivalent stack_pop. Deal with the // marking stack and overflow stack directly. - // Pushes onto the region stack. If the region stack is full, - // pushes onto the region overflow stack. - void region_stack_push(size_t region_index); - -public: + public: Action action() { return _action; } void set_action(Action v) { _action = v; } @@ -157,22 +135,15 @@ GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; } #endif - // Save oop for later processing. Must not fail. - void save_for_scanning(oop m); - // Get a oop for scanning. If returns null, no oop were found. - oop retrieve_for_scanning(); - - inline void push_objarray(oop obj, size_t index); - - // Save region for later processing. Must not fail. 
- void save_for_processing(size_t region_index); - // Get a region for processing. If returns null, no region were found. - bool retrieve_for_processing(size_t& region_index); + // Save for later processing. Must not fail. + inline void push(oop obj) { _marking_stack.push(obj); } + inline void push_objarray(oop objarray, size_t index); + inline void push_region(size_t index); // Access function for compaction managers static ParCompactionManager* gc_thread_compaction_manager(int index); - static bool steal(int queue_num, int* seed, Task& t) { + static bool steal(int queue_num, int* seed, oop& t) { return stack_array()->steal(queue_num, seed, t); } @@ -180,8 +151,8 @@ return _objarray_queues->steal(queue_num, seed, t); } - static bool steal(int queue_num, int* seed, RegionTask& t) { - return region_array()->steal(queue_num, seed, t); + static bool steal(int queue_num, int* seed, size_t& region) { + return region_array()->steal(queue_num, seed, region); } // Process tasks remaining on any marking stack @@ -191,9 +162,6 @@ // Process tasks remaining on any stack void drain_region_stacks(); - // Process tasks remaining on any stack - void drain_region_overflow_stack(); - // Debugging support #ifdef ASSERT bool stacks_have_been_allocated(); @@ -208,6 +176,5 @@ } bool ParCompactionManager::marking_stacks_empty() const { - return _marking_stack.size() == 0 && _overflow_stack->is_empty() && - _objarray_queue.size() == 0 && _objarray_overflow_stack->is_empty(); + return _marking_stack.is_empty() && _objarray_stack.is_empty(); }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -26,7 +26,16 @@ { ObjArrayTask task(obj, index); assert(task.is_valid(), "bad ObjArrayTask"); - if (!_objarray_queue.push(task)) { - _objarray_overflow_stack->push(task); - } + _objarray_stack.push(task); } + +void ParCompactionManager::push_region(size_t index) +{ +#ifdef ASSERT + const ParallelCompactData& sd = PSParallelCompact::summary_data(); + ParallelCompactData::RegionData* const region_ptr = sd.region(index); + assert(region_ptr->claimed(), "must be claimed"); + assert(region_ptr->_pushed++ == 0, "should only be pushed once"); +#endif + region_stack()->push(index); +}
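Illustrative aside (not part of the patch): the psCompactionManager files above replace each bounded task queue plus separate GrowableArray overflow stack with a single OverflowTaskQueue, whose push() spills into an unbounded, owner-only overflow area once the fixed-size, stealable queue is full. A simplified, hypothetical sketch of that idea (not HotSpot's actual template):

    #include <cstddef>
    #include <deque>
    #include <vector>

    // Toy combined queue: a bounded part that other workers could steal from,
    // plus a private overflow list that absorbs pushes when the bounded part
    // is full. Locking and atomics are omitted for brevity.
    template <class T, size_t N>
    class ToyOverflowTaskQueue {
      std::deque<T>  _local;     // stands in for the bounded, stealable queue
      std::vector<T> _overflow;  // unbounded spill area, owner-only
    public:
      void push(const T& t) {
        if (_local.size() < N) _local.push_back(t);
        else                   _overflow.push_back(t);   // spill, never fail
      }
      bool pop_local(T& t) {
        if (_local.empty()) return false;
        t = _local.back(); _local.pop_back(); return true;
      }
      bool pop_overflow(T& t) {
        if (_overflow.empty()) return false;
        t = _overflow.back(); _overflow.pop_back(); return true;
      }
      size_t size() const          { return _local.size(); }
      bool taskqueue_empty() const { return _local.empty(); }
      bool overflow_empty() const  { return _overflow.empty(); }
      bool is_empty() const        { return taskqueue_empty() && overflow_empty(); }
    };

    int main() {
      ToyOverflowTaskQueue<int, 4> q;
      for (int i = 0; i < 6; ++i) q.push(i);   // the last two pushes spill over
      int t, drained = 0;
      while (q.pop_overflow(t)) ++drained;     // owner drains its spill first
      while (q.pop_local(t))    ++drained;     // then the stealable part
      return (drained == 6 && q.is_empty()) ? 0 : 1;
    }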
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -2474,7 +2474,7 @@ for (size_t cur = end_region - 1; cur >= beg_region; --cur) { if (sd.region(cur)->claim_unsafe()) { ParCompactionManager* cm = ParCompactionManager::manager_array(which); - cm->save_for_processing(cur); + cm->push_region(cur); if (TraceParallelOldGCCompactionPhase && Verbose) { const size_t count_mod_8 = fillable_regions & 7; @@ -3138,7 +3138,7 @@ assert(cur->data_size() > 0, "region must have live data"); cur->decrement_destination_count(); if (cur < enqueue_end && cur->available() && cur->claim()) { - cm->save_for_processing(sd.region(cur)); + cm->push_region(sd.region(cur)); } } }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1297,11 +1297,8 @@ T heap_oop = oopDesc::load_heap_oop(p); if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - if (mark_bitmap()->is_unmarked(obj)) { - if (mark_obj(obj)) { - // This thread marked the object and owns the subsequent processing of it. - cm->save_for_scanning(obj); - } + if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) { + cm->push(obj); } } }
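Illustrative aside (not part of the patch): the simplified test above, mark_bitmap()->is_unmarked(obj) && mark_obj(obj), keeps the usual claim-then-push discipline: only the thread whose mark_obj() call actually sets the mark pushes the object, so each object is traced exactly once even when several workers reach it. A hedged sketch of that idea with a hypothetical atomic mark bit:

    #include <atomic>
    #include <vector>

    // Hypothetical object with a single atomic mark flag.
    struct ToyObject {
      std::atomic<bool> marked{false};
    };

    // Returns true only for the thread that actually flipped the mark,
    // mirroring the mark_obj(obj) half of the test in the patch.
    static bool mark_obj(ToyObject* obj) {
      bool expected = false;
      return obj->marked.compare_exchange_strong(expected, true);
    }

    // Caller-side pattern: unmarked and won the race, so push for tracing.
    static void mark_and_push(ToyObject* obj, std::vector<ToyObject*>& stack) {
      if (!obj->marked.load() && mark_obj(obj)) {
        stack.push_back(obj);   // this thread now owns processing of obj
      }
    }

    int main() {
      ToyObject o;
      std::vector<ToyObject*> stack;
      mark_and_push(&o, stack);
      mark_and_push(&o, stack);   // second call is a no-op: already marked
      return stack.size() == 1 ? 0 : 1;
    }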
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -94,45 +94,13 @@ print_stats(); #endif // PS_PM_STATS - for(uint i=0; i<ParallelGCThreads+1; i++) { + for (uint i = 0; i < ParallelGCThreads + 1; i++) { PSPromotionManager* manager = manager_array(i); - - // the guarantees are a bit gratuitous but, if one fires, we'll - // have a better idea of what went wrong - if (i < ParallelGCThreads) { - guarantee((!UseDepthFirstScavengeOrder || - manager->overflow_stack_depth()->length() <= 0), - "promotion manager overflow stack must be empty"); - guarantee((UseDepthFirstScavengeOrder || - manager->overflow_stack_breadth()->length() <= 0), - "promotion manager overflow stack must be empty"); - - guarantee((!UseDepthFirstScavengeOrder || - manager->claimed_stack_depth()->size() <= 0), - "promotion manager claimed stack must be empty"); - guarantee((UseDepthFirstScavengeOrder || - manager->claimed_stack_breadth()->size() <= 0), - "promotion manager claimed stack must be empty"); + if (UseDepthFirstScavengeOrder) { + assert(manager->claimed_stack_depth()->is_empty(), "should be empty"); } else { - guarantee((!UseDepthFirstScavengeOrder || - manager->overflow_stack_depth()->length() <= 0), - "VM Thread promotion manager overflow stack " - "must be empty"); - guarantee((UseDepthFirstScavengeOrder || - manager->overflow_stack_breadth()->length() <= 0), - "VM Thread promotion manager overflow stack " - "must be empty"); - - guarantee((!UseDepthFirstScavengeOrder || - manager->claimed_stack_depth()->size() <= 0), - "VM Thread promotion manager claimed stack " - "must be empty"); - guarantee((UseDepthFirstScavengeOrder || - manager->claimed_stack_breadth()->size() <= 0), - "VM Thread promotion manager claimed stack " - "must be empty"); + assert(manager->claimed_stack_breadth()->is_empty(), "should be empty"); } - manager->flush_labs(); } } @@ -181,15 +149,9 @@ if (depth_first()) { claimed_stack_depth()->initialize(); queue_size = claimed_stack_depth()->max_elems(); - // We want the overflow stack to be permanent - _overflow_stack_depth = new (ResourceObj::C_HEAP) GrowableArray<StarTask>(10, true); - _overflow_stack_breadth = NULL; } else { claimed_stack_breadth()->initialize(); queue_size = claimed_stack_breadth()->max_elems(); - // We want the overflow stack to be permanent - _overflow_stack_breadth = new (ResourceObj::C_HEAP) GrowableArray<oop>(10, true); - _overflow_stack_depth = NULL; } _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0); @@ -209,8 +171,7 @@ } void PSPromotionManager::reset() { - assert(claimed_stack_empty(), "reset of non-empty claimed stack"); - assert(overflow_stack_empty(), "reset of non-empty overflow stack"); + assert(stacks_empty(), "reset of non-empty stack"); // We need to get an assert in here to make sure the labs are always flushed. 
@@ -243,7 +204,7 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) { assert(depth_first(), "invariant"); - assert(overflow_stack_depth() != NULL, "invariant"); + assert(claimed_stack_depth()->overflow_stack() != NULL, "invariant"); totally_drain = totally_drain || _totally_drain; #ifdef ASSERT @@ -254,41 +215,35 @@ MutableSpace* perm_space = heap->perm_gen()->object_space(); #endif /* ASSERT */ + OopStarTaskQueue* const tq = claimed_stack_depth(); do { StarTask p; // Drain overflow stack first, so other threads can steal from // claimed stack while we work. - while(!overflow_stack_depth()->is_empty()) { - // linux compiler wants different overloaded operator= in taskqueue to - // assign to p that the other compilers don't like. - StarTask ptr = overflow_stack_depth()->pop(); - process_popped_location_depth(ptr); + while (tq->pop_overflow(p)) { + process_popped_location_depth(p); } if (totally_drain) { - while (claimed_stack_depth()->pop_local(p)) { + while (tq->pop_local(p)) { process_popped_location_depth(p); } } else { - while (claimed_stack_depth()->size() > _target_stack_size && - claimed_stack_depth()->pop_local(p)) { + while (tq->size() > _target_stack_size && tq->pop_local(p)) { process_popped_location_depth(p); } } - } while( (totally_drain && claimed_stack_depth()->size() > 0) || - (overflow_stack_depth()->length() > 0) ); + } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty()); - assert(!totally_drain || claimed_stack_empty(), "Sanity"); - assert(totally_drain || - claimed_stack_depth()->size() <= _target_stack_size, - "Sanity"); - assert(overflow_stack_empty(), "Sanity"); + assert(!totally_drain || tq->taskqueue_empty(), "Sanity"); + assert(totally_drain || tq->size() <= _target_stack_size, "Sanity"); + assert(tq->overflow_empty(), "Sanity"); } void PSPromotionManager::drain_stacks_breadth(bool totally_drain) { assert(!depth_first(), "invariant"); - assert(overflow_stack_breadth() != NULL, "invariant"); + assert(claimed_stack_breadth()->overflow_stack() != NULL, "invariant"); totally_drain = totally_drain || _totally_drain; #ifdef ASSERT @@ -299,51 +254,39 @@ MutableSpace* perm_space = heap->perm_gen()->object_space(); #endif /* ASSERT */ + OverflowTaskQueue<oop>* const tq = claimed_stack_breadth(); do { oop obj; // Drain overflow stack first, so other threads can steal from // claimed stack while we work. - while(!overflow_stack_breadth()->is_empty()) { - obj = overflow_stack_breadth()->pop(); + while (tq->pop_overflow(obj)) { obj->copy_contents(this); } if (totally_drain) { - // obj is a reference!!! - while (claimed_stack_breadth()->pop_local(obj)) { - // It would be nice to assert about the type of objects we might - // pop, but they can come from anywhere, unfortunately. + while (tq->pop_local(obj)) { obj->copy_contents(this); } } else { - // obj is a reference!!! - while (claimed_stack_breadth()->size() > _target_stack_size && - claimed_stack_breadth()->pop_local(obj)) { - // It would be nice to assert about the type of objects we might - // pop, but they can come from anywhere, unfortunately. 
+ while (tq->size() > _target_stack_size && tq->pop_local(obj)) { obj->copy_contents(this); } } // If we could not find any other work, flush the prefetch queue - if (claimed_stack_breadth()->size() == 0 && - (overflow_stack_breadth()->length() == 0)) { + if (tq->is_empty()) { flush_prefetch_queue(); } - } while((totally_drain && claimed_stack_breadth()->size() > 0) || - (overflow_stack_breadth()->length() > 0)); + } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty()); - assert(!totally_drain || claimed_stack_empty(), "Sanity"); - assert(totally_drain || - claimed_stack_breadth()->size() <= _target_stack_size, - "Sanity"); - assert(overflow_stack_empty(), "Sanity"); + assert(!totally_drain || tq->taskqueue_empty(), "Sanity"); + assert(totally_drain || tq->size() <= _target_stack_size, "Sanity"); + assert(tq->overflow_empty(), "Sanity"); } void PSPromotionManager::flush_labs() { - assert(claimed_stack_empty(), "Attempt to flush lab with live stack"); - assert(overflow_stack_empty(), "Attempt to flush lab with live overflow stack"); + assert(stacks_empty(), "Attempt to flush lab with live stack"); // If either promotion lab fills up, we can flush the // lab but not refill it, so check first.
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -78,9 +78,7 @@ PrefetchQueue _prefetch_queue; OopStarTaskQueue _claimed_stack_depth; - GrowableArray<StarTask>* _overflow_stack_depth; - OopTaskQueue _claimed_stack_breadth; - GrowableArray<oop>* _overflow_stack_breadth; + OverflowTaskQueue<oop> _claimed_stack_breadth; bool _depth_first; bool _totally_drain; @@ -97,9 +95,6 @@ template <class T> inline void claim_or_forward_internal_depth(T* p); template <class T> inline void claim_or_forward_internal_breadth(T* p); - GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; } - GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; } - // On the task queues we push reference locations as well as // partially-scanned arrays (in the latter case, we push an oop to // the from-space image of the array and the length on the @@ -151,18 +146,19 @@ #if PS_PM_STATS ++_total_pushes; + int stack_length = claimed_stack_depth()->overflow_stack()->length(); #endif // PS_PM_STATS - if (!claimed_stack_depth()->push(p)) { - overflow_stack_depth()->push(p); + claimed_stack_depth()->push(p); + #if PS_PM_STATS + if (claimed_stack_depth()->overflow_stack()->length() != stack_length) { ++_overflow_pushes; - uint stack_length = (uint) overflow_stack_depth()->length(); - if (stack_length > _max_overflow_length) { - _max_overflow_length = stack_length; + if ((uint)stack_length + 1 > _max_overflow_length) { + _max_overflow_length = (uint)stack_length + 1; } + } #endif // PS_PM_STATS - } } void push_breadth(oop o) { @@ -170,18 +166,19 @@ #if PS_PM_STATS ++_total_pushes; + int stack_length = claimed_stack_breadth()->overflow_stack()->length(); #endif // PS_PM_STATS - if(!claimed_stack_breadth()->push(o)) { - overflow_stack_breadth()->push(o); + claimed_stack_breadth()->push(o); + #if PS_PM_STATS + if (claimed_stack_breadth()->overflow_stack()->length() != stack_length) { ++_overflow_pushes; - uint stack_length = (uint) overflow_stack_breadth()->length(); - if (stack_length > _max_overflow_length) { - _max_overflow_length = stack_length; + if ((uint)stack_length + 1 > _max_overflow_length) { + _max_overflow_length = (uint)stack_length + 1; } + } #endif // PS_PM_STATS - } } protected: @@ -199,12 +196,10 @@ static PSPromotionManager* vm_thread_promotion_manager(); static bool steal_depth(int queue_num, int* seed, StarTask& t) { - assert(stack_array_depth() != NULL, "invariant"); return stack_array_depth()->steal(queue_num, seed, t); } - static bool steal_breadth(int queue_num, int* seed, Task& t) { - assert(stack_array_breadth() != NULL, "invariant"); + static bool steal_breadth(int queue_num, int* seed, oop& t) { return stack_array_breadth()->steal(queue_num, seed, t); } @@ -214,7 +209,7 @@ OopStarTaskQueue* claimed_stack_depth() { return &_claimed_stack_depth; } - OopTaskQueue* claimed_stack_breadth() { + OverflowTaskQueue<oop>* claimed_stack_breadth() { return &_claimed_stack_breadth; } @@ -246,25 +241,13 @@ void drain_stacks_depth(bool totally_drain); void 
drain_stacks_breadth(bool totally_drain); - bool claimed_stack_empty() { - if (depth_first()) { - return claimed_stack_depth()->size() <= 0; - } else { - return claimed_stack_breadth()->size() <= 0; - } - } - bool overflow_stack_empty() { - if (depth_first()) { - return overflow_stack_depth()->length() <= 0; - } else { - return overflow_stack_breadth()->length() <= 0; - } + bool depth_first() const { + return _depth_first; } bool stacks_empty() { - return claimed_stack_empty() && overflow_stack_empty(); - } - bool depth_first() { - return _depth_first; + return depth_first() ? + claimed_stack_depth()->is_empty() : + claimed_stack_breadth()->is_empty(); } inline void process_popped_location_depth(StarTask p);
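Illustrative aside (not part of the patch): drain_stacks_depth() and drain_stacks_breadth() above keep the same shape after the rewrite: empty the private overflow portion first so other workers can keep stealing from the shared queue, then pop locally, either completely or only down to a target depth. A hedged sketch of that loop, assuming a queue type with pop_overflow(), pop_local(), size(), taskqueue_empty() and overflow_empty() (for example the toy queue sketched earlier); the parentheses make the usual && over || precedence explicit:

    #include <cstddef>

    // Task must be named explicitly at the call site; Process is any callable
    // that consumes one task (follow_contents, copy_contents, ...).
    template <class Task, class Queue, class Process>
    void drain_stacks(Queue& q, Process process,
                      bool totally_drain, size_t target_size) {
      do {
        Task t;
        // Private overflow first, so other GC workers can keep stealing from
        // the shared, bounded part of the queue while this worker is busy.
        while (q.pop_overflow(t)) process(t);
        // Then the local queue: completely, or only until it is back under
        // the target size, leaving some work available for stealing.
        if (totally_drain) {
          while (q.pop_local(t)) process(t);
        } else {
          while (q.size() > target_size && q.pop_local(t)) process(t);
        }
      } while ((totally_drain && !q.taskqueue_empty()) || !q.overflow_empty());
    }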
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -414,7 +414,6 @@ } // Finally, flush the promotion_manager's labs, and deallocate its stacks. - assert(promotion_manager->claimed_stack_empty(), "Sanity"); PSPromotionManager::post_scavenge(); promotion_failure_occurred = promotion_failed();
--- a/hotspot/src/share/vm/includeDB_compiler2 Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/includeDB_compiler2 Thu Jul 29 22:04:41 2010 -0700 @@ -89,6 +89,21 @@ allocation.hpp c2_globals.hpp +bcEscapeAnalyzer.cpp bcEscapeAnalyzer.hpp +bcEscapeAnalyzer.cpp bitMap.inline.hpp +bcEscapeAnalyzer.cpp bytecode.hpp +bcEscapeAnalyzer.cpp ciConstant.hpp +bcEscapeAnalyzer.cpp ciField.hpp +bcEscapeAnalyzer.cpp ciMethodBlocks.hpp +bcEscapeAnalyzer.cpp ciStreams.hpp + +bcEscapeAnalyzer.hpp allocation.hpp +bcEscapeAnalyzer.hpp ciMethod.hpp +bcEscapeAnalyzer.hpp ciMethodData.hpp +bcEscapeAnalyzer.hpp dependencies.hpp +bcEscapeAnalyzer.hpp growableArray.hpp +bcEscapeAnalyzer.hpp vectset.hpp + block.cpp allocation.inline.hpp block.cpp block.hpp block.cpp cfgnode.hpp @@ -239,6 +254,7 @@ ciEnv.cpp compileLog.hpp ciEnv.cpp runtime.hpp +ciMethod.cpp bcEscapeAnalyzer.hpp ciMethod.cpp ciTypeFlow.hpp ciMethod.cpp methodOop.hpp
--- a/hotspot/src/share/vm/includeDB_core Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/includeDB_core Thu Jul 29 22:04:41 2010 -0700 @@ -301,20 +301,6 @@ barrierSet.inline.hpp barrierSet.hpp barrierSet.inline.hpp cardTableModRefBS.hpp -bcEscapeAnalyzer.cpp bcEscapeAnalyzer.hpp -bcEscapeAnalyzer.cpp bitMap.inline.hpp -bcEscapeAnalyzer.cpp bytecode.hpp -bcEscapeAnalyzer.cpp ciConstant.hpp -bcEscapeAnalyzer.cpp ciField.hpp -bcEscapeAnalyzer.cpp ciMethodBlocks.hpp -bcEscapeAnalyzer.cpp ciStreams.hpp - -bcEscapeAnalyzer.hpp allocation.hpp -bcEscapeAnalyzer.hpp ciMethod.hpp -bcEscapeAnalyzer.hpp ciMethodData.hpp -bcEscapeAnalyzer.hpp dependencies.hpp -bcEscapeAnalyzer.hpp growableArray.hpp - biasedLocking.cpp biasedLocking.hpp biasedLocking.cpp klass.inline.hpp biasedLocking.cpp markOop.hpp @@ -545,6 +531,7 @@ ciCPCache.hpp ciClassList.hpp ciCPCache.hpp ciObject.hpp +ciCPCache.hpp cpCacheOop.hpp ciEnv.cpp allocation.inline.hpp ciEnv.cpp ciConstant.hpp @@ -664,7 +651,6 @@ ciMethod.cpp abstractCompiler.hpp ciMethod.cpp allocation.inline.hpp -ciMethod.cpp bcEscapeAnalyzer.hpp ciMethod.cpp bitMap.inline.hpp ciMethod.cpp ciCallProfile.hpp ciMethod.cpp ciExceptionHandler.hpp @@ -823,6 +809,7 @@ ciStreams.cpp ciCallSite.hpp ciStreams.cpp ciConstant.hpp +ciStreams.cpp ciCPCache.hpp ciStreams.cpp ciField.hpp ciStreams.cpp ciStreams.hpp ciStreams.cpp ciUtilities.hpp @@ -962,7 +949,6 @@ classLoader.cpp timer.hpp classLoader.cpp universe.inline.hpp classLoader.cpp vmSymbols.hpp -classLoader.cpp vtune.hpp classLoader.hpp classFileParser.hpp classLoader.hpp perfData.hpp @@ -1002,7 +988,6 @@ codeBlob.cpp safepoint.hpp codeBlob.cpp sharedRuntime.hpp codeBlob.cpp vframe.hpp -codeBlob.cpp vtune.hpp codeBlob.hpp codeBuffer.hpp codeBlob.hpp frame.hpp @@ -2165,7 +2150,6 @@ interpreter.cpp stubRoutines.hpp interpreter.cpp templateTable.hpp interpreter.cpp timer.hpp -interpreter.cpp vtune.hpp interpreter.hpp cppInterpreter.hpp interpreter.hpp stubs.hpp @@ -2321,7 +2305,6 @@ java.cpp vmError.hpp java.cpp vm_operations.hpp java.cpp vm_version_<arch>.hpp -java.cpp vtune.hpp java.hpp os.hpp @@ -3048,7 +3031,6 @@ nmethod.cpp scopeDesc.hpp nmethod.cpp sharedRuntime.hpp nmethod.cpp sweeper.hpp -nmethod.cpp vtune.hpp nmethod.cpp xmlstream.hpp nmethod.hpp codeBlob.hpp @@ -3771,7 +3753,6 @@ sharedRuntime.cpp vmSymbols.hpp sharedRuntime.cpp vmreg_<arch>.inline.hpp sharedRuntime.cpp vtableStubs.hpp -sharedRuntime.cpp vtune.hpp sharedRuntime.cpp xmlstream.hpp sharedRuntime.hpp allocation.hpp @@ -3935,7 +3916,6 @@ stubCodeGenerator.cpp forte.hpp stubCodeGenerator.cpp oop.inline.hpp stubCodeGenerator.cpp stubCodeGenerator.hpp -stubCodeGenerator.cpp vtune.hpp stubCodeGenerator.hpp allocation.hpp stubCodeGenerator.hpp assembler.hpp @@ -4456,7 +4436,6 @@ universe.cpp universe.inline.hpp universe.cpp vmSymbols.hpp universe.cpp vm_operations.hpp -universe.cpp vtune.hpp universe.hpp growableArray.hpp universe.hpp handles.hpp @@ -4719,7 +4698,6 @@ vtableStubs.cpp resourceArea.hpp vtableStubs.cpp sharedRuntime.hpp vtableStubs.cpp vtableStubs.hpp -vtableStubs.cpp vtune.hpp vtableStubs.hpp allocation.hpp @@ -4733,11 +4711,6 @@ vtableStubs_<arch_model>.cpp vmreg_<arch>.inline.hpp vtableStubs_<arch_model>.cpp vtableStubs.hpp -vtune.hpp allocation.hpp - -vtune_<os_family>.cpp interpreter.hpp -vtune_<os_family>.cpp vtune.hpp - watermark.hpp allocation.hpp watermark.hpp globalDefinitions.hpp
--- a/hotspot/src/share/vm/interpreter/bytecode.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/interpreter/bytecode.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -136,25 +136,24 @@ // Implementation of Bytecode_invoke void Bytecode_invoke::verify() const { - Bytecodes::Code bc = adjusted_invoke_code(); assert(is_valid(), "check invoke"); assert(method()->constants()->cache() != NULL, "do not call this from verifier or rewriter"); } -symbolOop Bytecode_invoke::signature() const { +symbolOop Bytecode_member_ref::signature() const { constantPoolOop constants = method()->constants(); return constants->signature_ref_at(index()); } -symbolOop Bytecode_invoke::name() const { +symbolOop Bytecode_member_ref::name() const { constantPoolOop constants = method()->constants(); return constants->name_ref_at(index()); } -BasicType Bytecode_invoke::result_type(Thread *thread) const { +BasicType Bytecode_member_ref::result_type(Thread *thread) const { symbolHandle sh(thread, signature()); ResultTypeFinder rts(sh); rts.iterate(); @@ -167,9 +166,9 @@ KlassHandle resolved_klass; constantPoolHandle constants(THREAD, _method->constants()); - if (adjusted_invoke_code() == Bytecodes::_invokedynamic) { + if (java_code() == Bytecodes::_invokedynamic) { LinkResolver::resolve_dynamic_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); - } else if (adjusted_invoke_code() != Bytecodes::_invokeinterface) { + } else if (java_code() != Bytecodes::_invokeinterface) { LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); } else { LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); @@ -178,51 +177,68 @@ } -int Bytecode_invoke::index() const { +int Bytecode_member_ref::index() const { // Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4, // at the same time it allocates per-call-site CP cache entries. 
- Bytecodes::Code stdc = Bytecodes::java_code(code()); - Bytecode* invoke = Bytecode_at(bcp()); - if (invoke->has_index_u4(stdc)) - return invoke->get_index_u4(stdc); + Bytecodes::Code rawc = code(); + Bytecode* invoke = bytecode(); + if (invoke->has_index_u4(rawc)) + return invoke->get_index_u4(rawc); else - return invoke->get_index_u2_cpcache(stdc); + return invoke->get_index_u2_cpcache(rawc); } +int Bytecode_member_ref::pool_index() const { + int index = this->index(); + DEBUG_ONLY({ + if (!bytecode()->has_index_u4(code())) + index -= constantPoolOopDesc::CPCACHE_INDEX_TAG; + }); + return _method->constants()->cache()->entry_at(index)->constant_pool_index(); +} // Implementation of Bytecode_field void Bytecode_field::verify() const { - Bytecodes::Code stdc = Bytecodes::java_code(code()); - assert(stdc == Bytecodes::_putstatic || stdc == Bytecodes::_getstatic || - stdc == Bytecodes::_putfield || stdc == Bytecodes::_getfield, "check field"); -} - - -bool Bytecode_field::is_static() const { - Bytecodes::Code stdc = Bytecodes::java_code(code()); - return stdc == Bytecodes::_putstatic || stdc == Bytecodes::_getstatic; + assert(is_valid(), "check field"); } -int Bytecode_field::index() const { - Bytecode* invoke = Bytecode_at(bcp()); - return invoke->get_index_u2_cpcache(Bytecodes::_getfield); +// Implementation of Bytecode_loadconstant + +int Bytecode_loadconstant::raw_index() const { + Bytecode* bcp = bytecode(); + Bytecodes::Code rawc = bcp->code(); + assert(rawc != Bytecodes::_wide, "verifier prevents this"); + if (Bytecodes::java_code(rawc) == Bytecodes::_ldc) + return bcp->get_index_u1(rawc); + else + return bcp->get_index_u2(rawc, false); } - -// Implementation of Bytecodes loac constant +int Bytecode_loadconstant::pool_index() const { + int index = raw_index(); + if (has_cache_index()) { + return _method->constants()->cache()->entry_at(index)->constant_pool_index(); + } + return index; +} -int Bytecode_loadconstant::index() const { - Bytecodes::Code stdc = Bytecodes::java_code(code()); - if (stdc != Bytecodes::_wide) { - if (Bytecodes::java_code(stdc) == Bytecodes::_ldc) - return get_index_u1(stdc); - else - return get_index_u2(stdc, false); +BasicType Bytecode_loadconstant::result_type() const { + int index = pool_index(); + constantTag tag = _method->constants()->tag_at(index); + return tag.basic_type(); +} + +oop Bytecode_loadconstant::resolve_constant(TRAPS) const { + assert(_method.not_null(), "must supply method to resolve constant"); + int index = raw_index(); + constantPoolOop constants = _method->constants(); + if (has_cache_index()) { + return constants->resolve_cached_constant_at(index, THREAD); + } else { + return constants->resolve_constant_at(index, THREAD); } - stdc = Bytecodes::code_at(addr_at(1)); - return get_index_u2(stdc, true); } //------------------------------------------------------------------------------
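Illustrative aside (not part of the patch): Bytecode_loadconstant::raw_index() above distinguishes the one-byte operand of ldc from the two-byte, big-endian operand of ldc_w and ldc2_w. A small self-contained sketch of that decoding, assuming a raw bytecode pointer laid out as opcode byte followed by its operand bytes (HotSpot's rewritten fast forms, which carry cache indexes instead, are left out):

    #include <cstdint>

    // Opcode values from the JVM specification.
    enum : uint8_t { kLdc = 0x12, kLdcW = 0x13, kLdc2W = 0x14 };

    // Reads the constant-pool index operand of an ldc-family bytecode.
    // bcp points at the opcode; ldc has a u1 operand, the wide forms a u2.
    static int ldc_raw_index(const uint8_t* bcp) {
      switch (bcp[0]) {
        case kLdc:   return bcp[1];
        case kLdcW:
        case kLdc2W: return (bcp[1] << 8) | bcp[2];   // big-endian u2
        default:     return -1;                       // not an ldc bytecode
      }
    }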
--- a/hotspot/src/share/vm/interpreter/bytecode.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/interpreter/bytecode.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -76,9 +76,13 @@ return Bytes::get_native_u2(p); else return Bytes::get_Java_u2(p); } + int get_index_u1_cpcache(Bytecodes::Code bc) const { + assert_same_format_as(bc); assert_index_size(1, bc); + return *(jubyte*)addr_at(1) + constantPoolOopDesc::CPCACHE_INDEX_TAG; + } int get_index_u2_cpcache(Bytecodes::Code bc) const { assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc); - return Bytes::get_native_u2(addr_at(1)) DEBUG_ONLY(+ constantPoolOopDesc::CPCACHE_INDEX_TAG); + return Bytes::get_native_u2(addr_at(1)) + constantPoolOopDesc::CPCACHE_INDEX_TAG; } int get_index_u4(Bytecodes::Code bc) const { assert_same_format_as(bc); assert_index_size(4, bc); @@ -152,7 +156,7 @@ inline Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp) { Bytecode_lookupswitch* b = (Bytecode_lookupswitch*)bcp; - debug_only(b->verify()); + DEBUG_ONLY(b->verify()); return b; } @@ -174,44 +178,56 @@ inline Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp) { Bytecode_tableswitch* b = (Bytecode_tableswitch*)bcp; - debug_only(b->verify()); + DEBUG_ONLY(b->verify()); return b; } -// Abstraction for invoke_{virtual, static, interface, special} +// Common code for decoding invokes and field references. -class Bytecode_invoke: public ResourceObj { +class Bytecode_member_ref: public ResourceObj { protected: methodHandle _method; // method containing the bytecode int _bci; // position of the bytecode - Bytecode_invoke(methodHandle method, int bci) : _method(method), _bci(bci) {} + Bytecode_member_ref(methodHandle method, int bci) : _method(method), _bci(bci) {} + + public: + // Attributes + methodHandle method() const { return _method; } + int bci() const { return _bci; } + address bcp() const { return _method->bcp_from(bci()); } + Bytecode* bytecode() const { return Bytecode_at(bcp()); } + + int index() const; // cache index (loaded from instruction) + int pool_index() const; // constant pool index + symbolOop name() const; // returns the name of the method or field + symbolOop signature() const; // returns the signature of the method or field + + BasicType result_type(Thread* thread) const; // returns the result type of the getfield or invoke + + Bytecodes::Code code() const { return Bytecodes::code_at(bcp(), _method()); } + Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); } +}; + +// Abstraction for invoke_{virtual, static, interface, special} + +class Bytecode_invoke: public Bytecode_member_ref { + protected: + Bytecode_invoke(methodHandle method, int bci) : Bytecode_member_ref(method, bci) {} public: void verify() const; // Attributes - methodHandle method() const { return _method; } - int bci() const { return _bci; } - address bcp() const { return _method->bcp_from(bci()); } - - int index() const; // the constant pool index for the invoke - symbolOop name() const; // returns the name of the invoked method - symbolOop signature() const; // returns the signature of the invoked method - BasicType result_type(Thread *thread) const; // returns the result type of the invoke - - Bytecodes::Code code() const { return Bytecodes::code_at(bcp(), _method()); } - Bytecodes::Code adjusted_invoke_code() const { return Bytecodes::java_code(code()); } - methodHandle static_target(TRAPS); // "specified" method (from constant pool) // Testers - bool is_invokeinterface() const { return adjusted_invoke_code() == 
Bytecodes::_invokeinterface; } - bool is_invokevirtual() const { return adjusted_invoke_code() == Bytecodes::_invokevirtual; } - bool is_invokestatic() const { return adjusted_invoke_code() == Bytecodes::_invokestatic; } - bool is_invokespecial() const { return adjusted_invoke_code() == Bytecodes::_invokespecial; } - bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; } + bool is_invokeinterface() const { return java_code() == Bytecodes::_invokeinterface; } + bool is_invokevirtual() const { return java_code() == Bytecodes::_invokevirtual; } + bool is_invokestatic() const { return java_code() == Bytecodes::_invokestatic; } + bool is_invokespecial() const { return java_code() == Bytecodes::_invokespecial; } + bool is_invokedynamic() const { return java_code() == Bytecodes::_invokedynamic; } bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); } @@ -230,7 +246,7 @@ inline Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci) { Bytecode_invoke* b = new Bytecode_invoke(method, bci); - debug_only(b->verify()); + DEBUG_ONLY(b->verify()); return b; } @@ -240,21 +256,34 @@ } -// Abstraction for all field accesses (put/get field/static_ -class Bytecode_field: public Bytecode { -public: +// Abstraction for all field accesses (put/get field/static) +class Bytecode_field: public Bytecode_member_ref { + protected: + Bytecode_field(methodHandle method, int bci) : Bytecode_member_ref(method, bci) {} + + public: + // Testers + bool is_getfield() const { return java_code() == Bytecodes::_getfield; } + bool is_putfield() const { return java_code() == Bytecodes::_putfield; } + bool is_getstatic() const { return java_code() == Bytecodes::_getstatic; } + bool is_putstatic() const { return java_code() == Bytecodes::_putstatic; } + + bool is_getter() const { return is_getfield() || is_getstatic(); } + bool is_static() const { return is_getstatic() || is_putstatic(); } + + bool is_valid() const { return is_getfield() || + is_putfield() || + is_getstatic() || + is_putstatic(); } void verify() const; - int index() const; - bool is_static() const; - // Creation - inline friend Bytecode_field* Bytecode_field_at(const methodOop method, address bcp); + inline friend Bytecode_field* Bytecode_field_at(methodHandle method, int bci); }; -inline Bytecode_field* Bytecode_field_at(const methodOop method, address bcp) { - Bytecode_field* b = (Bytecode_field*)bcp; - debug_only(b->verify()); +inline Bytecode_field* Bytecode_field_at(methodHandle method, int bci) { + Bytecode_field* b = new Bytecode_field(method, bci); + DEBUG_ONLY(b->verify()); return b; } @@ -274,7 +303,7 @@ inline Bytecode_checkcast* Bytecode_checkcast_at(address bcp) { Bytecode_checkcast* b = (Bytecode_checkcast*)bcp; - debug_only(b->verify()); + DEBUG_ONLY(b->verify()); return b; } @@ -294,7 +323,7 @@ inline Bytecode_instanceof* Bytecode_instanceof_at(address bcp) { Bytecode_instanceof* b = (Bytecode_instanceof*)bcp; - debug_only(b->verify()); + DEBUG_ONLY(b->verify()); return b; } @@ -312,7 +341,7 @@ inline Bytecode_new* Bytecode_new_at(address bcp) { Bytecode_new* b = (Bytecode_new*)bcp; - debug_only(b->verify()); + DEBUG_ONLY(b->verify()); return b; } @@ -330,7 +359,7 @@ inline Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp) { Bytecode_multianewarray* b = (Bytecode_multianewarray*)bcp; - debug_only(b->verify()); + DEBUG_ONLY(b->verify()); return b; } @@ -348,29 +377,57 @@ inline Bytecode_anewarray* Bytecode_anewarray_at(address bcp) { Bytecode_anewarray* b = 
(Bytecode_anewarray*)bcp; - debug_only(b->verify()); + DEBUG_ONLY(b->verify()); return b; } // Abstraction for ldc, ldc_w and ldc2_w -class Bytecode_loadconstant: public Bytecode { +class Bytecode_loadconstant: public ResourceObj { + private: + int _bci; + methodHandle _method; + + Bytecodes::Code code() const { return bytecode()->code(); } + + int raw_index() const; + + Bytecode_loadconstant(methodHandle method, int bci) : _method(method), _bci(bci) {} + public: + // Attributes + methodHandle method() const { return _method; } + int bci() const { return _bci; } + address bcp() const { return _method->bcp_from(bci()); } + Bytecode* bytecode() const { return Bytecode_at(bcp()); } + void verify() const { + assert(_method.not_null(), "must supply method"); Bytecodes::Code stdc = Bytecodes::java_code(code()); assert(stdc == Bytecodes::_ldc || stdc == Bytecodes::_ldc_w || stdc == Bytecodes::_ldc2_w, "load constant"); } - int index() const; + // Only non-standard bytecodes (fast_aldc) have CP cache indexes. + bool has_cache_index() const { return code() >= Bytecodes::number_of_java_codes; } - inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(const methodOop method, address bcp); + int pool_index() const; // index into constant pool + int cache_index() const { // index into CP cache (or -1 if none) + return has_cache_index() ? raw_index() : -1; + } + + BasicType result_type() const; // returns the result type of the ldc + + oop resolve_constant(TRAPS) const; + + // Creation + inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci); }; -inline Bytecode_loadconstant* Bytecode_loadconstant_at(const methodOop method, address bcp) { - Bytecode_loadconstant* b = (Bytecode_loadconstant*)bcp; - debug_only(b->verify()); +inline Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci) { + Bytecode_loadconstant* b = new Bytecode_loadconstant(method, bci); + DEBUG_ONLY(b->verify()); return b; }
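Illustrative aside (not part of the patch): the Bytecode_member_ref / Bytecode_loadconstant code above juggles two index spaces: the operand of a rewritten bytecode is a constant-pool-cache index (tagged with CPCACHE_INDEX_TAG in debug builds), and pool_index() maps it back to the original constant-pool slot via the cache entry. A hedged, self-contained sketch of that mapping with hypothetical miniature data structures (the tag value below is purely illustrative):

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // Miniature constant pool cache: each entry remembers which original
    // constant-pool slot it was created from.
    struct ToyCacheEntry { int constant_pool_index; };

    struct ToyConstantPoolCache {
      std::vector<ToyCacheEntry> entries;
      const ToyCacheEntry& entry_at(int i) const { return entries.at(i); }
    };

    // Bias applied to cache indexes so they cannot be mistaken for plain
    // constant-pool indexes (CPCACHE_INDEX_TAG plays this role in HotSpot).
    static const int kCacheIndexTag = 1 << 16;

    // Maps a (possibly tagged) cache index from a rewritten bytecode back to
    // the constant-pool index, in the spirit of Bytecode_member_ref::pool_index().
    static int pool_index(const ToyConstantPoolCache& cache,
                          int raw_index, bool index_is_tagged) {
      if (index_is_tagged) {
        assert(raw_index >= kCacheIndexTag && "tagged cache index expected");
        raw_index -= kCacheIndexTag;
      }
      return cache.entry_at(raw_index).constant_pool_index;
    }

    int main() {
      ToyConstantPoolCache cache;
      cache.entries.push_back({ 17 });   // cache entry 0 came from CP slot 17
      printf("pool index = %d\n", pool_index(cache, kCacheIndexTag + 0, true));
      return 0;
    }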
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -49,6 +49,7 @@ int get_index_u1() { return *(address)_next_pc++; } int get_index_u2() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; } + int get_index_u1_cpcache() { return get_index_u1() + constantPoolOopDesc::CPCACHE_INDEX_TAG; } int get_index_u2_cpcache() { int i=Bytes::get_native_u2(_next_pc); _next_pc+=2; return i + constantPoolOopDesc::CPCACHE_INDEX_TAG; } int get_index_u4() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; } int get_index_special() { return (is_wide()) ? get_index_u2() : get_index_u1(); } @@ -60,6 +61,7 @@ bool check_index(int i, int& cp_index, outputStream* st = tty); void print_constant(int i, outputStream* st = tty); void print_field_or_method(int i, outputStream* st = tty); + void print_field_or_method(int orig_i, int i, outputStream* st = tty); void print_attributes(int bci, outputStream* st = tty); void bytecode_epilog(int bci, outputStream* st = tty); @@ -177,18 +179,29 @@ _closure->trace(method, bcp, st); } +void print_symbol(symbolOop sym, outputStream* st) { + char buf[40]; + int len = sym->utf8_length(); + if (len >= (int)sizeof(buf)) { + st->print_cr(" %s...[%d]", sym->as_C_string(buf, sizeof(buf)), len); + } else { + st->print(" "); + sym->print_on(st); st->cr(); + } +} + void print_oop(oop value, outputStream* st) { if (value == NULL) { st->print_cr(" NULL"); - } else { + } else if (java_lang_String::is_instance(value)) { EXCEPTION_MARK; Handle h_value (THREAD, value); symbolHandle sym = java_lang_String::as_symbol(h_value, CATCH); - if (sym->utf8_length() > 32) { - st->print_cr(" ...."); - } else { - sym->print_on(st); st->cr(); - } + print_symbol(sym(), st); + } else if (value->is_symbol()) { + print_symbol(symbolOop(value), st); + } else { + st->print_cr(" " PTR_FORMAT, (intptr_t) value); } } @@ -279,16 +292,27 @@ } else if (tag.is_double()) { st->print_cr(" %f", constants->double_at(i)); } else if (tag.is_string()) { - oop string = constants->resolved_string_at(i); + oop string = constants->pseudo_string_at(i); print_oop(string, st); } else if (tag.is_unresolved_string()) { - st->print_cr(" <unresolved string at %d>", i); + const char* string = constants->string_at_noresolve(i); + st->print_cr(" %s", string); } else if (tag.is_klass()) { st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name()); } else if (tag.is_unresolved_klass()) { st->print_cr(" <unresolved klass at %d>", i); } else if (tag.is_object()) { - st->print_cr(" " PTR_FORMAT, constants->object_at(i)); + st->print(" <Object>"); + print_oop(constants->object_at(i), st); + } else if (tag.is_method_type()) { + int i2 = constants->method_type_index_at(i); + st->print(" <MethodType> %d", i2); + print_oop(constants->symbol_at(i2), st); + } else if (tag.is_method_handle()) { + int kind = constants->method_handle_ref_kind_at(i); + int i2 = constants->method_handle_index_at(i); + st->print(" <MethodHandle of kind %d>", kind, i2); + print_field_or_method(-i, i2, st); } else { st->print_cr(" bad tag=%d at %d", tag.value(), i); } @@ -297,7 +321,10 @@ void BytecodePrinter::print_field_or_method(int i, outputStream* st) { int orig_i = i; if (!check_index(orig_i, i, st)) return; + print_field_or_method(orig_i, i, st); +} +void BytecodePrinter::print_field_or_method(int orig_i, int i, outputStream* st) { constantPoolOop constants = method()->constants(); constantTag tag = 
constants->tag_at(i); @@ -314,9 +341,11 @@ return; } + symbolOop klass = constants->klass_name_at(constants->uncached_klass_ref_index_at(i)); symbolOop name = constants->uncached_name_ref_at(i); symbolOop signature = constants->uncached_signature_ref_at(i); - st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string()); + const char* sep = (tag.is_field() ? "/" : ""); + st->print_cr(" %d <%s.%s%s%s> ", i, klass->as_C_string(), name->as_C_string(), sep, signature->as_C_string()); } @@ -340,12 +369,20 @@ st->print_cr(" " INT32_FORMAT, get_short()); break; case Bytecodes::_ldc: - print_constant(get_index_u1(), st); + if (Bytecodes::uses_cp_cache(raw_code())) { + print_constant(get_index_u1_cpcache(), st); + } else { + print_constant(get_index_u1(), st); + } break; case Bytecodes::_ldc_w: case Bytecodes::_ldc2_w: - print_constant(get_index_u2(), st); + if (Bytecodes::uses_cp_cache(raw_code())) { + print_constant(get_index_u2_cpcache(), st); + } else { + print_constant(get_index_u2(), st); + } break; case Bytecodes::_iload:
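With the bytecodeTracer.cpp changes above, field and method references are printed together with their holder class (fields additionally get a '/' between name and signature), and ldc-family bytecodes whose operand was redirected through the constant-pool cache are looked up via the tagged cache index. Purely as an illustration of the new output format (the indices and names below are invented), a traced field and method reference now print roughly as:

    getstatic     5 <java/lang/System.out/Ljava/io/PrintStream;>
    invokevirtual 6 <java/io/PrintStream.println(Ljava/lang/String;)V>

Previously only the name and signature were shown, which made references to like-named members in different classes hard to tell apart.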
--- a/hotspot/src/share/vm/interpreter/bytecodes.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/interpreter/bytecodes.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -489,6 +489,9 @@ def(_return_register_finalizer , "return_register_finalizer" , "b" , NULL , T_VOID , 0, true, _return); + def(_fast_aldc , "fast_aldc" , "bj" , NULL , T_OBJECT, 1, true, _ldc ); + def(_fast_aldc_w , "fast_aldc_w" , "bJJ" , NULL , T_OBJECT, 1, true, _ldc_w ); + def(_shouldnotreachhere , "_shouldnotreachhere" , "b" , NULL , T_VOID , 0, false); // platform specific JVM bytecodes
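The format strings in the two new definitions describe the operand layout: "bj" is the opcode followed by a one-byte constant-pool-cache index, and "bJJ" is the opcode followed by a two-byte cache index in native byte order (which is why the tracer above reads it with get_native_u2 and the rewriter below writes it with Bytes::put_native_u2). A standalone sketch of that decoding follows; the opcode values here are placeholders, not HotSpot's actual numbering.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative only: placeholder opcode values for the two rewritten forms.
    enum { FAST_ALDC = 0xE4, FAST_ALDC_W = 0xE5 };

    // Decode the CP-cache index operand of a (hypothetical) fast_aldc[_w].
    static int cache_index_of(const uint8_t* bcp) {
      if (bcp[0] == FAST_ALDC) {          // format "bj": u1 cache index
        return bcp[1];
      } else if (bcp[0] == FAST_ALDC_W) { // format "bJJ": u2 cache index, native order
        uint16_t idx;
        std::memcpy(&idx, bcp + 1, sizeof(idx));
        return idx;
      }
      return -1;                          // not a fast_aldc form
    }

    int main() {
      uint8_t narrow[] = { FAST_ALDC, 7 };
      uint16_t wide_idx = 300;
      uint8_t wide[3] = { FAST_ALDC_W };
      std::memcpy(wide + 1, &wide_idx, sizeof(wide_idx));
      std::printf("%d %d\n", cache_index_of(narrow), cache_index_of(wide)); // 7 300
      return 0;
    }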
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp Thu Jul 29 19:30:35 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp Thu Jul 29 22:04:41 2010 -0700
@@ -270,6 +270,10 @@
     _fast_linearswitch ,
     _fast_binaryswitch ,
 
+    // special handling of oop constants:
+    _fast_aldc ,
+    _fast_aldc_w ,
+
     _return_register_finalizer ,
 
     _shouldnotreachhere, // For debugging

--- a/hotspot/src/share/vm/interpreter/interpreter.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/interpreter/interpreter.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -99,11 +99,6 @@ #endif // PRODUCT // need to hit every safepoint in order to call zapping routine // register the interpreter - VTune::register_stub( - "Interpreter", - AbstractInterpreter::code()->code_start(), - AbstractInterpreter::code()->code_end() - ); Forte::register_stub( "Interpreter", AbstractInterpreter::code()->code_start(), @@ -267,20 +262,6 @@ } #endif // PRODUCT -static BasicType constant_pool_type(methodOop method, int index) { - constantTag tag = method->constants()->tag_at(index); - if (tag.is_int ()) return T_INT; - else if (tag.is_float ()) return T_FLOAT; - else if (tag.is_long ()) return T_LONG; - else if (tag.is_double ()) return T_DOUBLE; - else if (tag.is_string ()) return T_OBJECT; - else if (tag.is_unresolved_string()) return T_OBJECT; - else if (tag.is_klass ()) return T_OBJECT; - else if (tag.is_unresolved_klass ()) return T_OBJECT; - ShouldNotReachHere(); - return T_ILLEGAL; -} - //------------------------------------------------------------------------------------------------------------------------ // Deoptimization support @@ -330,13 +311,15 @@ } case Bytecodes::_ldc : - type = constant_pool_type( method, *(bcp+1) ); - break; - case Bytecodes::_ldc_w : // fall through case Bytecodes::_ldc2_w: - type = constant_pool_type( method, Bytes::get_Java_u2(bcp+1) ); - break; + { + Thread *thread = Thread::current(); + ResourceMark rm(thread); + methodHandle mh(thread, method); + type = Bytecode_loadconstant_at(mh, bci)->result_type(); + break; + } default: type = Bytecodes::result_type(code);
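The deoptimization code above no longer maps a raw constant-pool tag to a type itself (the deleted constant_pool_type helper); it builds a Bytecode_loadconstant and asks for result_type(), which also covers the rewritten _fast_aldc/_fast_aldc_w forms whose operand is a cache index rather than a pool index. For reference, here is a sketch of the tag-to-type mapping involved, reconstructed from the removed helper and the T_OBJECT definition of fast_aldc; it is illustrative only, not the changeset's implementation of result_type().

    // Illustrative reconstruction: the BasicType an ldc-family bytecode pushes,
    // keyed by constant-pool tag.
    static BasicType ldc_result_type_for_tag(constantTag tag) {
      if (tag.is_int())    return T_INT;
      if (tag.is_float())  return T_FLOAT;
      if (tag.is_long())   return T_LONG;     // ldc2_w only
      if (tag.is_double()) return T_DOUBLE;   // ldc2_w only
      // Strings, classes, and the new MethodHandle/MethodType constants all
      // push an oop, which is why _fast_aldc/_fast_aldc_w are defined as T_OBJECT.
      return T_OBJECT;
    }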
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -83,6 +83,18 @@ } IRT_END +IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) { + assert(bytecode == Bytecodes::_fast_aldc || + bytecode == Bytecodes::_fast_aldc_w, "wrong bc"); + ResourceMark rm(thread); + methodHandle m (thread, method(thread)); + Bytecode_loadconstant* ldc = Bytecode_loadconstant_at(m, bci(thread)); + oop result = ldc->resolve_constant(THREAD); + DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc->cache_index())); + assert(result == cpce->f1(), "expected result for assembly code"); +} +IRT_END + //------------------------------------------------------------------------------------------------------------------------ // Allocation @@ -328,7 +340,7 @@ typeArrayHandle h_extable (thread, h_method->exception_table()); bool should_repeat; int handler_bci; - int current_bci = bcp(thread) - h_method->code_base(); + int current_bci = bci(thread); // Need to do this check first since when _do_not_unlock_if_synchronized // is set, we don't want to trigger any classloading which may make calls @@ -615,8 +627,7 @@ if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) { ResourceMark rm(thread); methodHandle m (thread, method(thread)); - int bci = m->bci_from(bcp(thread)); - Bytecode_invoke* call = Bytecode_invoke_at(m, bci); + Bytecode_invoke* call = Bytecode_invoke_at(m, bci(thread)); symbolHandle signature (thread, call->signature()); receiver = Handle(thread, thread->last_frame().interpreter_callee_receiver(signature)); @@ -1257,7 +1268,7 @@ Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci); ArgumentSizeComputer asc(invoke->signature()); int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver - Copy::conjoint_bytes(src_address, dest_address, + Copy::conjoint_jbytes(src_address, dest_address, size_of_arguments * Interpreter::stackElementSize); IRT_END #endif
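InterpreterRuntime::resolve_ldc is the VM slow path behind the new _fast_aldc templates (their templateTable entries below carry the clvm bit, marking them as possibly calling into the VM): the generated interpreter code loads the resolved oop from the ConstantPoolCacheEntry's f1 slot and only enters the runtime when that slot is still empty, which is what the assert at the end of the entry double-checks. Below is a conceptual C++ rendering of that fast/slow split; it is a sketch of the idea, not the generated assembly, and the helper name is invented.

    // Illustrative pseudo-implementation of what a fast_aldc template does
    // conceptually (the real fast path lives in generated interpreter code).
    static oop fast_aldc_value(JavaThread* thread, ConstantPoolCacheEntry* cpce,
                               Bytecodes::Code bc) {
      oop result = cpce->f1();                       // fast path: already resolved
      if (result == NULL) {
        InterpreterRuntime::resolve_ldc(thread, bc); // slow path: resolve and cache
        result = cpce->f1();                         // now filled in (see the assert)
      }
      return result;
    }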
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp Thu Jul 29 22:04:41 2010 -0700 @@ -34,6 +34,7 @@ static frame last_frame(JavaThread *thread) { return thread->last_frame(); } static methodOop method(JavaThread *thread) { return last_frame(thread).interpreter_frame_method(); } static address bcp(JavaThread *thread) { return last_frame(thread).interpreter_frame_bcp(); } + static int bci(JavaThread *thread) { return last_frame(thread).interpreter_frame_bci(); } static void set_bcp_and_mdp(address bcp, JavaThread*thread); static Bytecodes::Code code(JavaThread *thread) { // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272) @@ -59,6 +60,7 @@ public: // Constants static void ldc (JavaThread* thread, bool wide); + static void resolve_ldc (JavaThread* thread, Bytecodes::Code bytecode); // Allocation static void _new (JavaThread* thread, constantPoolOopDesc* pool, int index);
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/interpreter/rewriter.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -38,6 +38,8 @@ case JVM_CONSTANT_InterfaceMethodref: case JVM_CONSTANT_Fieldref : // fall through case JVM_CONSTANT_Methodref : // fall through + case JVM_CONSTANT_MethodHandle : // fall through + case JVM_CONSTANT_MethodType : // fall through add_cp_cache_entry(i); break; } @@ -131,6 +133,27 @@ } +// Rewrite some ldc bytecodes to _fast_aldc +void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) { + assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), ""); + address p = bcp + offset; + int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p); + constantTag tag = _pool->tag_at(cp_index).value(); + if (tag.is_method_handle() || tag.is_method_type()) { + int cache_index = cp_entry_to_cp_cache(cp_index); + if (is_wide) { + (*bcp) = Bytecodes::_fast_aldc_w; + assert(cache_index == (u2)cache_index, ""); + Bytes::put_native_u2(p, cache_index); + } else { + (*bcp) = Bytecodes::_fast_aldc; + assert(cache_index == (u1)cache_index, ""); + (*p) = (u1)cache_index; + } + } +} + + // Rewrites a method given the index_map information void Rewriter::scan_method(methodOop method) { @@ -198,6 +221,12 @@ case Bytecodes::_invokedynamic: rewrite_invokedynamic(bcp, prefix_length+1); break; + case Bytecodes::_ldc: + maybe_rewrite_ldc(bcp, prefix_length+1, false); + break; + case Bytecodes::_ldc_w: + maybe_rewrite_ldc(bcp, prefix_length+1, true); + break; case Bytecodes::_jsr : // fall through case Bytecodes::_jsr_w : nof_jsrs++; break; case Bytecodes::_monitorenter : // fall through
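The effect of maybe_rewrite_ldc is easiest to see on the raw bytecode stream. As an invented example (the constant-pool slot and cache entry number are made up): if constant-pool entry #17 is a CONSTANT_MethodHandle that was assigned constant-pool-cache entry 3 when the cache was laid out, then

    before:  ldc #17          // operand: constant-pool index
    after:   fast_aldc 3      // operand: constant-pool-cache index (u1; native u2 for the wide form)

Ordinary ldc of ints, floats, strings and classes is left alone; only MethodHandle and MethodType constants, which have to be resolved through the cache, are redirected, and the asserts above check that the cache index still fits in the narrower operand.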
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp Thu Jul 29 19:30:35 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp Thu Jul 29 22:04:41 2010 -0700
@@ -66,6 +66,7 @@
   void rewrite_Object_init(methodHandle m, TRAPS);
   void rewrite_member_reference(address bcp, int offset);
   void rewrite_invokedynamic(address bcp, int offset);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
 
  public:
   // Driver routine:
--- a/hotspot/src/share/vm/interpreter/templateTable.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/interpreter/templateTable.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -507,6 +507,9 @@ def(Bytecodes::_fast_linearswitch , ubcp|disp|____|____, itos, vtos, fast_linearswitch , _ ); def(Bytecodes::_fast_binaryswitch , ubcp|disp|____|____, itos, vtos, fast_binaryswitch , _ ); + def(Bytecodes::_fast_aldc , ubcp|____|clvm|____, vtos, atos, fast_aldc , false ); + def(Bytecodes::_fast_aldc_w , ubcp|____|clvm|____, vtos, atos, fast_aldc , true ); + def(Bytecodes::_return_register_finalizer , ____|disp|clvm|____, vtos, vtos, _return , vtos ); def(Bytecodes::_shouldnotreachhere , ____|____|____|____, vtos, vtos, shouldnotreachhere , _ );
--- a/hotspot/src/share/vm/interpreter/templateTable.hpp Thu Jul 29 19:30:35 2010 -0700
+++ b/hotspot/src/share/vm/interpreter/templateTable.hpp Thu Jul 29 22:04:41 2010 -0700
@@ -123,6 +123,7 @@
   static void sipush();
   static void ldc(bool wide);
   static void ldc2_w();
+  static void fast_aldc(bool wide);
 
   static void locals_index(Register reg, int offset = 1);
   static void iload();
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -284,12 +284,19 @@ committed_unique_to_self(ind, MemRegion(new_end_aligned, cur_committed.end())); if (!uncommit_region.is_empty()) { - if (!os::uncommit_memory((char*)uncommit_region.start(), - uncommit_region.byte_size())) { - assert(false, "Card table contraction failed"); - // The call failed so don't change the end of the - // committed region. This is better than taking the - // VM down. + // It is not safe to uncommit cards if the boundary between + // the generations is moving. A shrink can uncommit cards + // owned by generation A but being used by generation B. + if (!UseAdaptiveGCBoundary) { + if (!os::uncommit_memory((char*)uncommit_region.start(), + uncommit_region.byte_size())) { + assert(false, "Card table contraction failed"); + // The call failed so don't change the end of the + // committed region. This is better than taking the + // VM down. + new_end_aligned = _committed[ind].end(); + } + } else { new_end_aligned = _committed[ind].end(); } } @@ -297,6 +304,19 @@ // In any case, we can reset the end of the current committed entry. _committed[ind].set_end(new_end_aligned); +#ifdef ASSERT + // Check that the last card in the new region is committed according + // to the tables. + bool covered = false; + for (int cr = 0; cr < _cur_covered_regions; cr++) { + if (_committed[cr].contains(new_end - 1)) { + covered = true; + break; + } + } + assert(covered, "Card for end of new region not committed"); +#endif + // The default of 0 is not necessarily clean cards. jbyte* entry; if (old_region.last() < _whole_heap.start()) { @@ -354,6 +374,9 @@ addr_for((jbyte*) _committed[ind].start()), addr_for((jbyte*) _committed[ind].last())); } + // Touch the last card of the covered region to show that it + // is committed (or SEGV). + debug_only(*byte_for(_covered[ind].last());) debug_only(verify_guard();) }
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -179,9 +179,14 @@ } n_covered_regions += _gen_specs[i]->n_covered_regions(); } - assert(total_reserved % pageSize == 0, "Gen size"); + assert(total_reserved % pageSize == 0, + err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize=" + SIZE_FORMAT, total_reserved, pageSize)); total_reserved += perm_gen_spec->max_size(); - assert(total_reserved % pageSize == 0, "Perm Gen size"); + assert(total_reserved % pageSize == 0, + err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize=" + SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved, + pageSize, perm_gen_spec->max_size())); if (total_reserved < perm_gen_spec->max_size()) { vm_exit_during_initialization(overflow_msg);
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -372,6 +372,13 @@ entry->print_value_on(st); } break; + case JVM_CONSTANT_MethodHandle : + st->print("ref_kind=%d", cp->method_handle_ref_kind_at(index)); + st->print(" ref_index=%d", cp->method_handle_index_at(index)); + break; + case JVM_CONSTANT_MethodType : + st->print("signature_index=%d", cp->method_type_index_at(index)); + break; default: ShouldNotReachHere(); break; @@ -437,6 +444,7 @@ // can be non-perm, can be non-instance (array) } } + // FIXME: verify JSR 292 tags JVM_CONSTANT_MethodHandle, etc. base++; } guarantee(cp->tags()->is_perm(), "should be in permspace");
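For reference, the two new cases mirror the JSR 292 constant-pool forms: a CONSTANT_MethodHandle stores a reference kind (1 to 9, e.g. 6 = REF_invokeStatic) plus the index of the field or method entry it wraps, while a CONSTANT_MethodType stores only a signature index. With invented indices, the constant-pool printing code now emits lines such as:

    ref_kind=6 ref_index=42      // CONSTANT_MethodHandle wrapping the Methodref at #42
    signature_index=58           // CONSTANT_MethodType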
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp Thu Jul 29 19:30:35 2010 -0700 +++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp Thu Jul 29 22:04:41 2010 -0700 @@ -358,6 +358,11 @@ return klass_at_noresolve(ref_index); } +symbolOop constantPoolOopDesc::uncached_klass_ref_at_noresolve(int which) { + jint ref_index = uncached_klass_ref_index_at(which); + return klass_at_noresolve(ref_index); +} + char* constantPoolOopDesc::string_at_noresolve(int which) { // Test entry type in case string is resolved while in here. oop entry = *(obj_at_addr(which)); @@ -384,6 +389,119 @@ } } +oop constantPoolOopDesc::resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS) { + oop result_oop = NULL; + if (cache_index >= 0) { + assert(index < 0, "only one kind of index at a time"); + ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index); + result_oop = cpc_entry->f1(); + if (result_oop != NULL) { + return result_oop; // that was easy... + } + index = cpc_entry->constant_pool_index(); + } + + int tag_value = this_oop->tag_at(index).value(); + switch (tag_value) { + + case JVM_CONSTANT_UnresolvedClass: + case JVM_CONSTANT_UnresolvedClassInError: + case JVM_CONSTANT_Class: + { + klassOop resolved = klass_at_impl(this_oop, index, CHECK_NULL); + // ldc wants the java mirror. + result_oop = resolved->klass_part()->java_mirror(); + break; + } + + case JVM_CONSTANT_String: + case JVM_CONSTANT_UnresolvedString: + if (this_oop->is_pseudo_string_at(index)) { + result_oop = this_oop->pseudo_string_at(index); + break; + } + result_oop = string_at_impl(this_oop, index, CHECK_NULL); + break; + + case JVM_CONSTANT_Object: + result_oop = this_oop->object_at(index); + break; + + case JVM_CONSTANT_MethodHandle: + { + int ref_kind = this_oop->method_handle_ref_kind_at(index); + int callee_index = this_oop->method_handle_klass_index_at(index); + symbolHandle name(THREAD, this_oop->method_handle_name_ref_at(index)); + symbolHandle signature(THREAD, this_oop->method_handle_signature_ref_at(index)); + if (PrintMiscellaneous) + tty->print_cr("resolve JVM_CONSTANT_MethodHandle:%d [%d/%d/%d] %s.%s", + ref_kind, index, this_oop->method_handle_index_at(index), + callee_index, name->as_C_string(), signature->as_C_string()); + KlassHandle callee; + { klassOop k = klass_at_impl(this_oop, callee_index, CHECK_NULL); + callee = KlassHandle(THREAD, k); + } + KlassHandle klass(THREAD, this_oop->pool_holder()); + Handle value = SystemDictionary::link_method_handle_constant(klass, ref_kind, + callee, name, signature, + CHECK_NULL); + result_oop = value(); + // FIXME: Uniquify errors, using SystemDictionary::find_resolution_error. + break; + } + + case JVM_CONSTANT_MethodType: + { + symbolHandle signature(THREAD, this_oop->method_type_signature_at(index)); + if (PrintMiscellaneous) + tty->print_cr("resolve JVM_CONSTANT_MethodType [%d/%d] %s", + index, this_oop->method_type_index_at(index), + signature->as_C_string()); + KlassHandle klass(THREAD, this_oop->pool_holder()); + bool ignore_is_on_bcp = false; + Handle value = SystemDictionary::find_method_handle_type(signature, + klass, + ignore_is_on_bcp, + CHECK_NULL); + result_oop = value(); + // FIXME: Uniquify errors, using SystemDictionary::find_resolution_error. 
+ break; + } + + /* maybe some day + case JVM_CONSTANT_Integer: + case JVM_CONSTANT_Float: + case JVM_CONSTANT_Long: + case JVM_CONSTANT_Double: + result_oop = java_lang_boxing_object::create(...); + break; + */ + + default: + DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d/%d] = %d", + this_oop(), index, cache_index, tag_value) ); + assert(false, "unexpected constant tag"); + break; + } + + if (cache_index >= 0) { + // Cache the oop here also. + Handle result(THREAD, result_oop); + result_oop = NULL; // safety + ObjectLocker ol(this_oop, THREAD); + ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index); + oop result_oop2 = cpc_entry->f1(); + if (result_oop2 != NULL) { + // Race condition: May already be filled in while we were trying to lock. + return result_oop2; + } + cpc_entry->set_f1(result()); + return result(); + } else { + return result_oop; + } +} + oop constantPoolOopDesc::string_at_impl(constantPoolHandle this_oop, int which, TRAPS) { oop entry = *(this_oop->obj_at_addr(which)); if (entry->is_symbol()) { @@ -690,6 +808,28 @@ } } break; + case JVM_CONSTANT_MethodType: + { + int k1 = method_type_index_at(index1); + int k2 = cp2->method_type_index_at(index2); + if (k1 == k2) { + return true; + } + } break; + + case JVM_CONSTANT_MethodHandle: + { + int k1 = method_handle_ref_kind_at(index1); + int k2 = cp2->method_handle_ref_kind_at(index2); + if (k1 == k2) { + int i1 = method_handle_index_at(index1); + int i2 = cp2->method_handle_index_at(index2); + if (i1 == i2) { + return true; + } + } + } break; + case JVM_CONSTANT_UnresolvedString: { symbolOop s1 = unresolved_string_at(index1); @@ -863,6 +1003,19 @@ to_cp->symbol_at_put(to_i, s); } break; + case JVM_CONSTANT_MethodType: + { + jint k = method_type_index_at(from_i); + to_cp->method_type_index_at_put(to_i, k); + } break; + + case JVM_CONSTANT_MethodHandle: + { + int k1 = method_handle_ref_kind_at(from_i); + int k2 = method_handle_index_at(from_i); + to_cp->method_handle_index_at_put(to_i, k1, k2); + } break;