OpenJDK / jdk8 / jdk8 / hotspot
changeset 5780:0611ce949aaa
Merge
author | kizune
---|---
date | Tue, 03 Dec 2013 14:13:06 +0400
parents | c31f0cbe6d9e e6dfcdf37ef2
children | e254e5940c19
files | src/share/vm/classfile/classFileParser.cpp
diffstat | 152 files changed, 14885 insertions(+), 1348 deletions(-)
--- a/.hgtags Sun Nov 03 07:50:24 2013 +0000
+++ b/.hgtags Tue Dec 03 14:13:06 2013 +0400
@@ -390,3 +390,11 @@
 4589b398ab03aba6a5da8c06ff53603488d1b8f4 jdk8-b113
 82a9cdbf683e374a76f2009352de53e16bed5a91 hs25-b56
 7fd913010dbbf75260688fd2fa8964763fa49a09 jdk8-b114
+3b32d287da89a47a45d16f6d9ba5bd3cd9bf4b3e hs25-b57
+9ebaac78a8a0061fb9597e07f806498cb626cdeb jdk8-b115
+e510dfdec6dd701410f3398ed86ebcdff0cca63a hs25-b58
+52b076e6ffae247c1c7d8b7aba995195be2b6fc2 jdk8-b116
+c78d517c7ea47501b456e707afd4b78e7b5b202e hs25-b59
+f573d00213b7170c2ff856f9cd83cd148437f5b9 jdk8-b117
+abad3b2d905d9e1ad767c94baa94aba6ed5b207b hs25-b60
+c9f439732b18ea16f7e65815327d5ea7092cc258 jdk8-b118
--- a/agent/src/os/bsd/ps_proc.c Sun Nov 03 07:50:24 2013 +0000
+++ b/agent/src/os/bsd/ps_proc.c Tue Dec 03 14:13:06 2013 +0400
@@ -131,7 +131,7 @@
 static bool ptrace_continue(pid_t pid, int signal) {
   // pass the signal to the process so we don't swallow it
-  if (ptrace(PTRACE_CONT, pid, NULL, signal) < 0) {
+  if (ptrace(PT_CONTINUE, pid, NULL, signal) < 0) {
     print_debug("ptrace(PTRACE_CONT, ..) failed for %d\n", pid);
     return false;
   }
@@ -434,7 +434,6 @@
 // attach to the process. One and only one exposed stuff
 struct ps_prochandle* Pgrab(pid_t pid) {
   struct ps_prochandle* ph = NULL;
-  thread_info* thr = NULL;

   if ( (ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle))) == NULL) {
     print_debug("can't allocate memory for ps_prochandle\n");
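Editor's note: the fix above swaps the Linux-only PTRACE_CONT request for BSD's PT_CONTINUE. A minimal standalone sketch of a portable wrapper follows, assuming the usual Linux/BSD ptrace signatures; the (caddr_t)1 continuation address on the BSD branch is a convention this sketch adds, not something the patch itself uses.

```cpp
#include <cstdio>
#include <sys/types.h>
#include <sys/ptrace.h>

static bool ptrace_continue_portable(pid_t pid, int signal) {
#if defined(__linux__)
  // Linux spelling of the "continue" request
  if (ptrace(PTRACE_CONT, pid, NULL, signal) < 0) {
#else
  // BSD spelling; addr == (caddr_t)1 means "continue from where it stopped"
  if (ptrace(PT_CONTINUE, pid, (caddr_t)1, signal) < 0) {
#endif
    perror("ptrace continue");
    return false;
  }
  return true;
}
```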
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java Sun Nov 03 07:50:24 2013 +0000 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java Tue Dec 03 14:13:06 2013 +0400 @@ -24,8 +24,9 @@ package sun.jvm.hotspot.tools; -import sun.jvm.hotspot.runtime.*; import sun.jvm.hotspot.debugger.JVMDebugger; +import sun.jvm.hotspot.runtime.Arguments; +import sun.jvm.hotspot.runtime.VM; public class JInfo extends Tool { public JInfo() { @@ -138,14 +139,33 @@ } private void printVMFlags() { + VM.Flag[] flags = VM.getVM().getCommandLineFlags(); + System.out.print("Non-default VM flags: "); + for (VM.Flag flag : flags) { + if (flag.getOrigin() == 0) { + // only print flags which aren't their defaults + continue; + } + if (flag.isBool()) { + String onoff = flag.getBool() ? "+" : "-"; + System.out.print("-XX:" + onoff + flag.getName() + " "); + } else { + System.out.print("-XX:" + flag.getName() + "=" + + flag.getValue() + " "); + } + } + System.out.println(); + + System.out.print("Command line: "); String str = Arguments.getJVMFlags(); if (str != null) { - System.out.println(str); + System.out.print(str + " "); } str = Arguments.getJVMArgs(); if (str != null) { - System.out.println(str); + System.out.print(str); } + System.out.println(); } private int mode;
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java Sun Nov 03 07:50:24 2013 +0000 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java Tue Dec 03 14:13:06 2013 +0400 @@ -25,11 +25,11 @@ package sun.jvm.hotspot.tools; import java.io.PrintStream; -import java.util.Hashtable; -import sun.jvm.hotspot.*; -import sun.jvm.hotspot.runtime.*; -import sun.jvm.hotspot.debugger.*; +import sun.jvm.hotspot.HotSpotAgent; +import sun.jvm.hotspot.debugger.DebuggerException; +import sun.jvm.hotspot.debugger.JVMDebugger; +import sun.jvm.hotspot.runtime.VM; // generic command line or GUI tool. // override run & code main as shown below. @@ -147,6 +147,7 @@ } PrintStream err = System.err; + PrintStream out = System.out; int pid = 0; String coreFileName = null; @@ -180,18 +181,18 @@ try { switch (debugeeType) { case DEBUGEE_PID: - err.println("Attaching to process ID " + pid + ", please wait..."); + out.println("Attaching to process ID " + pid + ", please wait..."); agent.attach(pid); break; case DEBUGEE_CORE: - err.println("Attaching to core " + coreFileName + + out.println("Attaching to core " + coreFileName + " from executable " + executableName + ", please wait..."); agent.attach(executableName, coreFileName); break; case DEBUGEE_REMOTE: - err.println("Attaching to remote server " + remoteServer + ", please wait..."); + out.println("Attaching to remote server " + remoteServer + ", please wait..."); agent.attach(remoteServer); break; } @@ -218,7 +219,7 @@ return 1; } - err.println("Debugger attached successfully."); + out.println("Debugger attached successfully."); startInternal(); return 0; } @@ -237,14 +238,14 @@ // Remains of the start mechanism, common to both start methods. private void startInternal() { - PrintStream err = System.err; + PrintStream out = System.out; VM vm = VM.getVM(); if (vm.isCore()) { - err.println("Core build detected."); + out.println("Core build detected."); } else if (vm.isClientCompiler()) { - err.println("Client compiler detected."); + out.println("Client compiler detected."); } else if (vm.isServerCompiler()) { - err.println("Server compiler detected."); + out.println("Server compiler detected."); } else { throw new RuntimeException("Fatal error: " + "should have been able to detect core/C1/C2 build"); @@ -252,8 +253,8 @@ String version = vm.getVMRelease(); if (version != null) { - err.print("JVM version is "); - err.println(version); + out.print("JVM version is "); + out.println(version); } run();
--- a/make/hotspot_version Sun Nov 03 07:50:24 2013 +0000
+++ b/make/hotspot_version Tue Dec 03 14:13:06 2013 +0400
@@ -35,7 +35,7 @@
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=56
+HS_BUILD_NUMBER=60

 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Sun Nov 03 07:50:24 2013 +0000
+++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Tue Dec 03 14:13:06 2013 +0400
@@ -365,7 +365,7 @@
   return entry;
 }

-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   // make it look good in the debugger
   return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) + frame::pc_return_offset;
 }
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -3526,8 +3526,12 @@ delayed()->sub(Rtsp, Roffset, Rtsp); // Bang down shadow pages too. - // The -1 because we already subtracted 1 page. - for (int i = 0; i< StackShadowPages-1; i++) { + // At this point, (tmp-0) is the last address touched, so don't + // touch it again. (It was touched as (tmp-pagesize) but then tmp + // was post-decremented.) Skip this address by starting at i=1, and + // touch a few more pages below. N.B. It is important to touch all + // the way down to and including i=StackShadowPages. + for (int i = 1; i <= StackShadowPages; i++) { set((-i*offset)+STACK_BIAS, Rscratch); st(G0, Rtsp, Rscratch); } @@ -4099,15 +4103,19 @@ void MacroAssembler::encode_klass_not_null(Register r) { assert (UseCompressedClassPointers, "must be compressed"); - assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); - assert(r != G6_heapbase, "bad register choice"); - set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); - sub(r, G6_heapbase, r); - if (Universe::narrow_klass_shift() != 0) { - assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); - srlx(r, LogKlassAlignmentInBytes, r); + if (Universe::narrow_klass_base() != NULL) { + assert(r != G6_heapbase, "bad register choice"); + set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); + sub(r, G6_heapbase, r); + if (Universe::narrow_klass_shift() != 0) { + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); + srlx(r, LogKlassAlignmentInBytes, r); + } + reinit_heapbase(); + } else { + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); + srlx(r, Universe::narrow_klass_shift(), r); } - reinit_heapbase(); } void MacroAssembler::encode_klass_not_null(Register src, Register dst) { @@ -4115,11 +4123,16 @@ encode_klass_not_null(src); } else { assert (UseCompressedClassPointers, "must be compressed"); - assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); - set((intptr_t)Universe::narrow_klass_base(), dst); - sub(src, dst, dst); - if (Universe::narrow_klass_shift() != 0) { - srlx(dst, LogKlassAlignmentInBytes, dst); + if (Universe::narrow_klass_base() != NULL) { + set((intptr_t)Universe::narrow_klass_base(), dst); + sub(src, dst, dst); + if (Universe::narrow_klass_shift() != 0) { + srlx(dst, LogKlassAlignmentInBytes, dst); + } + } else { + // shift src into dst + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); + srlx(src, Universe::narrow_klass_shift(), dst); } } } @@ -4129,14 +4142,16 @@ // the instructions they generate change, then this method needs to be updated. 
int MacroAssembler::instr_size_for_decode_klass_not_null() { assert (UseCompressedClassPointers, "only for compressed klass ptrs"); - // set + add + set - int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 + - insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); - if (Universe::narrow_klass_shift() == 0) { - return num_instrs * BytesPerInstWord; - } else { // sllx - return (num_instrs + 1) * BytesPerInstWord; + int num_instrs = 1; // shift src,dst or add + if (Universe::narrow_klass_base() != NULL) { + // set + add + set + num_instrs += insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + + insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base()); + if (Universe::narrow_klass_shift() != 0) { + num_instrs += 1; // sllx + } } + return num_instrs * BytesPerInstWord; } // !!! If the instructions that get generated here change then function @@ -4145,13 +4160,17 @@ // Do not add assert code to this unless you change vtableStubs_sparc.cpp // pd_code_size_limit. assert (UseCompressedClassPointers, "must be compressed"); - assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); - assert(r != G6_heapbase, "bad register choice"); - set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); - if (Universe::narrow_klass_shift() != 0) - sllx(r, LogKlassAlignmentInBytes, r); - add(r, G6_heapbase, r); - reinit_heapbase(); + if (Universe::narrow_klass_base() != NULL) { + assert(r != G6_heapbase, "bad register choice"); + set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); + if (Universe::narrow_klass_shift() != 0) + sllx(r, LogKlassAlignmentInBytes, r); + add(r, G6_heapbase, r); + reinit_heapbase(); + } else { + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); + sllx(r, Universe::narrow_klass_shift(), r); + } } void MacroAssembler::decode_klass_not_null(Register src, Register dst) { @@ -4161,16 +4180,21 @@ // Do not add assert code to this unless you change vtableStubs_sparc.cpp // pd_code_size_limit. assert (UseCompressedClassPointers, "must be compressed"); - assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized"); - if (Universe::narrow_klass_shift() != 0) { - assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); - set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); - sllx(src, LogKlassAlignmentInBytes, dst); - add(dst, G6_heapbase, dst); - reinit_heapbase(); + if (Universe::narrow_klass_base() != NULL) { + if (Universe::narrow_klass_shift() != 0) { + assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice"); + set((intptr_t)Universe::narrow_klass_base(), G6_heapbase); + sllx(src, LogKlassAlignmentInBytes, dst); + add(dst, G6_heapbase, dst); + reinit_heapbase(); + } else { + set((intptr_t)Universe::narrow_klass_base(), dst); + add(src, dst, dst); + } } else { - set((intptr_t)Universe::narrow_klass_base(), dst); - add(src, dst, dst); + // shift/mov src into dst. + assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift() || Universe::narrow_klass_shift() == 0, "decode alg wrong"); + sllx(src, Universe::narrow_klass_shift(), dst); } } }
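Editor's note: the SPARC encode/decode changes above boil down to making the compressed-class-pointer arithmetic tolerate a NULL narrow_klass_base (zero-based encoding). A hedged sketch of that arithmetic in plain C++, with base and shift standing in for Universe::narrow_klass_base() and Universe::narrow_klass_shift(); this is not VM code.

```cpp
#include <cstdint>

static inline uint32_t encode_klass(uintptr_t klass, uintptr_t base, int shift) {
  // With a non-NULL base, subtract it first; with a NULL base the
  // encoding is a plain shift (the new "else" branches above).
  uintptr_t v = (base != 0) ? (klass - base) : klass;
  return (uint32_t)(v >> shift);
}

static inline uintptr_t decode_klass(uint32_t narrow, uintptr_t base, int shift) {
  // Inverse: shift back up, then re-add the base only if there is one.
  uintptr_t v = ((uintptr_t)narrow) << shift;
  return (base != 0) ? (v + base) : v;
}
```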
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1002,18 +1002,6 @@ // and the vm will find there should this case occur. Address callee_target_addr(G2_thread, JavaThread::callee_target_offset()); __ st_ptr(G5_method, callee_target_addr); - - if (StressNonEntrant) { - // Open a big window for deopt failure - __ save_frame(0); - __ mov(G0, L0); - Label loop; - __ bind(loop); - __ sub(L0, 1, L0); - __ br_null_short(L0, Assembler::pt, loop); - __ restore(); - } - __ jmpl(G3, 0, G0); __ delayed()->nop(); }
--- a/src/cpu/sparc/vm/sparc.ad Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/sparc/vm/sparc.ad Tue Dec 03 14:13:06 2013 +0400 @@ -1660,12 +1660,16 @@ if (UseCompressedClassPointers) { assert(Universe::heap() != NULL, "java heap should be initialized"); st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass"); - st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base"); - if (Universe::narrow_klass_shift() != 0) { - st->print_cr("\tSLL R_G5,3,R_G5"); + if (Universe::narrow_klass_base() != 0) { + st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base"); + if (Universe::narrow_klass_shift() != 0) { + st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5"); + } + st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5"); + st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base"); + } else { + st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5"); } - st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5"); - st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base"); } else { st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check"); } @@ -2912,6 +2916,9 @@ __ bind(LSkip2); } + // We have no guarantee that on 64 bit the higher half of limit_reg is 0 + __ signx(limit_reg); + __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg); __ br(Assembler::equal, true, Assembler::pn, Ldone); __ delayed()->mov(O7, result_reg); // result is difference in lengths @@ -2969,6 +2976,9 @@ Register chr1_reg = result_reg; Register chr2_reg = tmp1_reg; + // We have no guarantee that on 64 bit the higher half of limit_reg is 0 + __ signx(limit_reg); + //check for alignment and position the pointers to the ends __ or3(str1_reg, str2_reg, chr1_reg); __ andcc(chr1_reg, 0x3, chr1_reg);
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -153,13 +153,9 @@ } -address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { - TosState incoming_state = state; +address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { + address entry = __ pc(); - Label cont; - address compiled_entry = __ pc(); - - address entry = __ pc(); #if !defined(_LP64) && defined(COMPILER2) // All return values are where we want them, except for Longs. C2 returns // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1. @@ -170,14 +166,12 @@ // do this here. Unfortunately if we did a rethrow we'd see an machepilog node // first which would move g1 -> O0/O1 and destroy the exception we were throwing. - if (incoming_state == ltos) { + if (state == ltos) { __ srl (G1, 0, O1); __ srlx(G1, 32, O0); } #endif // !_LP64 && COMPILER2 - __ bind(cont); - // The callee returns with the stack possibly adjusted by adapter transition // We remove that possible adjustment here. // All interpreter local registers are untouched. Any result is passed back @@ -186,29 +180,18 @@ __ mov(Llast_SP, SP); // Remove any adapter added stack space. - Label L_got_cache, L_giant_index; const Register cache = G3_scratch; - const Register size = G1_scratch; - if (EnableInvokeDynamic) { - __ ldub(Address(Lbcp, 0), G1_scratch); // Load current bytecode. - __ cmp_and_br_short(G1_scratch, Bytecodes::_invokedynamic, Assembler::equal, Assembler::pn, L_giant_index); - } - __ get_cache_and_index_at_bcp(cache, G1_scratch, 1); - __ bind(L_got_cache); - __ ld_ptr(cache, ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset(), size); - __ and3(size, 0xFF, size); // argument size in words - __ sll(size, Interpreter::logStackElementSize, size); // each argument size in bytes - __ add(Lesp, size, Lesp); // pop arguments + const Register index = G1_scratch; + __ get_cache_and_index_at_bcp(cache, index, 1, index_size); + + const Register flags = cache; + __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags); + const Register parameter_size = flags; + __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size); // argument size in words + __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size); // each argument size in bytes + __ add(Lesp, parameter_size, Lesp); // pop arguments __ dispatch_next(state, step); - // out of the main line of code... - if (EnableInvokeDynamic) { - __ bind(L_giant_index); - __ get_cache_and_index_at_bcp(cache, G1_scratch, 1, sizeof(u4)); - __ ba_short(L_got_cache); - } - return entry; }
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -2932,9 +2932,7 @@ ConstantPoolCacheEntry::verify_tos_state_shift(); // load return address { - const address table_addr = (is_invokeinterface || is_invokedynamic) ? - (address)Interpreter::return_5_addrs_by_index_table() : - (address)Interpreter::return_3_addrs_by_index_table(); + const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); AddressLiteral table(table_addr); __ set(table, temp); __ sll(ra, LogBytesPerWord, ra); @@ -2984,7 +2982,7 @@ __ verify_oop(O0_recv); // get return address - AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + AddressLiteral table(Interpreter::invoke_return_entry_table()); __ set(table, Rtemp); __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type // Make sure we don't need to mask Rret after the above shift @@ -3026,7 +3024,7 @@ __ profile_final_call(O4); // get return address - AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); + AddressLiteral table(Interpreter::invoke_return_entry_table()); __ set(table, Rtemp); __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type // Make sure we don't need to mask Rret after the above shift
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1468,19 +1468,18 @@ addr = new LIR_Address(src.result(), offset, type); } - if (data != dst) { - __ move(data, dst); - data = dst; - } + // Because we want a 2-arg form of xchg and xadd + __ move(data, dst); + if (x->is_add()) { - __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr); + __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr); } else { if (is_obj) { // Do the pre-write barrier, if any. pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */, true /* do_load */, false /* patch */, NULL); } - __ xchg(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr); + __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr); if (is_obj) { // Seems to be a precise address post_barrier(LIR_OprFact::address(addr), data);
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp Sun Nov 03 07:50:24 2013 +0000
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp Tue Dec 03 14:13:06 2013 +0400
@@ -367,7 +367,7 @@
   return entry;
 }

-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   // make it look good in the debugger
   return CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation);
 }
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp Sun Nov 03 07:50:24 2013 +0000
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp Tue Dec 03 14:13:06 2013 +0400
@@ -196,7 +196,7 @@
 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
   assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
-  movl(reg, Address(rsi, bcp_offset));
+  load_unsigned_short(reg, Address(rsi, bcp_offset));
   bswapl(reg);
   shrl(reg, 16);
 }
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp Sun Nov 03 07:50:24 2013 +0000
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp Tue Dec 03 14:13:06 2013 +0400
@@ -192,7 +192,7 @@
     Register reg, int bcp_offset) {
   assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
-  movl(reg, Address(r13, bcp_offset));
+  load_unsigned_short(reg, Address(r13, bcp_offset));
   bswapl(reg);
   shrl(reg, 16);
 }
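Editor's note: both interp_masm fixes replace a 4-byte movl with an explicit 2-byte unsigned load when fetching a u2 index from the bytecode stream, so no bytes past the index are read. A sketch of the value the load_unsigned_short/bswapl/shrl sequence ultimately produces:

```cpp
#include <cstdint>

static inline uint16_t unsigned_2_byte_index_at(const uint8_t* bcp, int offset) {
  // Exactly two bytes, combined big-endian and zero-extended.
  return (uint16_t)((bcp[offset] << 8) | bcp[offset + 1]);
}
```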
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1381,8 +1381,12 @@ jcc(Assembler::greater, loop); // Bang down shadow pages too. - // The -1 because we already subtracted 1 page. - for (int i = 0; i< StackShadowPages-1; i++) { + // At this point, (tmp-0) is the last address touched, so don't + // touch it again. (It was touched as (tmp-pagesize) but then tmp + // was post-decremented.) Skip this address by starting at i=1, and + // touch a few more pages below. N.B. It is important to touch all + // the way down to and including i=StackShadowPages. + for (int i = 1; i <= StackShadowPages; i++) { // this could be any sized move but this is can be a debugging crumb // so the bigger the better. movptr(Address(tmp, (-i*os::vm_page_size())), size ); @@ -5049,25 +5053,32 @@ } void MacroAssembler::encode_klass_not_null(Register r) { - assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); - // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. - assert(r != r12_heapbase, "Encoding a klass in r12"); - mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); - subq(r, r12_heapbase); + if (Universe::narrow_klass_base() != NULL) { + // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. + assert(r != r12_heapbase, "Encoding a klass in r12"); + mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); + subq(r, r12_heapbase); + } if (Universe::narrow_klass_shift() != 0) { assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); shrq(r, LogKlassAlignmentInBytes); } - reinit_heapbase(); + if (Universe::narrow_klass_base() != NULL) { + reinit_heapbase(); + } } void MacroAssembler::encode_klass_not_null(Register dst, Register src) { if (dst == src) { encode_klass_not_null(src); } else { - mov64(dst, (int64_t)Universe::narrow_klass_base()); - negq(dst); - addq(dst, src); + if (Universe::narrow_klass_base() != NULL) { + mov64(dst, (int64_t)Universe::narrow_klass_base()); + negq(dst); + addq(dst, src); + } else { + movptr(dst, src); + } if (Universe::narrow_klass_shift() != 0) { assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong"); shrq(dst, LogKlassAlignmentInBytes); @@ -5081,15 +5092,19 @@ // generate change, then this method needs to be updated. int MacroAssembler::instr_size_for_decode_klass_not_null() { assert (UseCompressedClassPointers, "only for compressed klass ptrs"); - // mov64 + addq + shlq? + mov64 (for reinit_heapbase()). - return (Universe::narrow_klass_shift() == 0 ? 20 : 24); + if (Universe::narrow_klass_base() != NULL) { + // mov64 + addq + shlq? + mov64 (for reinit_heapbase()). + return (Universe::narrow_klass_shift() == 0 ? 20 : 24); + } else { + // longest load decode klass function, mov64, leaq + return 16; + } } // !!! If the instructions that get generated here change then function // instr_size_for_decode_klass_not_null() needs to get updated. 
void MacroAssembler::decode_klass_not_null(Register r) { // Note: it will change flags - assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); assert (UseCompressedClassPointers, "should only be used for compressed headers"); assert(r != r12_heapbase, "Decoding a klass in r12"); // Cannot assert, unverified entry point counts instructions (see .ad file) @@ -5100,14 +5115,15 @@ shlq(r, LogKlassAlignmentInBytes); } // Use r12 as a scratch register in which to temporarily load the narrow_klass_base. - mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); - addq(r, r12_heapbase); - reinit_heapbase(); + if (Universe::narrow_klass_base() != NULL) { + mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base()); + addq(r, r12_heapbase); + reinit_heapbase(); + } } void MacroAssembler::decode_klass_not_null(Register dst, Register src) { // Note: it will change flags - assert(Universe::narrow_klass_base() != NULL, "Base should be initialized"); assert (UseCompressedClassPointers, "should only be used for compressed headers"); if (dst == src) { decode_klass_not_null(dst); @@ -5115,7 +5131,6 @@ // Cannot assert, unverified entry point counts instructions (see .ad file) // vtableStubs also counts instructions in pd_code_size_limit. // Also do not verify_oop as this is called by verify_oop. - mov64(dst, (int64_t)Universe::narrow_klass_base()); if (Universe::narrow_klass_shift() != 0) { assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
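Editor's note: the stack-bang hunks on both SPARC and x86 fix the shadow-page loop bounds: the old loop ran i = 0 .. StackShadowPages-2, re-touching the page the regular bang had just hit and missing the deepest shadow page. A sketch of the corrected loop, with page_size and shadow_pages standing in for os::vm_page_size() and StackShadowPages:

```cpp
#include <cstddef>

static void bang_shadow_pages(volatile char* sp, size_t page_size, int shadow_pages) {
  // Start at i = 1 because (sp - 0) was the last address the regular bang
  // loop touched; run through i == shadow_pages inclusive.
  for (int i = 1; i <= shadow_pages; i++) {
    sp[-(ptrdiff_t)(i * page_size)] = 0;  // touch one byte per page below sp
  }
}
```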
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -3001,6 +3001,10 @@ // sp should be pointing at the return address to the caller (3) + // Pick up the initial fp we should save + // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved) + __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes())); + // Stack bang to make sure there's enough room for these interpreter frames. if (UseStackBanging) { __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); @@ -3020,9 +3024,6 @@ __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); __ movl(counter, rbx); - // Pick up the initial fp we should save - __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes())); - // Now adjust the caller's stack to make up for the extra locals // but record the original sp so that we can save it in the skeletal interpreter // frame and the stack walking of interpreter_sender will get the unextended sp @@ -3220,6 +3221,10 @@ // sp should be pointing at the return address to the caller (3) + // Pick up the initial fp we should save + // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved) + __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes())); + // Stack bang to make sure there's enough room for these interpreter frames. if (UseStackBanging) { __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); @@ -3240,9 +3245,6 @@ __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); __ movl(counter, rbx); - // Pick up the initial fp we should save - __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes())); - // Now adjust the caller's stack to make up for the extra locals // but record the original sp so that we can save it in the skeletal interpreter // frame and the stack walking of interpreter_sender will get the unextended sp
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -3471,6 +3471,10 @@ // rsp should be pointing at the return address to the caller (3) + // Pick up the initial fp we should save + // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved) + __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes())); + // Stack bang to make sure there's enough room for these interpreter frames. if (UseStackBanging) { __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); @@ -3489,9 +3493,6 @@ // Load counter into rdx __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); - // Pick up the initial fp we should save - __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes())); - // Now adjust the caller's stack to make up for the extra locals // but record the original sp so that we can save it in the skeletal interpreter // frame and the stack walking of interpreter_sender will get the unextended sp @@ -3663,6 +3664,10 @@ // rsp should be pointing at the return address to the caller (3) + // Pick up the initial fp we should save + // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved) + __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes())); + // Stack bang to make sure there's enough room for these interpreter frames. if (UseStackBanging) { __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes())); @@ -3670,27 +3675,16 @@ } // Load address of array of frame pcs into rcx (address*) - __ movptr(rcx, - Address(rdi, - Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); + __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes())); // Trash the return pc __ addptr(rsp, wordSize); // Load address of array of frame sizes into rsi (intptr_t*) - __ movptr(rsi, Address(rdi, - Deoptimization::UnrollBlock:: - frame_sizes_offset_in_bytes())); + __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock:: frame_sizes_offset_in_bytes())); // Counter - __ movl(rdx, Address(rdi, - Deoptimization::UnrollBlock:: - number_of_frames_offset_in_bytes())); // (int) - - // Pick up the initial fp we should save - __ movptr(rbp, - Address(rdi, - Deoptimization::UnrollBlock::initial_info_offset_in_bytes())); + __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock:: number_of_frames_offset_in_bytes())); // (int) // Now adjust the caller's stack to make up for the extra locals but // record the original sp so that we can save it in the skeletal @@ -3700,9 +3694,7 @@ const Register sender_sp = r8; __ mov(sender_sp, rsp); - __ movl(rbx, Address(rdi, - Deoptimization::UnrollBlock:: - caller_adjustment_offset_in_bytes())); // (int) + __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock:: caller_adjustment_offset_in_bytes())); // (int) __ subptr(rsp, rbx); // Push interpreter frames in a loop
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -150,13 +150,12 @@ } -address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { - TosState incoming_state = state; +address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { address entry = __ pc(); #ifdef COMPILER2 // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases - if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) { + if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { for (int i = 1; i < 8; i++) { __ ffree(i); } @@ -164,7 +163,7 @@ __ empty_FPU_stack(); } #endif - if ((incoming_state == ftos && UseSSE < 1) || (incoming_state == dtos && UseSSE < 2)) { + if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) { __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled"); } else { __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled"); @@ -172,12 +171,12 @@ // In SSE mode, interpreter returns FP results in xmm0 but they need // to end up back on the FPU so it can operate on them. - if (incoming_state == ftos && UseSSE >= 1) { + if (state == ftos && UseSSE >= 1) { __ subptr(rsp, wordSize); __ movflt(Address(rsp, 0), xmm0); __ fld_s(Address(rsp, 0)); __ addptr(rsp, wordSize); - } else if (incoming_state == dtos && UseSSE >= 2) { + } else if (state == dtos && UseSSE >= 2) { __ subptr(rsp, 2*wordSize); __ movdbl(Address(rsp, 0), xmm0); __ fld_d(Address(rsp, 0)); @@ -194,33 +193,22 @@ __ restore_bcp(); __ restore_locals(); - if (incoming_state == atos) { + if (state == atos) { Register mdp = rbx; Register tmp = rcx; __ profile_return_type(mdp, rax, tmp); } - Label L_got_cache, L_giant_index; - if (EnableInvokeDynamic) { - __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic); - __ jcc(Assembler::equal, L_giant_index); - } - __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2)); - __ bind(L_got_cache); - __ movl(rbx, Address(rbx, rcx, - Address::times_ptr, ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset())); - __ andptr(rbx, 0xFF); - __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); + const Register cache = rbx; + const Register index = rcx; + __ get_cache_and_index_at_bcp(cache, index, 1, index_size); + + const Register flags = cache; + __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); + __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask); + __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale())); __ dispatch_next(state, step); - // out of the main line of code... - if (EnableInvokeDynamic) { - __ bind(L_giant_index); - __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4)); - __ jmp(L_got_cache); - } - return entry; }
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -166,7 +166,7 @@ } -address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) { +address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) { address entry = __ pc(); // Restore stack bottom in case i2c adjusted stack @@ -183,28 +183,16 @@ __ profile_return_type(mdp, rax, tmp); } - Label L_got_cache, L_giant_index; - if (EnableInvokeDynamic) { - __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic); - __ jcc(Assembler::equal, L_giant_index); - } - __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u2)); - __ bind(L_got_cache); - __ movl(rbx, Address(rbx, rcx, - Address::times_ptr, - in_bytes(ConstantPoolCache::base_offset()) + - 3 * wordSize)); - __ andl(rbx, 0xFF); - __ lea(rsp, Address(rsp, rbx, Address::times_8)); + const Register cache = rbx; + const Register index = rcx; + __ get_cache_and_index_at_bcp(cache, index, 1, index_size); + + const Register flags = cache; + __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())); + __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask); + __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale())); __ dispatch_next(state, step); - // out of the main line of code... - if (EnableInvokeDynamic) { - __ bind(L_giant_index); - __ get_cache_and_index_at_bcp(rbx, rcx, 1, sizeof(u4)); - __ jmp(L_got_cache); - } - return entry; }
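Editor's note: on both x86 ports the rewritten return entry drops the invokedynamic special case; it loads the cache entry with the caller-supplied index_size and masks the parameter count out of the flags word. A sketch of the resulting argument pop, using the 0xFF mask the old code applied and the new code names ConstantPoolCacheEntry::parameter_size_mask:

```cpp
#include <cstdint>

static inline intptr_t* pop_arguments(intptr_t* esp, uint32_t flags) {
  const uint32_t parameter_size_mask = 0xFF;    // low byte of the flags word
  uint32_t words = flags & parameter_size_mask; // argument size in words
  return esp + words;                           // one stack slot per argument word
}
```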
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -558,7 +558,7 @@ void TemplateTable::locals_index_wide(Register reg) { - __ movl(reg, at_bcp(2)); + __ load_unsigned_short(reg, at_bcp(2)); __ bswapl(reg); __ shrl(reg, 16); __ negptr(reg); @@ -1552,7 +1552,11 @@ InvocationCounter::counter_offset(); // Load up EDX with the branch displacement - __ movl(rdx, at_bcp(1)); + if (is_wide) { + __ movl(rdx, at_bcp(1)); + } else { + __ load_signed_short(rdx, at_bcp(1)); + } __ bswapl(rdx); if (!is_wide) __ sarl(rdx, 16); LP64_ONLY(__ movslq(rdx, rdx)); @@ -2925,9 +2929,7 @@ ConstantPoolCacheEntry::verify_tos_state_shift(); // load return address { - const address table_addr = (is_invokeinterface || is_invokedynamic) ? - (address)Interpreter::return_5_addrs_by_index_table() : - (address)Interpreter::return_3_addrs_by_index_table(); + const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); ExternalAddress table(table_addr); __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))); }
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -568,7 +568,7 @@ } void TemplateTable::locals_index_wide(Register reg) { - __ movl(reg, at_bcp(2)); + __ load_unsigned_short(reg, at_bcp(2)); __ bswapl(reg); __ shrl(reg, 16); __ negptr(reg); @@ -1575,7 +1575,11 @@ InvocationCounter::counter_offset(); // Load up edx with the branch displacement - __ movl(rdx, at_bcp(1)); + if (is_wide) { + __ movl(rdx, at_bcp(1)); + } else { + __ load_signed_short(rdx, at_bcp(1)); + } __ bswapl(rdx); if (!is_wide) { @@ -2980,9 +2984,7 @@ ConstantPoolCacheEntry::verify_tos_state_shift(); // load return address { - const address table_addr = (is_invokeinterface || is_invokedynamic) ? - (address)Interpreter::return_5_addrs_by_index_table() : - (address)Interpreter::return_3_addrs_by_index_table(); + const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code); ExternalAddress table(table_addr); __ lea(rscratch1, table); __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
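Editor's note: the branch fix loads a normal branch's 2-byte displacement with a signed load, reserving the 4-byte read for wide branches. A sketch of the displacement decode, assuming big-endian bytecode operands at bcp+1:

```cpp
#include <cstdint>

static inline int32_t branch_displacement(const uint8_t* bcp, bool is_wide) {
  if (is_wide) {  // goto_w / jsr_w carry a signed 32-bit big-endian offset
    return (int32_t)(((uint32_t)bcp[1] << 24) | (bcp[2] << 16) | (bcp[3] << 8) | bcp[4]);
  }
  // Normal branches carry a signed 16-bit offset: load it signed, as the
  // new load_signed_short does, instead of reading 4 bytes.
  return (int16_t)((bcp[1] << 8) | bcp[2]);
}
```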
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Sun Nov 03 07:50:24 2013 +0000
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Tue Dec 03 14:13:06 2013 +0400
@@ -1006,7 +1006,7 @@
   istate->set_stack_limit(stack_base - method->max_stack() - 1);
 }

-address CppInterpreter::return_entry(TosState state, int length) {
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
   ShouldNotCallThis();
   return NULL;
 }
--- a/src/cpu/zero/vm/globals_zero.hpp Sun Nov 03 07:50:24 2013 +0000
+++ b/src/cpu/zero/vm/globals_zero.hpp Tue Dec 03 14:13:06 2013 +0400
@@ -57,6 +57,8 @@
 // GC Ergo Flags
 define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread

+define_pd_global(uintx, TypeProfileLevel, 0);
+
 #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)

 #endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
--- a/src/os/bsd/vm/os_bsd.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/os/bsd/vm/os_bsd.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -945,17 +945,15 @@ // Used by VMSelfDestructTimer and the MemProfiler. double os::elapsedTime() { - return (double)(os::elapsed_counter()) * 0.000001; + return ((double)os::elapsed_counter()) / os::elapsed_frequency(); } jlong os::elapsed_counter() { - timeval time; - int status = gettimeofday(&time, NULL); - return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count; + return javaTimeNanos() - initial_time_count; } jlong os::elapsed_frequency() { - return (1000 * 1000); + return NANOSECS_PER_SEC; // nanosecond resolution } bool os::supports_vtime() { return true; } @@ -3582,7 +3580,7 @@ Bsd::_main_thread = pthread_self(); Bsd::clock_init(); - initial_time_count = os::elapsed_counter(); + initial_time_count = javaTimeNanos(); #ifdef __APPLE__ // XXXDARWIN
--- a/src/os/linux/vm/os_linux.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/os/linux/vm/os_linux.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1333,17 +1333,15 @@ // Used by VMSelfDestructTimer and the MemProfiler. double os::elapsedTime() { - return (double)(os::elapsed_counter()) * 0.000001; + return ((double)os::elapsed_counter()) / os::elapsed_frequency(); // nanosecond resolution } jlong os::elapsed_counter() { - timeval time; - int status = gettimeofday(&time, NULL); - return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count; + return javaTimeNanos() - initial_time_count; } jlong os::elapsed_frequency() { - return (1000 * 1000); + return NANOSECS_PER_SEC; // nanosecond resolution } bool os::supports_vtime() { return true; } @@ -4750,7 +4748,7 @@ Linux::_main_thread = pthread_self(); Linux::clock_init(); - initial_time_count = os::elapsed_counter(); + initial_time_count = javaTimeNanos(); // pthread_condattr initialization for monotonic clock int status;
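Editor's note: both os_bsd.cpp and os_linux.cpp move the elapsed counter from gettimeofday microseconds to the monotonic nanosecond clock, so elapsedTime() becomes counter divided by frequency. A sketch of the scheme, with clock_gettime standing in for the VM's javaTimeNanos():

```cpp
#include <ctime>
#include <cstdint>

static const int64_t NANOSECS_PER_SEC = 1000000000LL;

static int64_t now_nanos() {
  timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);  // monotonic, nanosecond resolution
  return (int64_t)ts.tv_sec * NANOSECS_PER_SEC + ts.tv_nsec;
}

static int64_t initial_time_count = now_nanos();  // captured once at startup

static double elapsed_time_seconds() {
  // counter / frequency, exactly as os::elapsedTime() now computes it
  return (double)(now_nanos() - initial_time_count) / NANOSECS_PER_SEC;
}
```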
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -79,6 +79,15 @@ # include <pthread_np.h> #endif +// needed by current_stack_region() workaround for Mavericks +#if defined(__APPLE__) +# include <errno.h> +# include <sys/types.h> +# include <sys/sysctl.h> +# define DEFAULT_MAIN_THREAD_STACK_PAGES 2048 +# define OS_X_10_9_0_KERNEL_MAJOR_VERSION 13 +#endif + #ifdef AMD64 #define SPELL_REG_SP "rsp" #define SPELL_REG_FP "rbp" @@ -828,6 +837,21 @@ pthread_t self = pthread_self(); void *stacktop = pthread_get_stackaddr_np(self); *size = pthread_get_stacksize_np(self); + // workaround for OS X 10.9.0 (Mavericks) + // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages + if (pthread_main_np() == 1) { + if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) { + char kern_osrelease[256]; + size_t kern_osrelease_size = sizeof(kern_osrelease); + int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0); + if (ret == 0) { + // get the major number, atoi will ignore the minor amd micro portions of the version string + if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) { + *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize()); + } + } + } + } *bottom = (address) stacktop - *size; #elif defined(__OpenBSD__) stack_t ss;
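Editor's note: the Mavericks workaround keys off the Darwin major version reported by kern.osrelease (13 for OS X 10.9), relying on atoi() stopping at the first dot. A standalone sketch of just that version check:

```cpp
#include <cstdlib>
#include <sys/sysctl.h>

static bool is_mavericks_or_later() {
  char osrelease[256];
  size_t size = sizeof(osrelease);
  if (sysctlbyname("kern.osrelease", osrelease, &size, NULL, 0) != 0) {
    return false;  // be conservative if the sysctl fails
  }
  // atoi stops at the first '.', yielding the Darwin major version;
  // 13 corresponds to OS_X_10_9_0_KERNEL_MAJOR_VERSION above.
  return atoi(osrelease) >= 13;
}
```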
--- a/src/share/vm/asm/assembler.cpp Sun Nov 03 07:50:24 2013 +0000
+++ b/src/share/vm/asm/assembler.cpp Tue Dec 03 14:13:06 2013 +0400
@@ -122,7 +122,7 @@
 void AbstractAssembler::generate_stack_overflow_check( int frame_size_in_bytes) {
   if (UseStackBanging) {
     // Each code entry causes one stack bang n pages down the stack where n
-    // is configurable by StackBangPages. The setting depends on the maximum
+    // is configurable by StackShadowPages. The setting depends on the maximum
     // depth of VM call stack or native before going back into java code,
     // since only java code can raise a stack overflow exception using the
     // stack banging mechanism. The VM and native code does not detect stack
--- a/src/share/vm/c1/c1_GraphBuilder.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1873,7 +1873,7 @@ // number of implementors for decl_interface is 0 or 1. If // it's 0 then no class implements decl_interface and there's // no point in inlining. - if (!holder->is_loaded() || decl_interface->nof_implementors() != 1) { + if (!holder->is_loaded() || decl_interface->nof_implementors() != 1 || decl_interface->has_default_methods()) { singleton = NULL; } } @@ -4338,6 +4338,11 @@ #endif // PRODUCT void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) { + // A default method's holder is an interface + if (known_holder != NULL && known_holder->is_interface()) { + assert(known_holder->is_instance_klass() && ((ciInstanceKlass*)known_holder)->has_default_methods(), "should be default method"); + known_holder = NULL; + } append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined)); }
--- a/src/share/vm/c1/c1_LIRGenerator.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -2574,8 +2574,25 @@ __ jump(x->default_sux()); } - -ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k) { +/** + * Emit profiling code if needed for arguments, parameters, return value types + * + * @param md MDO the code will update at runtime + * @param md_base_offset common offset in the MDO for this profile and subsequent ones + * @param md_offset offset in the MDO (on top of md_base_offset) for this profile + * @param profiled_k current profile + * @param obj IR node for the object to be profiled + * @param mdp register to hold the pointer inside the MDO (md + md_base_offset). + * Set once we find an update to make and use for next ones. + * @param not_null true if we know obj cannot be null + * @param signature_at_call_k signature at call for obj + * @param callee_signature_k signature of callee for obj + * at call and callee signatures differ at method handle call + * @return the only klass we know will ever be seen at this profile point + */ +ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, + Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k, + ciKlass* callee_signature_k) { ciKlass* result = NULL; bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k); bool do_update = !TypeEntries::is_type_unknown(profiled_k); @@ -2590,9 +2607,9 @@ if (do_update) { // try to find exact type, using CHA if possible, so that loading // the klass from the object can be avoided - ciType* type = arg->exact_type(); + ciType* type = obj->exact_type(); if (type == NULL) { - type = arg->declared_type(); + type = obj->declared_type(); type = comp->cha_exact_type(type); } assert(type == NULL || type->is_klass(), "type should be class"); @@ -2608,23 +2625,33 @@ ciKlass* exact_signature_k = NULL; if (do_update) { // Is the type from the signature exact (the only one possible)? - exact_signature_k = signature_k->exact_klass(); + exact_signature_k = signature_at_call_k->exact_klass(); if (exact_signature_k == NULL) { - exact_signature_k = comp->cha_exact_type(signature_k); + exact_signature_k = comp->cha_exact_type(signature_at_call_k); } else { result = exact_signature_k; - do_update = false; // Known statically. 
No need to emit any code: prevent // LIR_Assembler::emit_profile_type() from emitting useless code profiled_k = ciTypeEntries::with_status(result, profiled_k); } if (exact_signature_k != NULL && exact_klass != exact_signature_k) { - assert(exact_klass == NULL, "arg and signature disagree?"); + assert(exact_klass == NULL, "obj and signature disagree?"); // sometimes the type of the signature is better than the best type // the compiler has exact_klass = exact_signature_k; - do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass; } + if (callee_signature_k != NULL && + callee_signature_k != signature_at_call_k) { + ciKlass* improved_klass = callee_signature_k->exact_klass(); + if (improved_klass == NULL) { + improved_klass = comp->cha_exact_type(callee_signature_k); + } + if (improved_klass != NULL && exact_klass != improved_klass) { + assert(exact_klass == NULL, "obj and signature disagree?"); + exact_klass = exact_signature_k; + } + } + do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass; } if (!do_null && !do_update) { @@ -2640,7 +2667,7 @@ __ leal(LIR_OprFact::address(base_type_address), mdp); } } - LIRItem value(arg, this); + LIRItem value(obj, this); value.load_item(); __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA), value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL); @@ -2665,9 +2692,9 @@ if (t == T_OBJECT || t == T_ARRAY) { intptr_t profiled_k = parameters->type(j); Local* local = x->state()->local_at(java_index)->as_Local(); - ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)), - in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)), - profiled_k, local, mdp, false, local->declared_type()->as_klass()); + ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)), + in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)), + profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL); // If the profile is known statically set it once for all and do not emit any code if (exact != NULL) { md->set_parameter_type(j, exact); @@ -3129,19 +3156,28 @@ Bytecodes::Code bc = x->method()->java_code_at_bci(bci); int start = 0; int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments(); - if (x->nb_profiled_args() < stop) { - // if called through method handle invoke, some arguments may have been popped - stop = x->nb_profiled_args(); + if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) { + // first argument is not profiled at call (method handle invoke) + assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected"); + start = 1; } - ciSignature* sig = x->callee()->signature(); + ciSignature* callee_signature = x->callee()->signature(); // method handle call to virtual method bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc); - ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL); - for (int i = 0; i < stop; i++) { + ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? 
x->callee()->holder() : NULL); + + bool ignored_will_link; + ciSignature* signature_at_call = NULL; + x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); + ciSignatureStream signature_at_call_stream(signature_at_call); + + // if called through method handle invoke, some arguments may have been popped + for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) { int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset()); - ciKlass* exact = profile_arg_type(md, base_offset, off, - args->type(i), x->profiled_arg_at(i+start), mdp, - !x->arg_needs_null_check(i+start), sig_stream.next_klass()); + ciKlass* exact = profile_type(md, base_offset, off, + args->type(i), x->profiled_arg_at(i+start), mdp, + !x->arg_needs_null_check(i+start), + signature_at_call_stream.next_klass(), callee_signature_stream.next_klass()); if (exact != NULL) { md->set_argument_type(bci, i, exact); } @@ -3176,8 +3212,8 @@ int bci = x->bci_of_invoke(); Bytecodes::Code bc = x->method()->java_code_at_bci(bci); // The first parameter is the receiver so that's what we start - // with if it exists. On exception if method handle call to - // virtual method has receiver in the args list + // with if it exists. One exception is method handle call to + // virtual method: the receiver is in the args list if (arg == NULL || !Bytecodes::has_receiver(bc)) { i = 1; arg = x->profiled_arg_at(0); @@ -3186,9 +3222,9 @@ int k = 0; // to iterate on the profile data for (;;) { intptr_t profiled_k = parameters->type(k); - ciKlass* exact = profile_arg_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)), - in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)), - profiled_k, arg, mdp, not_null, sig_stream.next_klass()); + ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)), + in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)), + profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL); // If the profile is known statically set it once for all and do not emit any code if (exact != NULL) { md->set_parameter_type(k, exact); @@ -3247,9 +3283,16 @@ assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type"); ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret(); LIR_Opr mdp = LIR_OprFact::illegalOpr; - ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()), - ret->type(), x->ret(), mdp, - !x->needs_null_check(), x->callee()->signature()->return_type()->as_klass()); + + bool ignored_will_link; + ciSignature* signature_at_call = NULL; + x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call); + + ciKlass* exact = profile_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()), + ret->type(), x->ret(), mdp, + !x->needs_null_check(), + signature_at_call->return_type()->as_klass(), + x->callee()->signature()->return_type()->as_klass()); if (exact != NULL) { md->set_return_type(bci, exact); }
--- a/src/share/vm/c1/c1_LIRGenerator.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -434,7 +434,9 @@ void do_ThreadIDIntrinsic(Intrinsic* x); void do_ClassIDIntrinsic(Intrinsic* x); #endif - ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k); + ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, + Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k, + ciKlass* callee_signature_k); void profile_arguments(ProfileCall* x); void profile_parameters(Base* x); void profile_parameters_at_call(ProfileCall* x);
--- a/src/share/vm/c1/c1_LinearScan.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/c1/c1_LinearScan.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1138,8 +1138,10 @@ } } } - - } else if (opr_type != T_LONG) { + // We want to sometimes use logical operations on pointers, in particular in GC barriers. + // Since 64bit logical operations do not current support operands on stack, we have to make sure + // T_OBJECT doesn't get spilled along with T_LONG. + } else if (opr_type != T_LONG LP64_ONLY(&& opr_type != T_OBJECT)) { // integer instruction (note: long operands must always be in register) switch (op->code()) { case lir_cmp:
--- a/src/share/vm/c1/c1_globals.hpp Sun Nov 03 07:50:24 2013 +0000
+++ b/src/share/vm/c1/c1_globals.hpp Tue Dec 03 14:13:06 2013 +0400
@@ -341,9 +341,6 @@
   diagnostic(bool, C1PatchInvokeDynamic, true,                              \
           "Patch invokedynamic appendix not known at compile time")         \
                                                                             \
-  develop(intx, MaxForceInlineLevel, 100,                                   \
-          "maximum number of nested @ForceInline calls that are inlined")   \
-                                                                            \
 // Read default values for c1 globals
--- a/src/share/vm/ci/ciEnv.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/ci/ciEnv.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -935,7 +935,9 @@ // Prevent SystemDictionary::add_to_hierarchy from running // and invalidating our dependencies until we install this method. + // No safepoints are allowed. Otherwise, class redefinition can occur in between. MutexLocker ml(Compile_lock); + No_Safepoint_Verifier nsv; // Change in Jvmti state may invalidate compilation. if (!failing() && @@ -1001,31 +1003,15 @@ // Free codeBlobs code_buffer->free_blob(); - // stress test 6243940 by immediately making the method - // non-entrant behind the system's back. This has serious - // side effects on the code cache and is not meant for - // general stress testing - if (nm != NULL && StressNonEntrant) { - MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); - NativeJump::patch_verified_entry(nm->entry_point(), nm->verified_entry_point(), - SharedRuntime::get_handle_wrong_method_stub()); - } - - if (nm == NULL) { - // The CodeCache is full. Print out warning and disable compilation. - record_failure("code cache is full"); - { - MutexUnlocker ml(Compile_lock); - MutexUnlocker locker(MethodCompileQueue_lock); - CompileBroker::handle_full_code_cache(); - } - } else { + if (nm != NULL) { nm->set_has_unsafe_access(has_unsafe_access); nm->set_has_wide_vectors(has_wide_vectors); // Record successful registration. // (Put nm into the task handle *before* publishing to the Java heap.) - if (task() != NULL) task()->set_code(nm); + if (task() != NULL) { + task()->set_code(nm); + } if (entry_bci == InvocationEntryBci) { if (TieredCompilation) { @@ -1036,11 +1022,11 @@ char *method_name = method->name_and_sig_as_C_string(); tty->print_cr("Replacing method %s", method_name); } - if (old != NULL ) { + if (old != NULL) { old->make_not_entrant(); } } - if (TraceNMethodInstalls ) { + if (TraceNMethodInstalls) { ResourceMark rm; char *method_name = method->name_and_sig_as_C_string(); ttyLocker ttyl; @@ -1051,7 +1037,7 @@ // Allow the code to be executed method->set_code(method, nm); } else { - if (TraceNMethodInstalls ) { + if (TraceNMethodInstalls) { ResourceMark rm; char *method_name = method->name_and_sig_as_C_string(); ttyLocker ttyl; @@ -1061,15 +1047,18 @@ entry_bci); } method->method_holder()->add_osr_nmethod(nm); - } } - } - // JVMTI -- compiled method notification (must be done outside lock) + } // safepoints are allowed again + if (nm != NULL) { + // JVMTI -- compiled method notification (must be done outside lock) nm->post_compiled_method_load_event(); + } else { + // The CodeCache is full. Print out warning and disable compilation. + record_failure("code cache is full"); + CompileBroker::handle_full_code_cache(); } - }
--- a/src/share/vm/ci/ciInstanceKlass.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/ci/ciInstanceKlass.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -57,6 +57,7 @@ _init_state = ik->init_state(); _nonstatic_field_size = ik->nonstatic_field_size(); _has_nonstatic_fields = ik->has_nonstatic_fields(); + _has_default_methods = ik->has_default_methods(); _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields: _implementor = NULL; // we will fill these lazily
--- a/src/share/vm/ci/ciInstanceKlass.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/ci/ciInstanceKlass.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -52,6 +52,7 @@ bool _has_finalizer; bool _has_subklass; bool _has_nonstatic_fields; + bool _has_default_methods; ciFlags _flags; jint _nonstatic_field_size; @@ -171,6 +172,11 @@ } } + bool has_default_methods() { + assert(is_loaded(), "must be loaded"); + return _has_default_methods; + } + ciInstanceKlass* get_canonical_holder(int offset); ciField* get_field_by_offset(int field_offset, bool is_static); ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
--- a/src/share/vm/ci/ciMethodData.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/ci/ciMethodData.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -77,7 +77,9 @@ static ciKlass* valid_ciklass(intptr_t k) { if (!TypeEntries::is_type_none(k) && !TypeEntries::is_type_unknown(k)) { - return (ciKlass*)TypeEntries::klass_part(k); + ciKlass* res = (ciKlass*)TypeEntries::klass_part(k); + assert(res != NULL, "invalid"); + return res; } else { return NULL; }
--- a/src/share/vm/classfile/classFileParser.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/classfile/classFileParser.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -4080,7 +4080,7 @@ // Generate any default methods - default methods are interface methods // that have a default implementation. This is new with the Lambda project. - if (has_default_methods && !access_flags.is_interface() ) { + if (has_default_methods) { DefaultMethods::generate_default_methods( this_klass(), &all_mirandas, CHECK_(nullHandle)); }
--- a/src/share/vm/classfile/classLoaderData.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/classfile/classLoaderData.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -131,6 +131,17 @@ } } +void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) { + // Lock to avoid classes being modified/added/removed during iteration + MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag); + for (Klass* k = _klasses; k != NULL; k = k->next_link()) { + // Do not filter ArrayKlass oops here... + if (k->oop_is_array() || (k->oop_is_instance() && InstanceKlass::cast(k)->is_loaded())) { + klass_closure->do_klass(k); + } + } +} + void ClassLoaderData::classes_do(void f(InstanceKlass*)) { for (Klass* k = _klasses; k != NULL; k = k->next_link()) { if (k->oop_is_instance()) { @@ -600,6 +611,12 @@ } } +void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) { + for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) { + cld->loaded_classes_do(klass_closure); + } +} + void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
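A hedged usage sketch for the new iteration entry point; KlassClosure::do_klass is the real hook, while CountLoadedKlasses is a hypothetical caller invented for illustration:

class CountLoadedKlasses : public KlassClosure {
 public:
  int _count;
  CountLoadedKlasses() : _count(0) {}
  void do_klass(Klass* k) { _count++; }  // called once per loaded class
};

CountLoadedKlasses closure;
ClassLoaderDataGraph::loaded_classes_do(&closure);  // walks each CLD under its metaspace lock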
--- a/src/share/vm/classfile/classLoaderData.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/classfile/classLoaderData.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -78,6 +78,7 @@ static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim); static void classes_do(KlassClosure* klass_closure); static void classes_do(void f(Klass* const)); + static void loaded_classes_do(KlassClosure* klass_closure); static void classes_unloading_do(void f(Klass* const)); static bool do_unloading(BoolObjectClosure* is_alive); @@ -186,6 +187,7 @@ bool keep_alive() const { return _keep_alive; } bool is_alive(BoolObjectClosure* is_alive_closure) const; void classes_do(void f(Klass*)); + void loaded_classes_do(KlassClosure* klass_closure); void classes_do(void f(InstanceKlass*)); // Deallocate free list during class unloading.
--- a/src/share/vm/classfile/defaultMethods.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/classfile/defaultMethods.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -171,8 +171,12 @@ } bool is_cancelled() const { return _cancelled; } + // This code used to skip interface classes because their only + // superclass was j.l.Object, which would also be covered by class + // superclass hierarchy walks. Now that the starting point can be + // an interface, we must ensure we catch j.l.Object as the super. static bool has_super(InstanceKlass* cls) { - return cls->super() != NULL && !cls->is_interface(); + return cls->super() != NULL; } Node* node_at_depth(int i) const { @@ -391,24 +395,32 @@ return; } + // Qualified methods are maximally-specific methods. + // These include public, instance concrete (=default) and abstract methods. GrowableArray<Method*> qualified_methods; + int num_defaults = 0; + int default_index = -1; + int qualified_index = -1; for (int i = 0; i < _members.length(); ++i) { Pair<Method*,QualifiedState> entry = _members.at(i); if (entry.second == QUALIFIED) { qualified_methods.append(entry.first); + qualified_index++; + if (entry.first->is_default_method()) { + num_defaults++; + default_index = qualified_index; + } } } if (qualified_methods.length() == 0) { _exception_message = generate_no_defaults_message(CHECK); _exception_name = vmSymbols::java_lang_AbstractMethodError(); + // If only one qualified method is default, select it - } else if (qualified_methods.length() == 1) { - // leave abstract methods alone, they will be found via normal search path - Method* method = qualified_methods.at(0); - if (!method->is_abstract()) { - _selected_target = qualified_methods.at(0); - } - } else { + } else if (num_defaults == 1) { + _selected_target = qualified_methods.at(default_index); + } else if (num_defaults > 1) { _exception_message = generate_conflicts_message(&qualified_methods,CHECK); _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError(); if (TraceDefaultMethods) { @@ -416,6 +428,7 @@ tty->print_cr(""); } } + // leave abstract methods alone, they will be found via normal search path } bool contains_signature(Symbol* query) { @@ -695,8 +708,10 @@ Method* m = iklass->find_method(_method_name, _method_signature); // private interface methods are not candidates for default methods // invokespecial to private interface methods doesn't use default method logic + // The overpasses are your supertypes' errors; we do not include them // future: take access controls into account for superclass methods - if (m != NULL && !m->is_static() && (!iklass->is_interface() || m->is_public())) { + if (m != NULL && !m->is_static() && !m->is_overpass() && + (!iklass->is_interface() || m->is_public())) { if (_family == NULL) { _family = new StatefulMethodFamily(); } @@ -772,7 +787,8 @@ #ifndef PRODUCT if (TraceDefaultMethods) { ResourceMark rm; // be careful with these! - tty->print_cr("Class %s requires default method processing", + tty->print_cr("%s %s requires default method processing", + klass->is_interface() ? 
"Interface" : "Class", klass->name()->as_klass_external_name()); PrintHierarchy printer; printer.run(klass); @@ -797,7 +813,7 @@ } #ifndef PRODUCT if (TraceDefaultMethods) { - tty->print_cr("Creating overpasses..."); + tty->print_cr("Creating defaults and overpasses..."); } #endif // ndef PRODUCT @@ -1067,7 +1083,9 @@ klass->set_initial_method_idnum(new_size); ClassLoaderData* cld = klass->class_loader_data(); - MetadataFactory::free_array(cld, original_methods); + if (original_methods ->length() > 0) { + MetadataFactory::free_array(cld, original_methods); + } if (original_ordering->length() > 0) { klass->set_method_ordering(merged_ordering); MetadataFactory::free_array(cld, original_ordering);
--- a/src/share/vm/classfile/metadataOnStackMark.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/classfile/metadataOnStackMark.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -27,8 +27,10 @@ #include "code/codeCache.hpp" #include "compiler/compileBroker.hpp" #include "oops/metadata.hpp" +#include "prims/jvmtiImpl.hpp" #include "runtime/synchronizer.hpp" #include "runtime/thread.hpp" +#include "services/threadService.hpp" #include "utilities/growableArray.hpp" @@ -48,6 +50,8 @@ Threads::metadata_do(Metadata::mark_on_stack); CodeCache::alive_nmethods_do(nmethod::mark_on_stack); CompileBroker::mark_on_stack(); + JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack); + ThreadService::metadata_do(Metadata::mark_on_stack); } MetadataOnStackMark::~MetadataOnStackMark() {
--- a/src/share/vm/classfile/systemDictionary.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/classfile/systemDictionary.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -141,7 +141,6 @@ /* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \ /* Universe::is_gte_jdk14x_version() is not set up by this point. */ \ /* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \ - do_klass(lambda_MagicLambdaImpl_klass, java_lang_invoke_MagicLambdaImpl, Opt ) \ do_klass(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt ) \ do_klass(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \ do_klass(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \ @@ -173,8 +172,6 @@ /* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \ do_klass(nio_Buffer_klass, java_nio_Buffer, Opt ) \ \ - do_klass(PostVMInitHook_klass, sun_misc_PostVMInitHook, Opt ) \ - \ /* Preload boxing klasses */ \ do_klass(Boolean_klass, java_lang_Boolean, Pre ) \ do_klass(Character_klass, java_lang_Character, Pre ) \
--- a/src/share/vm/classfile/verifier.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/classfile/verifier.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -188,10 +188,8 @@ bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) { Symbol* name = klass->name(); Klass* refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass(); - Klass* lambda_magic_klass = SystemDictionary::lambda_MagicLambdaImpl_klass(); bool is_reflect = refl_magic_klass != NULL && klass->is_subtype_of(refl_magic_klass); - bool is_lambda = lambda_magic_klass != NULL && klass->is_subtype_of(lambda_magic_klass); return (should_verify_for(klass->class_loader(), should_verify_class) && // return if the class is a bootstrapping class @@ -215,9 +213,7 @@ // NOTE: this is called too early in the bootstrapping process to be // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection. // Also for lambda generated code, gte jdk8 - (!is_reflect || VerifyReflectionBytecodes) && - (!is_lambda || VerifyLambdaBytecodes) - ); + (!is_reflect || VerifyReflectionBytecodes)); } Symbol* Verifier::inference_verify(
--- a/src/share/vm/classfile/vmSymbols.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/classfile/vmSymbols.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -273,7 +273,6 @@ template(java_lang_invoke_Stable_signature, "Ljava/lang/invoke/Stable;") \ template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \ template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;") \ - template(java_lang_invoke_MagicLambdaImpl, "java/lang/invoke/MagicLambdaImpl") \ /* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \ template(findMethodHandleType_name, "findMethodHandleType") \ template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
--- a/src/share/vm/code/nmethod.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/code/nmethod.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -618,21 +618,18 @@ // record this nmethod as dependent on this klass InstanceKlass::cast(klass)->add_dependent_nmethod(nm); } - } - NOT_PRODUCT(if (nm != NULL) nmethod_stats.note_nmethod(nm)); - if (PrintAssembly && nm != NULL) { - Disassembler::decode(nm); + NOT_PRODUCT(nmethod_stats.note_nmethod(nm)); + if (PrintAssembly) { + Disassembler::decode(nm); + } } } - - // verify nmethod - debug_only(if (nm) nm->verify();) // might block - + // Do verification and logging outside CodeCache_lock. if (nm != NULL) { + // Safepoints in nmethod::verify aren't allowed because nm hasn't been installed yet. + DEBUG_ONLY(nm->verify();) nm->log_new_nmethod(); } - - // done return nm; } @@ -1262,7 +1259,7 @@ set_osr_link(NULL); //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods - NMethodSweeper::notify(); + NMethodSweeper::report_state_change(this); } void nmethod::invalidate_osr_method() { @@ -1296,7 +1293,9 @@ } } -// Common functionality for both make_not_entrant and make_zombie +/** + * Common functionality for both make_not_entrant and make_zombie + */ bool nmethod::make_not_entrant_or_zombie(unsigned int state) { assert(state == zombie || state == not_entrant, "must be zombie or not_entrant"); assert(!is_zombie(), "should not already be a zombie"); @@ -1420,9 +1419,7 @@ tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie"); } - // Make sweeper aware that there is a zombie method that needs to be removed - NMethodSweeper::notify(); - + NMethodSweeper::report_state_change(this); return true; } @@ -2395,20 +2392,23 @@ void nmethod::verify_interrupt_point(address call_site) { - // This code does not work in release mode since - // owns_lock only is available in debug mode. - CompiledIC* ic = NULL; - Thread *cur = Thread::current(); - if (CompiledIC_lock->owner() == cur || - ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) && - SafepointSynchronize::is_at_safepoint())) { - ic = CompiledIC_at(this, call_site); - CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); - } else { - MutexLocker ml_verify (CompiledIC_lock); - ic = CompiledIC_at(this, call_site); + // Verify IC only when nmethod installation is finished. + bool is_installed = (method()->code() == this) // nmethod is in state 'alive' and installed + || !this->is_in_use(); // nmethod is installed, but not in 'alive' state + if (is_installed) { + Thread *cur = Thread::current(); + if (CompiledIC_lock->owner() == cur || + ((cur->is_VM_thread() || cur->is_ConcurrentGC_thread()) && + SafepointSynchronize::is_at_safepoint())) { + CompiledIC_at(this, call_site); + CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); + } else { + MutexLocker ml_verify (CompiledIC_lock); + CompiledIC_at(this, call_site); + } } - PcDesc* pd = pc_desc_at(ic->end_of_call()); + + PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address()); assert(pd != NULL, "PcDesc must exist"); for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), pd->obj_decode_offset(), pd->should_reexecute(),
--- a/src/share/vm/compiler/compileBroker.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/compiler/compileBroker.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -126,6 +126,7 @@ bool CompileBroker::_initialized = false; volatile bool CompileBroker::_should_block = false; +volatile jint CompileBroker::_print_compilation_warning = 0; volatile jint CompileBroker::_should_compile_new_jobs = run_compilation; // The installed compiler(s) @@ -780,6 +781,10 @@ void CompileBroker::compilation_init() { _last_method_compiled[0] = '\0'; + // No need to initialize compilation system if we do not use it. + if (!UseCompiler) { + return; + } #ifndef SHARK // Set the interface to the current compiler(s). int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple); @@ -2023,11 +2028,10 @@ #endif } -// ------------------------------------------------------------------ -// CompileBroker::handle_full_code_cache -// -// The CodeCache is full. Print out warning and disable compilation or -// try code cache cleaning so compilation can continue later. +/** + * The CodeCache is full. Print out warning and disable compilation + * or try code cache cleaning so compilation can continue later. + */ void CompileBroker::handle_full_code_cache() { UseInterpreter = true; if (UseCompiler || AlwaysCompileLoopMethods ) { @@ -2044,12 +2048,9 @@ xtty->stamp(); xtty->end_elem(); } - warning("CodeCache is full. Compiler has been disabled."); - warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); CodeCache::report_codemem_full(); - #ifndef PRODUCT if (CompileTheWorld || ExitOnFullCodeCache) { codecache_print(/* detailed= */ true); @@ -2062,17 +2063,22 @@ // Since code cache is full, immediately stop new compiles if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { NMethodSweeper::log_sweep("disable_compiler"); - - // Switch to 'vm_state'. This ensures that possibly_sweep() can be called - // without having to consider the state in which the current thread is. - ThreadInVMfromUnknown in_vm; - NMethodSweeper::possibly_sweep(); } + // Switch to 'vm_state'. This ensures that possibly_sweep() can be called + // without having to consider the state in which the current thread is. + ThreadInVMfromUnknown in_vm; + NMethodSweeper::possibly_sweep(); } else { disable_compilation_forever(); } + + // Print warning only once + if (should_print_compiler_warning()) { + warning("CodeCache is full. Compiler has been disabled."); + warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize="); + codecache_print(/* detailed= */ true); + } } - codecache_print(/* detailed= */ true); } // ------------------------------------------------------------------
--- a/src/share/vm/compiler/compileBroker.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/compiler/compileBroker.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -315,6 +315,8 @@ static int _sum_nmethod_code_size; static long _peak_compilation_time; + static volatile jint _print_compilation_warning; + static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS); static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count); static bool compilation_is_complete (methodHandle method, int osr_bci, int comp_level); @@ -418,7 +420,11 @@ return _should_compile_new_jobs == shutdown_compilaton; } static void handle_full_code_cache(); - + // Ensures that warning is only printed once. + static bool should_print_compiler_warning() { + jint old = Atomic::cmpxchg(1, &_print_compilation_warning, 0); + return old == 0; + } // Return total compilation ticks static jlong total_compilation_ticks() { return _perf_total_compilation != NULL ? _perf_total_compilation->get_value() : 0;
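The once-only idiom behind should_print_compiler_warning(), shown in isolation (the flag and helper names below are illustrative): Atomic::cmpxchg returns the previous value, so the first thread to swing the flag from 0 to 1 sees 0 and wins, and every later caller sees 1.

static volatile jint _warned = 0;

static bool warn_once() {
  // cmpxchg(exchange_value, dest, compare_value) -> old value of *dest
  return Atomic::cmpxchg(1, &_warned, 0) == 0;  // true for exactly one thread
}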
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -47,8 +47,9 @@ // ConcurrentMarkSweepPolicy methods // -ConcurrentMarkSweepPolicy::ConcurrentMarkSweepPolicy() { - initialize_all(); +void ConcurrentMarkSweepPolicy::initialize_alignments() { + _space_alignment = _gen_alignment = (uintx)Generation::GenGrain; + _heap_alignment = compute_heap_alignment(); } void ConcurrentMarkSweepPolicy::initialize_generations() {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -29,10 +29,11 @@ class ConcurrentMarkSweepPolicy : public TwoGenerationCollectorPolicy { protected: + void initialize_alignments(); void initialize_generations(); public: - ConcurrentMarkSweepPolicy(); + ConcurrentMarkSweepPolicy() {} ConcurrentMarkSweepPolicy* as_concurrent_mark_sweep_policy() { return this; }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -594,9 +594,9 @@ _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"), _completed_initialization(false), _collector_policy(cp), - _should_unload_classes(false), + _should_unload_classes(CMSClassUnloadingEnabled), _concurrent_cycles_since_last_unload(0), - _roots_scanning_options(0), + _roots_scanning_options(SharedHeap::SO_None), _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding), _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding), _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()), @@ -788,14 +788,6 @@ && _survivor_chunk_index == 0), "Error"); - // Choose what strong roots should be scanned depending on verification options - if (!CMSClassUnloadingEnabled) { - // If class unloading is disabled we want to include all classes into the root set. - add_root_scanning_option(SharedHeap::SO_AllClasses); - } else { - add_root_scanning_option(SharedHeap::SO_SystemClasses); - } - NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;) _gc_counters = new CollectorCounters("CMS", 1); _completed_initialization = true; @@ -2532,6 +2524,9 @@ // Snapshot the soft reference policy to be used in this collection cycle. ref_processor()->setup_policy(clear_all_soft_refs); + // Decide if class unloading should be done + update_should_unload_classes(); + bool init_mark_was_synchronous = false; // until proven otherwise while (_collectorState != Idling) { if (TraceCMSState) { @@ -3310,7 +3305,10 @@ || VerifyBeforeExit; const int rso = SharedHeap::SO_Strings | SharedHeap::SO_CodeCache; + // We set the proper root scanning options for this CMS cycle here. if (should_unload_classes()) { // Should unload classes this cycle + remove_root_scanning_option(SharedHeap::SO_AllClasses); + add_root_scanning_option(SharedHeap::SO_SystemClasses); remove_root_scanning_option(rso); // Shrink the root set appropriately set_verifying(should_verify); // Set verification state for this cycle return; // Nothing else needs to be done at this time @@ -3318,6 +3316,9 @@ // Not unloading classes this cycle assert(!should_unload_classes(), "Inconsistency!"); + remove_root_scanning_option(SharedHeap::SO_SystemClasses); + add_root_scanning_option(SharedHeap::SO_AllClasses); + if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { // Include symbols, strings and code cache elements to prevent their resurrection. add_root_scanning_option(rso);
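Condensing the CMS change above into one place (a sketch assembled from the hunks, not new code): the class-root scanning option is no longer fixed at collector construction but re-selected at the start of every cycle, right after update_should_unload_classes() decides whether this cycle unloads classes.

update_should_unload_classes();           // once per cycle
if (should_unload_classes()) {
  remove_root_scanning_option(SharedHeap::SO_AllClasses);
  add_root_scanning_option(SharedHeap::SO_SystemClasses);
} else {
  remove_root_scanning_option(SharedHeap::SO_SystemClasses);
  add_root_scanning_option(SharedHeap::SO_AllClasses);
}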
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -2008,7 +2008,7 @@ size_t init_byte_size = collector_policy()->initial_heap_byte_size(); size_t max_byte_size = collector_policy()->max_heap_byte_size(); - size_t heap_alignment = collector_policy()->max_alignment(); + size_t heap_alignment = collector_policy()->heap_alignment(); // Ensure that the sizes are properly aligned. Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap"); @@ -6656,13 +6656,18 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); HeapRegion* hr = _g1h->heap_region_containing(obj); - assert(!hr->isHumongous(), "code root in humongous region?"); + assert(!hr->continuesHumongous(), + err_msg("trying to add code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT + " starting at "HR_FORMAT, + _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); // HeapRegion::add_strong_code_root() avoids adding duplicate // entries but having duplicates is OK since we "mark" nmethods // as visited when we scan the strong code root lists during the GC. hr->add_strong_code_root(_nm); - assert(hr->rem_set()->strong_code_roots_list_contains(_nm), "add failed?"); + assert(hr->rem_set()->strong_code_roots_list_contains(_nm), + err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT, + _nm, HR_FORMAT_PARAMS(hr))); } } @@ -6683,9 +6688,15 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); HeapRegion* hr = _g1h->heap_region_containing(obj); - assert(!hr->isHumongous(), "code root in humongous region?"); + assert(!hr->continuesHumongous(), + err_msg("trying to remove code root "PTR_FORMAT" in continuation of humongous region "HR_FORMAT + " starting at "HR_FORMAT, + _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); + hr->remove_strong_code_root(_nm); - assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), "remove failed?"); + assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), + err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT, + _nm, HR_FORMAT_PARAMS(hr))); } } @@ -6716,7 +6727,9 @@ class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure { public: bool doHeapRegion(HeapRegion *hr) { - assert(!hr->isHumongous(), "humongous region in collection set?"); + assert(!hr->isHumongous(), + err_msg("humongous region "HR_FORMAT" should not have been added to collection set", + HR_FORMAT_PARAMS(hr))); hr->migrate_strong_code_roots(); return false; } @@ -6796,9 +6809,13 @@ bool doHeapRegion(HeapRegion *hr) { HeapRegionRemSet* hrrs = hr->rem_set(); - if (hr->isHumongous()) { - // Code roots should never be attached to a humongous region - assert(hrrs->strong_code_roots_list_length() == 0, "sanity"); + if (hr->continuesHumongous()) { + // Code roots should never be attached to a continuation of a humongous region + assert(hrrs->strong_code_roots_list_length() == 0, + err_msg("code roots should never be attached to continuations of humongous region "HR_FORMAT + " starting at "HR_FORMAT", but has "INT32_FORMAT, + HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()), + hrrs->strong_code_roots_list_length())); return false; }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -313,27 +313,38 @@ // for the first time during initialization. _reserve_regions = 0; - initialize_all(); _collectionSetChooser = new CollectionSetChooser(); - _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags +} + +void G1CollectorPolicy::initialize_alignments() { + _space_alignment = HeapRegion::GrainBytes; + size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable); + size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); + _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size); } void G1CollectorPolicy::initialize_flags() { - _min_alignment = HeapRegion::GrainBytes; - size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name()); - size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size(); - _max_alignment = MAX3(card_table_alignment, _min_alignment, page_size); + if (G1HeapRegionSize != HeapRegion::GrainBytes) { + FLAG_SET_ERGO(uintx, G1HeapRegionSize, HeapRegion::GrainBytes); + } + if (SurvivorRatio < 1) { vm_exit_during_initialization("Invalid survivor ratio specified"); } CollectorPolicy::initialize_flags(); + _young_gen_sizer = new G1YoungGenSizer(); // Must be after call to initialize_flags } -G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true) { - assert(G1NewSizePercent <= G1MaxNewSizePercent, "Min larger than max"); - assert(G1NewSizePercent > 0 && G1NewSizePercent < 100, "Min out of bounds"); - assert(G1MaxNewSizePercent > 0 && G1MaxNewSizePercent < 100, "Max out of bounds"); +void G1CollectorPolicy::post_heap_initialize() { + uintx max_regions = G1CollectedHeap::heap()->max_regions(); + size_t max_young_size = (size_t)_young_gen_sizer->max_young_length(max_regions) * HeapRegion::GrainBytes; + if (max_young_size != MaxNewSize) { + FLAG_SET_ERGO(uintx, MaxNewSize, max_young_size); + } +} +G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true), + _min_desired_young_length(0), _max_desired_young_length(0) { if (FLAG_IS_CMDLINE(NewRatio)) { if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) { warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio"); @@ -344,8 +355,13 @@ } } - if (FLAG_IS_CMDLINE(NewSize) && FLAG_IS_CMDLINE(MaxNewSize) && NewSize > MaxNewSize) { - vm_exit_during_initialization("Initial young gen size set larger than the maximum young gen size"); + if (NewSize > MaxNewSize) { + if (FLAG_IS_CMDLINE(MaxNewSize)) { + warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). 
" + "A new max generation size of " SIZE_FORMAT "k will be used.", + NewSize/K, MaxNewSize/K, NewSize/K); + } + MaxNewSize = NewSize; } if (FLAG_IS_CMDLINE(NewSize)) { @@ -378,34 +394,48 @@ return MAX2(1U, default_value); } -void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { - assert(new_number_of_heap_regions > 0, "Heap must be initialized"); +void G1YoungGenSizer::recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length) { + assert(number_of_heap_regions > 0, "Heap must be initialized"); switch (_sizer_kind) { case SizerDefaults: - _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions); - _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions); + *min_young_length = calculate_default_min_length(number_of_heap_regions); + *max_young_length = calculate_default_max_length(number_of_heap_regions); break; case SizerNewSizeOnly: - _max_desired_young_length = calculate_default_max_length(new_number_of_heap_regions); - _max_desired_young_length = MAX2(_min_desired_young_length, _max_desired_young_length); + *max_young_length = calculate_default_max_length(number_of_heap_regions); + *max_young_length = MAX2(*min_young_length, *max_young_length); break; case SizerMaxNewSizeOnly: - _min_desired_young_length = calculate_default_min_length(new_number_of_heap_regions); - _min_desired_young_length = MIN2(_min_desired_young_length, _max_desired_young_length); + *min_young_length = calculate_default_min_length(number_of_heap_regions); + *min_young_length = MIN2(*min_young_length, *max_young_length); break; case SizerMaxAndNewSize: // Do nothing. Values set on the command line, don't update them at runtime. break; case SizerNewRatio: - _min_desired_young_length = new_number_of_heap_regions / (NewRatio + 1); - _max_desired_young_length = _min_desired_young_length; + *min_young_length = number_of_heap_regions / (NewRatio + 1); + *max_young_length = *min_young_length; break; default: ShouldNotReachHere(); } - assert(_min_desired_young_length <= _max_desired_young_length, "Invalid min/max young gen size values"); + assert(*min_young_length <= *max_young_length, "Invalid min/max young gen size values"); +} + +uint G1YoungGenSizer::max_young_length(uint number_of_heap_regions) { + // We need to pass the desired values because recalculation may not update these + // values in some cases. + uint temp = _min_desired_young_length; + uint result = _max_desired_young_length; + recalculate_min_max_young_length(number_of_heap_regions, &temp, &result); + return result; +} + +void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) { + recalculate_min_max_young_length(new_number_of_heap_regions, &_min_desired_young_length, + &_max_desired_young_length); } void G1CollectorPolicy::init() {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -136,8 +136,16 @@ uint calculate_default_min_length(uint new_number_of_heap_regions); uint calculate_default_max_length(uint new_number_of_heap_regions); + // Update the given values for minimum and maximum young gen length in regions + // given the number of heap regions depending on the kind of sizing algorithm. + void recalculate_min_max_young_length(uint number_of_heap_regions, uint* min_young_length, uint* max_young_length); + public: G1YoungGenSizer(); + // Calculate the maximum length of the young gen given the number of regions + // depending on the sizing algorithm. + uint max_young_length(uint number_of_heap_regions); + void heap_size_changed(uint new_number_of_heap_regions); uint min_desired_young_length() { return _min_desired_young_length; @@ -165,13 +173,9 @@ G1MMUTracker* _mmu_tracker; + void initialize_alignments(); void initialize_flags(); - void initialize_all() { - initialize_flags(); - initialize_size_info(); - } - CollectionSetChooser* _collectionSetChooser; double _full_collection_start_sec; @@ -217,7 +221,6 @@ return _during_marking; } -private: enum PredictionConstants { TruncatedSeqLength = 10 }; @@ -665,8 +668,6 @@ BarrierSet::Name barrier_set_name() { return BarrierSet::G1SATBCTLogging; } - GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; } - bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0); // Record the start and end of an evacuation pause. @@ -934,6 +935,7 @@ // Calculates survivor space parameters. void update_survivors_policy(); + virtual void post_heap_initialize(); }; // This should move to some place more general...
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -377,11 +377,6 @@ DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); dcqs.concatenate_logs(); - if (G1CollectedHeap::use_parallel_gc_threads()) { - // Don't set the number of workers here. It will be set - // when the task is run - // _seq_task->set_n_termination((int)n_workers()); - } guarantee( _cards_scanned == NULL, "invariant" ); _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC); for (uint i = 0; i < n_workers(); ++i) {
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -174,11 +174,6 @@ region_size = MAX_REGION_SIZE; } - if (region_size != G1HeapRegionSize) { - // Update the flag to make sure that PrintFlagsFinal logs the correct value - FLAG_SET_ERGO(uintx, G1HeapRegionSize, region_size); - } - // And recalculate the log. region_size_log = log2_long((jlong) region_size); @@ -606,7 +601,9 @@ void HeapRegion::migrate_strong_code_roots() { assert(in_collection_set(), "only collection set regions"); - assert(!isHumongous(), "not humongous regions"); + assert(!isHumongous(), + err_msg("humongous region "HR_FORMAT" should not have been added to collection set", + HR_FORMAT_PARAMS(this))); HeapRegionRemSet* hrrs = rem_set(); hrrs->migrate_strong_code_roots(); @@ -727,12 +724,11 @@ return; } - // An H-region should have an empty strong code root list - if (isHumongous()) { + if (continuesHumongous()) { if (strong_code_roots_length > 0) { - gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] is humongous " - "but has "INT32_FORMAT" code root entries", - bottom(), end(), strong_code_roots_length); + gclog_or_tty->print_cr("region "HR_FORMAT" is a continuation of a humongous " + "region but has "INT32_FORMAT" code root entries", + HR_FORMAT_PARAMS(this), strong_code_roots_length); *failures = true; } return;
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1004,7 +1004,9 @@ void HeapRegionRemSet::migrate_strong_code_roots() { assert(hr()->in_collection_set(), "only collection set regions"); - assert(!hr()->isHumongous(), "not humongous regions"); + assert(!hr()->isHumongous(), + err_msg("humongous region "HR_FORMAT" should not have been added to the collection set", + HR_FORMAT_PARAMS(hr()))); ResourceMark rm;
--- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc_implementation/parallelScavenge/adjoiningGenerations.hpp" #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp" +#include "gc_implementation/parallelScavenge/generationSizer.hpp" #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp" // If boundary moving is being used, create the young gen and old @@ -32,15 +33,17 @@ // the old behavior otherwise (with PSYoungGen and PSOldGen). AdjoiningGenerations::AdjoiningGenerations(ReservedSpace old_young_rs, - size_t init_low_byte_size, - size_t min_low_byte_size, - size_t max_low_byte_size, - size_t init_high_byte_size, - size_t min_high_byte_size, - size_t max_high_byte_size, + GenerationSizer* policy, size_t alignment) : - _virtual_spaces(old_young_rs, min_low_byte_size, - min_high_byte_size, alignment) { + _virtual_spaces(old_young_rs, policy->min_gen1_size(), + policy->min_gen0_size(), alignment) { + size_t init_low_byte_size = policy->initial_gen1_size(); + size_t min_low_byte_size = policy->min_gen1_size(); + size_t max_low_byte_size = policy->max_gen1_size(); + size_t init_high_byte_size = policy->initial_gen0_size(); + size_t min_high_byte_size = policy->min_gen0_size(); + size_t max_high_byte_size = policy->max_gen0_size(); + assert(min_low_byte_size <= init_low_byte_size && init_low_byte_size <= max_low_byte_size, "Parameter check"); assert(min_high_byte_size <= init_high_byte_size &&
--- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -28,6 +28,7 @@ #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp" #include "gc_implementation/parallelScavenge/asPSOldGen.hpp" #include "gc_implementation/parallelScavenge/asPSYoungGen.hpp" +#include "gc_implementation/parallelScavenge/generationSizer.hpp" // Contains two generations that both use an AdjoiningVirtualSpaces. @@ -56,14 +57,7 @@ bool request_young_gen_expansion(size_t desired_change_in_bytes); public: - AdjoiningGenerations(ReservedSpace rs, - size_t init_low_byte_size, - size_t min_low_byte_size, - size_t max_low_byte_size, - size_t init_high_byte_size, - size_t min_high_byte_size, - size_t max_high_bytes_size, - size_t alignment); + AdjoiningGenerations(ReservedSpace rs, GenerationSizer* policy, size_t alignment); // Accessors PSYoungGen* young_gen() { return _young_gen; }
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -54,7 +54,6 @@ int level) : PSOldGen(initial_size, min_size, size_limit, gen_name, level), _gen_size_limit(size_limit) - {} ASPSOldGen::ASPSOldGen(PSVirtualSpace* vs, @@ -65,13 +64,11 @@ int level) : PSOldGen(initial_size, min_size, size_limit, gen_name, level), _gen_size_limit(size_limit) - { _virtual_space = vs; } void ASPSOldGen::initialize_work(const char* perf_data_name, int level) { - PSOldGen::initialize_work(perf_data_name, level); // The old gen can grow to gen_size_limit(). _reserve reflects only @@ -94,7 +91,7 @@ ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); size_t result = gen_size_limit() - virtual_space()->committed_size(); - size_t result_aligned = align_size_down(result, heap->old_gen_alignment()); + size_t result_aligned = align_size_down(result, heap->generation_alignment()); return result_aligned; } @@ -105,7 +102,7 @@ } ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t gen_alignment = heap->old_gen_alignment(); + const size_t gen_alignment = heap->generation_alignment(); PSAdaptiveSizePolicy* policy = heap->size_policy(); const size_t working_size = used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -70,13 +70,12 @@ } size_t ASPSYoungGen::available_for_expansion() { - size_t current_committed_size = virtual_space()->committed_size(); assert((gen_size_limit() >= current_committed_size), "generation size limit is wrong"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); size_t result = gen_size_limit() - current_committed_size; - size_t result_aligned = align_size_down(result, heap->young_gen_alignment()); + size_t result_aligned = align_size_down(result, heap->generation_alignment()); return result_aligned; } @@ -85,7 +84,6 @@ // Future implementations could check the survivors and if to_space is in the // right place (below from_space), take a chunk from to_space. size_t ASPSYoungGen::available_for_contraction() { - size_t uncommitted_bytes = virtual_space()->uncommitted_size(); if (uncommitted_bytes != 0) { return uncommitted_bytes; @@ -94,8 +92,8 @@ if (eden_space()->is_empty()) { // Respect the minimum size for eden and for the young gen as a whole. ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t eden_alignment = heap->intra_heap_alignment(); - const size_t gen_alignment = heap->young_gen_alignment(); + const size_t eden_alignment = heap->space_alignment(); + const size_t gen_alignment = heap->generation_alignment(); assert(eden_space()->capacity_in_bytes() >= eden_alignment, "Alignment is wrong"); @@ -121,7 +119,6 @@ gclog_or_tty->print_cr(" gen_avail %d K", gen_avail/K); } return result_aligned; - } return 0; @@ -132,7 +129,7 @@ // to_space can be. size_t ASPSYoungGen::available_to_live() { ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_heap_alignment(); + const size_t alignment = heap->space_alignment(); // Include any space that is committed but is not in eden. size_t available = pointer_delta(eden_space()->bottom(), @@ -296,7 +293,7 @@ assert(eden_start < from_start, "Cannot push into from_space"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_heap_alignment(); + const size_t alignment = heap->space_alignment(); const bool maintain_minimum = (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/parallelScavenge/generationSizer.hpp" +#include "memory/collectorPolicy.hpp" + +void GenerationSizer::trace_gen_sizes(const char* const str) { + if (TracePageSizes) { + tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " " + SIZE_FORMAT "," SIZE_FORMAT " " + SIZE_FORMAT, + str, + _min_gen1_size / K, _max_gen1_size / K, + _min_gen0_size / K, _max_gen0_size / K, + _max_heap_byte_size / K); + } +} + +void GenerationSizer::initialize_alignments() { + _space_alignment = _gen_alignment = default_gen_alignment(); + _heap_alignment = compute_heap_alignment(); +} + +void GenerationSizer::initialize_flags() { + // Do basic sizing work + TwoGenerationCollectorPolicy::initialize_flags(); + + assert(UseSerialGC || + !FLAG_IS_DEFAULT(ParallelGCThreads) || + (ParallelGCThreads > 0), + "ParallelGCThreads should be set before flag initialization"); + + // The survivor ratios are calculated "raw", unlike the + // default gc, which adds 2 to the ratio value. We need to + // make sure the values are valid before using them. + if (MinSurvivorRatio < 3) { + FLAG_SET_ERGO(uintx, MinSurvivorRatio, 3); + } + + if (InitialSurvivorRatio < 3) { + FLAG_SET_ERGO(uintx, InitialSurvivorRatio, 3); + } +} + +void GenerationSizer::initialize_size_info() { + trace_gen_sizes("ps heap raw"); + const size_t page_sz = os::page_size_for_region(_min_heap_byte_size, + _max_heap_byte_size, + 8); + + // Can a page size be something other than a power of two? + assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2"); + size_t new_alignment = round_to(page_sz, _gen_alignment); + if (new_alignment != _gen_alignment) { + _gen_alignment = new_alignment; + _space_alignment = new_alignment; + // Redo everything from the start + initialize_flags(); + } + TwoGenerationCollectorPolicy::initialize_size_info(); + + trace_gen_sizes("ps heap rnd"); +}
--- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -31,41 +31,17 @@ // TwoGenerationCollectorPolicy. Lets reuse it! class GenerationSizer : public TwoGenerationCollectorPolicy { - public: - GenerationSizer() { - // Partial init only! - initialize_flags(); - initialize_size_info(); - } + private: - void initialize_flags() { - // Do basic sizing work - TwoGenerationCollectorPolicy::initialize_flags(); + void trace_gen_sizes(const char* const str); - assert(UseSerialGC || - !FLAG_IS_DEFAULT(ParallelGCThreads) || - (ParallelGCThreads > 0), - "ParallelGCThreads should be set before flag initialization"); + // The alignment used for boundary between young gen and old gen + static size_t default_gen_alignment() { return 64 * K * HeapWordSize; } - // The survivor ratio's are calculated "raw", unlike the - // default gc, which adds 2 to the ratio value. We need to - // make sure the values are valid before using them. - if (MinSurvivorRatio < 3) { - MinSurvivorRatio = 3; - } + protected: - if (InitialSurvivorRatio < 3) { - InitialSurvivorRatio = 3; - } - } - - size_t min_young_gen_size() { return _min_gen0_size; } - size_t young_gen_size() { return _initial_gen0_size; } - size_t max_young_gen_size() { return _max_gen0_size; } - - size_t min_old_gen_size() { return _min_gen1_size; } - size_t old_gen_size() { return _initial_gen1_size; } - size_t max_old_gen_size() { return _max_gen1_size; } + void initialize_alignments(); + void initialize_flags(); + void initialize_size_info(); }; - #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -52,76 +52,20 @@ ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL; GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL; -static void trace_gen_sizes(const char* const str, - size_t og_min, size_t og_max, - size_t yg_min, size_t yg_max) -{ - if (TracePageSizes) { - tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " " - SIZE_FORMAT "," SIZE_FORMAT " " - SIZE_FORMAT, - str, - og_min / K, og_max / K, - yg_min / K, yg_max / K, - (og_max + yg_max) / K); - } -} - jint ParallelScavengeHeap::initialize() { CollectedHeap::pre_initialize(); - // Cannot be initialized until after the flags are parsed - // GenerationSizer flag_parser; + // Initialize collector policy _collector_policy = new GenerationSizer(); - - size_t yg_min_size = _collector_policy->min_young_gen_size(); - size_t yg_max_size = _collector_policy->max_young_gen_size(); - size_t og_min_size = _collector_policy->min_old_gen_size(); - size_t og_max_size = _collector_policy->max_old_gen_size(); - - trace_gen_sizes("ps heap raw", - og_min_size, og_max_size, - yg_min_size, yg_max_size); - - const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size, - yg_max_size + og_max_size, - 8); - - const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz); - const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz); + _collector_policy->initialize_all(); - // Update sizes to reflect the selected page size(s). - // - // NEEDS_CLEANUP. The default TwoGenerationCollectorPolicy uses NewRatio; it - // should check UseAdaptiveSizePolicy. Changes from generationSizer could - // move to the common code. - yg_min_size = align_size_up(yg_min_size, yg_align); - yg_max_size = align_size_up(yg_max_size, yg_align); - size_t yg_cur_size = - align_size_up(_collector_policy->young_gen_size(), yg_align); - yg_cur_size = MAX2(yg_cur_size, yg_min_size); + const size_t heap_size = _collector_policy->max_heap_byte_size(); - og_min_size = align_size_up(og_min_size, og_align); - // Align old gen size down to preserve specified heap size. - assert(og_align == yg_align, "sanity"); - og_max_size = align_size_down(og_max_size, og_align); - og_max_size = MAX2(og_max_size, og_min_size); - size_t og_cur_size = - align_size_down(_collector_policy->old_gen_size(), og_align); - og_cur_size = MAX2(og_cur_size, og_min_size); - - trace_gen_sizes("ps heap rnd", - og_min_size, og_max_size, - yg_min_size, yg_max_size); - - const size_t heap_size = og_max_size + yg_max_size; - - ReservedSpace heap_rs = Universe::reserve_heap(heap_size, og_align); - + ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment()); MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap); - os::trace_page_sizes("ps main", og_min_size + yg_min_size, - og_max_size + yg_max_size, og_page_sz, + os::trace_page_sizes("ps main", _collector_policy->min_heap_byte_size(), + heap_size, generation_alignment(), heap_rs.base(), heap_rs.size()); if (!heap_rs.is_reserved()) { @@ -142,12 +86,6 @@ return JNI_ENOMEM; } - // Initial young gen size is 4 Mb - // - // XXX - what about flag_parser.young_gen_size()? 
- const size_t init_young_size = align_size_up(4 * M, yg_align); - yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size); - // Make up the generations // Calculate the maximum size that a generation can grow. This // includes growth into the other generation. Note that the @@ -157,14 +95,7 @@ double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0; - _gens = new AdjoiningGenerations(heap_rs, - og_cur_size, - og_min_size, - og_max_size, - yg_cur_size, - yg_min_size, - yg_max_size, - yg_align); + _gens = new AdjoiningGenerations(heap_rs, _collector_policy, generation_alignment()); _old_gen = _gens->old_gen(); _young_gen = _gens->young_gen(); @@ -176,7 +107,7 @@ new PSAdaptiveSizePolicy(eden_capacity, initial_promo_size, young_gen()->to_space()->capacity_in_bytes(), - intra_heap_alignment(), + _collector_policy->gen_alignment(), max_gc_pause_sec, max_gc_minor_pause_sec, GCTimeRatio
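In outline, heap initialization now defers every sizing decision to the policy object (a sketch condensed from the hunk above, not new code):

_collector_policy = new GenerationSizer();  // derives min/initial/max gen sizes
_collector_policy->initialize_all();        // alignments, flags, size info
const size_t heap_size = _collector_policy->max_heap_byte_size();
ReservedSpace heap_rs = Universe::reserve_heap(heap_size,
                            _collector_policy->heap_alignment());
_gens = new AdjoiningGenerations(heap_rs, _collector_policy,
                                 generation_alignment());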
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP +#include "gc_implementation/parallelScavenge/generationSizer.hpp" #include "gc_implementation/parallelScavenge/objectStartArray.hpp" #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp" #include "gc_implementation/parallelScavenge/psOldGen.hpp" @@ -32,14 +33,12 @@ #include "gc_implementation/shared/gcPolicyCounters.hpp" #include "gc_implementation/shared/gcWhen.hpp" #include "gc_interface/collectedHeap.inline.hpp" +#include "memory/collectorPolicy.hpp" #include "utilities/ostream.hpp" class AdjoiningGenerations; -class CollectorPolicy; class GCHeapSummary; class GCTaskManager; -class GenerationSizer; -class CollectorPolicy; class PSAdaptiveSizePolicy; class PSHeapSummary; @@ -50,24 +49,20 @@ static PSOldGen* _old_gen; // Sizing policy for entire heap - static PSAdaptiveSizePolicy* _size_policy; - static PSGCAdaptivePolicyCounters* _gc_policy_counters; + static PSAdaptiveSizePolicy* _size_policy; + static PSGCAdaptivePolicyCounters* _gc_policy_counters; static ParallelScavengeHeap* _psh; - size_t _young_gen_alignment; - size_t _old_gen_alignment; - GenerationSizer* _collector_policy; - inline size_t set_alignment(size_t& var, size_t val); - // Collection of generations that are adjacent in the // space reserved for the heap. AdjoiningGenerations* _gens; unsigned int _death_march_count; - static GCTaskManager* _gc_task_manager; // The task manager. + // The task manager + static GCTaskManager* _gc_task_manager; void trace_heap(GCWhen::Type when, GCTracer* tracer); @@ -80,16 +75,7 @@ HeapWord* mem_allocate_old_gen(size_t size); public: - ParallelScavengeHeap() : CollectedHeap() { - _death_march_count = 0; - set_alignment(_young_gen_alignment, intra_heap_alignment()); - set_alignment(_old_gen_alignment, intra_heap_alignment()); - } - - // Return the (conservative) maximum heap alignment - static size_t conservative_max_heap_alignment() { - return intra_heap_alignment(); - } + ParallelScavengeHeap() : CollectedHeap(), _death_march_count(0) { } // For use by VM operations enum CollectionType { @@ -103,8 +89,8 @@ virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector_policy; } - static PSYoungGen* young_gen() { return _young_gen; } - static PSOldGen* old_gen() { return _old_gen; } + static PSYoungGen* young_gen() { return _young_gen; } + static PSOldGen* old_gen() { return _old_gen; } virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; } @@ -121,13 +107,15 @@ void post_initialize(); void update_counters(); - // The alignment used for the various generations. - size_t young_gen_alignment() const { return _young_gen_alignment; } - size_t old_gen_alignment() const { return _old_gen_alignment; } + + // The alignment used for the various areas + size_t space_alignment() { return _collector_policy->space_alignment(); } + size_t generation_alignment() { return _collector_policy->gen_alignment(); } - // The alignment used for eden and survivors within the young gen - // and for boundary between young gen and old gen. 
- static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; } + // Return the (conservative) maximum heap alignment + static size_t conservative_max_heap_alignment() { + return CollectorPolicy::compute_heap_alignment(); + } size_t capacity() const; size_t used() const; @@ -157,16 +145,15 @@ virtual bool is_in_partial_collection(const void *p); #endif - bool is_in_young(oop p); // reserved part - bool is_in_old(oop p); // reserved part + bool is_in_young(oop p); // reserved part + bool is_in_old(oop p); // reserved part // Memory allocation. "gc_time_limit_was_exceeded" will // be set to true if the adaptive size policy determine that // an excessive amount of time is being spent doing collections // and caused a NULL to be returned. If a NULL is not returned, // "gc_time_limit_was_exceeded" has an undefined meaning. - HeapWord* mem_allocate(size_t size, - bool* gc_overhead_limit_was_exceeded); + HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded); // Allocation attempt(s) during a safepoint. It should never be called // to allocate a new TLAB as this allocation might be satisfied out @@ -257,17 +244,10 @@ // Call these in sequential code around the processing of strong roots. class ParStrongRootsScope : public MarkingCodeBlobClosure::MarkScope { - public: + public: ParStrongRootsScope(); ~ParStrongRootsScope(); }; }; -inline size_t ParallelScavengeHeap::set_alignment(size_t& var, size_t val) -{ - assert(is_power_of_2((intptr_t)val), "must be a power of 2"); - var = round_to(val, intra_heap_alignment()); - return var; -} - #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PARALLELSCAVENGEHEAP_HPP
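Hedged usage sketch of the relocated accessors: callers that previously asked the heap for young_gen_alignment()/old_gen_alignment() now read the single pair of policy-owned values through the heap.

ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
size_t space_align = heap->space_alignment();       // eden/survivor boundaries
size_t gen_align   = heap->generation_alignment();  // young/old gen boundary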
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -37,7 +37,7 @@ PSAdaptiveSizePolicy::PSAdaptiveSizePolicy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size, - size_t intra_generation_alignment, + size_t space_alignment, double gc_pause_goal_sec, double gc_minor_pause_goal_sec, uint gc_cost_ratio) : @@ -46,9 +46,8 @@ init_survivor_size, gc_pause_goal_sec, gc_cost_ratio), - _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin/ - 100.0), - _intra_generation_alignment(intra_generation_alignment), + _collection_cost_margin_fraction(AdaptiveSizePolicyCollectionCostMargin / 100.0), + _space_alignment(space_alignment), _live_at_last_full_gc(init_promo_size), _gc_minor_pause_goal_sec(gc_minor_pause_goal_sec), _latest_major_mutator_interval_seconds(0), @@ -353,11 +352,10 @@ } // Align everything and make a final limit check - const size_t alignment = _intra_generation_alignment; - desired_eden_size = align_size_up(desired_eden_size, alignment); - desired_eden_size = MAX2(desired_eden_size, alignment); + desired_eden_size = align_size_up(desired_eden_size, _space_alignment); + desired_eden_size = MAX2(desired_eden_size, _space_alignment); - eden_limit = align_size_down(eden_limit, alignment); + eden_limit = align_size_down(eden_limit, _space_alignment); // And one last limit check, now that we've aligned things. if (desired_eden_size > eden_limit) { @@ -561,11 +559,10 @@ } // Align everything and make a final limit check - const size_t alignment = _intra_generation_alignment; - desired_promo_size = align_size_up(desired_promo_size, alignment); - desired_promo_size = MAX2(desired_promo_size, alignment); + desired_promo_size = align_size_up(desired_promo_size, _space_alignment); + desired_promo_size = MAX2(desired_promo_size, _space_alignment); - promo_limit = align_size_down(promo_limit, alignment); + promo_limit = align_size_down(promo_limit, _space_alignment); // And one last limit check, now that we've aligned things. desired_promo_size = MIN2(desired_promo_size, promo_limit); @@ -650,7 +647,7 @@ } // If the desired eden size is as small as it will get, // try to adjust the old gen size. - if (*desired_eden_size_ptr <= _intra_generation_alignment) { + if (*desired_eden_size_ptr <= _space_alignment) { // Vary the old gen size to reduce the young gen pause. This // may not be a good idea. This is just a test. if (minor_pause_old_estimator()->decrement_will_decrease()) { @@ -755,7 +752,7 @@ // If the promo size is at the minimum (i.e., the old gen // size will not actually decrease), consider changing the // young gen size. - if (*desired_promo_size_ptr < _intra_generation_alignment) { + if (*desired_promo_size_ptr < _space_alignment) { // If increasing the young generation will decrease the old gen // pause, do it. 
// During startup there is noise in the statistics for deciding @@ -1066,24 +1063,24 @@ size_t PSAdaptiveSizePolicy::eden_increment_aligned_up(size_t cur_eden) { size_t result = eden_increment(cur_eden, YoungGenerationSizeIncrement); - return align_size_up(result, _intra_generation_alignment); + return align_size_up(result, _space_alignment); } size_t PSAdaptiveSizePolicy::eden_increment_aligned_down(size_t cur_eden) { size_t result = eden_increment(cur_eden); - return align_size_down(result, _intra_generation_alignment); + return align_size_down(result, _space_alignment); } size_t PSAdaptiveSizePolicy::eden_increment_with_supplement_aligned_up( size_t cur_eden) { size_t result = eden_increment(cur_eden, YoungGenerationSizeIncrement + _young_gen_size_increment_supplement); - return align_size_up(result, _intra_generation_alignment); + return align_size_up(result, _space_alignment); } size_t PSAdaptiveSizePolicy::eden_decrement_aligned_down(size_t cur_eden) { size_t eden_heap_delta = eden_decrement(cur_eden); - return align_size_down(eden_heap_delta, _intra_generation_alignment); + return align_size_down(eden_heap_delta, _space_alignment); } size_t PSAdaptiveSizePolicy::eden_decrement(size_t cur_eden) { @@ -1105,24 +1102,24 @@ size_t PSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) { size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement); - return align_size_up(result, _intra_generation_alignment); + return align_size_up(result, _space_alignment); } size_t PSAdaptiveSizePolicy::promo_increment_aligned_down(size_t cur_promo) { size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement); - return align_size_down(result, _intra_generation_alignment); + return align_size_down(result, _space_alignment); } size_t PSAdaptiveSizePolicy::promo_increment_with_supplement_aligned_up( size_t cur_promo) { size_t result = promo_increment(cur_promo, TenuredGenerationSizeIncrement + _old_gen_size_increment_supplement); - return align_size_up(result, _intra_generation_alignment); + return align_size_up(result, _space_alignment); } size_t PSAdaptiveSizePolicy::promo_decrement_aligned_down(size_t cur_promo) { size_t promo_heap_delta = promo_decrement(cur_promo); - return align_size_down(promo_heap_delta, _intra_generation_alignment); + return align_size_down(promo_heap_delta, _space_alignment); } size_t PSAdaptiveSizePolicy::promo_decrement(size_t cur_promo) { @@ -1135,9 +1132,9 @@ bool is_survivor_overflow, uint tenuring_threshold, size_t survivor_limit) { - assert(survivor_limit >= _intra_generation_alignment, + assert(survivor_limit >= _space_alignment, "survivor_limit too small"); - assert((size_t)align_size_down(survivor_limit, _intra_generation_alignment) + assert((size_t)align_size_down(survivor_limit, _space_alignment) == survivor_limit, "survivor_limit not aligned"); // This method is called even if the tenuring threshold and survivor @@ -1201,8 +1198,8 @@ // We're trying to pad the survivor size as little as possible without // overflowing the survivor spaces. size_t target_size = align_size_up((size_t)_avg_survived->padded_average(), - _intra_generation_alignment); - target_size = MAX2(target_size, _intra_generation_alignment); + _space_alignment); + target_size = MAX2(target_size, _space_alignment); if (target_size > survivor_limit) { // Target size is bigger than we can handle. Let's also reduce
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -91,7 +91,7 @@ // for making ergonomic decisions. double _latest_major_mutator_interval_seconds; - const size_t _intra_generation_alignment; // alignment for eden, survivors + const size_t _space_alignment; // alignment for eden, survivors const double _gc_minor_pause_goal_sec; // goal for maximum minor gc pause @@ -229,7 +229,7 @@ PSAdaptiveSizePolicy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size, - size_t intra_generation_alignment, + size_t space_alignment, double gc_pause_goal_sec, double gc_minor_pause_goal_sec, uint gc_time_ratio); @@ -378,7 +378,7 @@ // remain almost full anyway (top() will be near end(), but there will be a // large filler object at the bottom). const size_t sz = gen_size / MinSurvivorRatio; - const size_t alignment = _intra_generation_alignment; + const size_t alignment = _space_alignment; return sz > alignment ? align_size_down(sz, alignment) : alignment; }
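The _intra_generation_alignment to _space_alignment rename above leaves the arithmetic untouched: desired sizes are rounded up, clamped to at least one alignment unit, and limits are rounded down before comparison. A self-contained sketch of those helpers and the round-then-clamp pattern, assuming a power-of-two alignment as the VM asserts elsewhere:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

// Minimal re-implementations of the alignment helpers used above; they
// require a power-of-two alignment, which the VM checks elsewhere.
static size_t align_size_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}
static size_t align_size_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t space_alignment = 64 * 1024;  // illustrative _space_alignment

  // The pattern from compute_eden_space_size(): round the desired size up,
  // keep it at least one alignment unit, and round the limit down so an
  // aligned desired size can still be compared against it.
  size_t desired_eden = align_size_up(1000 * 1024, space_alignment);
  desired_eden = std::max(desired_eden, space_alignment);
  size_t eden_limit = align_size_down(5 * 1000 * 1000, space_alignment);

  if (desired_eden > eden_limit) desired_eden = eden_limit;
  assert(desired_eden % space_alignment == 0);
  printf("eden = %zuK, limit = %zuK\n", desired_eden / 1024, eden_limit / 1024);
  return 0;
}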
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -103,7 +103,7 @@ // Compute maximum space sizes for performance counters ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - size_t alignment = heap->intra_heap_alignment(); + size_t alignment = heap->space_alignment(); size_t size = virtual_space()->reserved_size(); size_t max_survivor_size; @@ -156,8 +156,9 @@ assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); // Compute sizes - size_t alignment = heap->intra_heap_alignment(); + size_t alignment = heap->space_alignment(); size_t size = virtual_space()->committed_size(); + assert(size >= 3 * alignment, "Young space is not large enough for eden + 2 survivors"); size_t survivor_size = size / InitialSurvivorRatio; survivor_size = align_size_down(survivor_size, alignment); @@ -207,7 +208,7 @@ #ifndef PRODUCT void PSYoungGen::space_invariants() { ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_heap_alignment(); + const size_t alignment = heap->space_alignment(); // Currently, our eden size cannot shrink to zero guarantee(eden_space()->capacity_in_bytes() >= alignment, "eden too small"); @@ -491,7 +492,7 @@ char* to_end = (char*)to_space()->end(); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t alignment = heap->intra_heap_alignment(); + const size_t alignment = heap->space_alignment(); const bool maintain_minimum = (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size(); @@ -840,8 +841,8 @@ size_t PSYoungGen::available_to_live() { size_t delta_in_survivor = 0; ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); - const size_t space_alignment = heap->intra_heap_alignment(); - const size_t gen_alignment = heap->young_gen_alignment(); + const size_t space_alignment = heap->space_alignment(); + const size_t gen_alignment = heap->generation_alignment(); MutableSpace* space_shrinking = NULL; if (from_space()->end() > to_space()->end()) {
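The new assert in PSYoungGen::initialize_work() makes the eden-plus-two-survivors requirement explicit instead of letting a too-small young gen fail later. A small replay of the layout computation it guards; the committed size and survivor ratio below are illustrative stand-ins:

#include <cassert>
#include <cstddef>
#include <cstdio>

static size_t align_size_down(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t alignment = 64 * 1024;          // heap->space_alignment(), illustrative
  const size_t committed = 8 * 1024 * 1024;    // virtual_space()->committed_size()
  const size_t initial_survivor_ratio = 8;     // -XX:InitialSurvivorRatio default

  // The assert added above: the young gen must hold eden plus two survivor
  // spaces, each at least one aligned unit.
  assert(committed >= 3 * alignment && "young space too small for eden + 2 survivors");

  size_t survivor = align_size_down(committed / initial_survivor_ratio, alignment);
  size_t eden = committed - 2 * survivor;
  printf("eden=%zuK from=%zuK to=%zuK\n", eden / 1024, survivor / 1024, survivor / 1024);
  return 0;
}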
--- a/src/share/vm/gc_interface/collectedHeap.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/gc_interface/collectedHeap.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -469,6 +469,10 @@ fill_with_object_impl(start, words, zap); } +void CollectedHeap::post_initialize() { + collector_policy()->post_heap_initialize(); +} + HeapWord* CollectedHeap::allocate_new_tlab(size_t size) { guarantee(false, "thread-local allocation buffers not supported"); return NULL;
--- a/src/share/vm/interpreter/abstractInterpreter.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -158,8 +158,8 @@ // Runtime support // length = invoke bytecode length (to advance to next bytecode) - static address deopt_entry (TosState state, int length) { ShouldNotReachHere(); return NULL; } - static address return_entry (TosState state, int length) { ShouldNotReachHere(); return NULL; } + static address deopt_entry(TosState state, int length) { ShouldNotReachHere(); return NULL; } + static address return_entry(TosState state, int length, Bytecodes::Code code) { ShouldNotReachHere(); return NULL; } static address rethrow_exception_entry() { return _rethrow_exception_entry; }
--- a/src/share/vm/interpreter/cppInterpreter.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/cppInterpreter.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -78,7 +78,7 @@ static address stack_result_to_stack(int index) { return _stack_to_stack[index]; } static address stack_result_to_native(int index) { return _stack_to_native_abi[index]; } - static address return_entry (TosState state, int length); + static address return_entry (TosState state, int length, Bytecodes::Code code); static address deopt_entry (TosState state, int length); #ifdef TARGET_ARCH_x86
--- a/src/share/vm/interpreter/interpreter.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/interpreter.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -329,15 +329,21 @@ //------------------------------------------------------------------------------------------------------------------------ // Deoptimization support -// If deoptimization happens, this function returns the point of next bytecode to continue execution +/** + * If a deoptimization happens, this function returns the point of next bytecode to continue execution. + */ address AbstractInterpreter::deopt_continue_after_entry(Method* method, address bcp, int callee_parameters, bool is_top_frame) { assert(method->contains(bcp), "just checkin'"); - Bytecodes::Code code = Bytecodes::java_code_at(method, bcp); + + // Get the original and rewritten bytecode. + Bytecodes::Code code = Bytecodes::java_code_at(method, bcp); assert(!Interpreter::bytecode_should_reexecute(code), "should not reexecute"); - int bci = method->bci_from(bcp); - int length = -1; // initial value for debugging + + const int bci = method->bci_from(bcp); + // compute continuation length - length = Bytecodes::length_at(method, bcp); + const int length = Bytecodes::length_at(method, bcp); + // compute result type BasicType type = T_ILLEGAL; @@ -393,7 +399,7 @@ return is_top_frame ? Interpreter::deopt_entry (as_TosState(type), length) - : Interpreter::return_entry(as_TosState(type), length); + : Interpreter::return_entry(as_TosState(type), length, code); } // If deoptimization happens, this function returns the point where the interpreter reexecutes
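deopt_continue_after_entry() now threads the original bytecode through to return_entry() because the continuation must advance the bcp by that bytecode's own length, and the invoke bytecodes differ. A sketch of the length bookkeeping using the JVM-spec instruction lengths; the enum is a stand-in, not HotSpot's Bytecodes table:

#include <cstdio>

// Illustrative stand-in for Bytecodes::length_at(): JVM-spec lengths of the
// invoke bytecodes discussed above.
enum Code { INVOKEVIRTUAL, INVOKESPECIAL, INVOKESTATIC, INVOKEINTERFACE, INVOKEDYNAMIC };

static int length_for(Code c) {
  switch (c) {
    case INVOKEVIRTUAL:
    case INVOKESPECIAL:
    case INVOKESTATIC:    return 3;  // opcode + u2 constant-pool index
    case INVOKEINTERFACE: return 5;  // opcode + u2 index + count + zero byte
    case INVOKEDYNAMIC:   return 5;  // opcode + u2 index + two zero bytes
  }
  return -1;
}

int main() {
  // After deoptimization the interpreter resumes at bcp + length, so the
  // chosen return entry must advance by the right amount for this
  // particular invoke bytecode.
  printf("invokedynamic continuation advances %d bytes\n", length_for(INVOKEDYNAMIC));
  return 0;
}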
--- a/src/share/vm/interpreter/linkResolver.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/linkResolver.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -152,11 +152,13 @@ // Could be an Object method inherited into an interface, but still a vtable call. kind = CallInfo::vtable_call; } else if (!resolved_klass->is_interface()) { - // A miranda method. Compute the vtable index. + // A default or miranda method. Compute the vtable index. ResourceMark rm; klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable(); - index = vt->index_of_miranda(resolved_method->name(), - resolved_method->signature()); + index = LinkResolver::vtable_index_of_interface_method(resolved_klass, + resolved_method); + assert(index >= 0 , "we should have valid vtable index at this point"); + kind = CallInfo::vtable_call; } else if (resolved_method->has_vtable_index()) { // Can occur if an interface redeclares a method of Object. @@ -279,7 +281,7 @@ } int LinkResolver::vtable_index_of_interface_method(KlassHandle klass, - methodHandle resolved_method, TRAPS) { + methodHandle resolved_method) { int vtable_index = Method::invalid_vtable_index; Symbol* name = resolved_method->name(); @@ -295,7 +297,7 @@ } if (vtable_index == Method::invalid_vtable_index) { // get vtable_index for miranda methods - ResourceMark rm(THREAD); + ResourceMark rm; klassVtable *vt = InstanceKlass::cast(klass())->vtable(); vtable_index = vt->index_of_miranda(name, signature); } @@ -691,7 +693,7 @@ ); resolved_method->access_flags().print_on(tty); if (resolved_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (resolved_method->is_overpass()) { tty->print("overpass"); @@ -937,7 +939,7 @@ ); resolved_method->access_flags().print_on(tty); if (resolved_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (resolved_method->is_overpass()) { tty->print("overpass"); @@ -1017,7 +1019,7 @@ ); sel_method->access_flags().print_on(tty); if (sel_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (sel_method->is_overpass()) { tty->print("overpass"); @@ -1081,7 +1083,7 @@ ); resolved_method->access_flags().print_on(tty); if (resolved_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (resolved_method->is_overpass()) { tty->print("overpass"); @@ -1118,7 +1120,7 @@ // do lookup based on receiver klass using the vtable index if (resolved_method->method_holder()->is_interface()) { // miranda method vtable_index = vtable_index_of_interface_method(resolved_klass, - resolved_method, CHECK); + resolved_method); assert(vtable_index >= 0 , "we should have valid vtable index at this point"); InstanceKlass* inst = InstanceKlass::cast(recv_klass()); @@ -1175,7 +1177,7 @@ ); selected_method->access_flags().print_on(tty); if (selected_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (selected_method->is_overpass()) { tty->print("overpass"); @@ -1268,14 +1270,6 @@ sel_method->name(), sel_method->signature())); } - // setup result - if (!resolved_method->has_itable_index()) { - int vtable_index = resolved_method->vtable_index(); - assert(vtable_index == sel_method->vtable_index(), "sanity check"); - result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK); - return; - } - int itable_index = resolved_method()->itable_index(); if (TraceItables && Verbose) { ResourceMark rm(THREAD); @@ -1289,14 +1283,22 @@ ); sel_method->access_flags().print_on(tty); if 
(sel_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (sel_method->is_overpass()) { tty->print("overpass"); } tty->cr(); } - result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK); + // setup result + if (!resolved_method->has_itable_index()) { + int vtable_index = resolved_method->vtable_index(); + assert(vtable_index == sel_method->vtable_index(), "sanity check"); + result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK); + } else { + int itable_index = resolved_method()->itable_index(); + result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK); + } }
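The relocated "setup result" block decides between vtable and itable dispatch only after the trace output: a resolved interface method without an itable index (an Object method seen through an interface, for instance) is dispatched virtually. A hypothetical mirror of that decision; ResolvedMethod and its fields are invented for illustration, not HotSpot's Method class:

#include <cstdio>

// Invented stand-in for the resolved method info used above; field meanings
// follow the comments in linkResolver.cpp.
struct ResolvedMethod {
  const char* name;
  int vtable_index;  // >= 0 when the method got a vtable slot
  int itable_index;  // >= 0 when the method got an itable slot
  bool has_itable_index() const { return itable_index >= 0; }
};

int main() {
  ResolvedMethod methods[] = {
    { "Object::hashCode via interface", 5, -1 },  // vtable dispatch
    { "MyInterface::m",                -1,  2 },  // itable dispatch
  };
  for (const ResolvedMethod& m : methods) {
    if (!m.has_itable_index()) {
      printf("%s -> set_virtual, vtable index %d\n", m.name, m.vtable_index);
    } else {
      printf("%s -> set_interface, itable index %d\n", m.name, m.itable_index);
    }
  }
  return 0;
}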
--- a/src/share/vm/interpreter/linkResolver.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/linkResolver.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -130,7 +130,6 @@ static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS); - static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method, TRAPS); static void resolve_klass (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); static void resolve_pool (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS); @@ -186,6 +185,7 @@ static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass); static methodHandle resolve_static_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass); static methodHandle resolve_special_call_or_null (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass); + static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method); // same as above for compile-time resolution; returns vtable_index if current_klass if linked static int resolve_virtual_vtable_index (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
--- a/src/share/vm/interpreter/rewriter.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/rewriter.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -72,19 +72,21 @@ // Unrewrite the bytecodes if an error occurs. void Rewriter::restore_bytecodes() { int len = _methods->length(); + bool invokespecial_error = false; for (int i = len-1; i >= 0; i--) { Method* method = _methods->at(i); - scan_method(method, true); + scan_method(method, true, &invokespecial_error); + assert(!invokespecial_error, "reversing should not get an invokespecial error"); } } // Creates a constant pool cache given a CPC map void Rewriter::make_constant_pool_cache(TRAPS) { - const int length = _cp_cache_map.length(); ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data(); ConstantPoolCache* cache = - ConstantPoolCache::allocate(loader_data, length, _cp_cache_map, + ConstantPoolCache::allocate(loader_data, _cp_cache_map, + _invokedynamic_cp_cache_map, _invokedynamic_references_map, CHECK); // initialize object cache in constant pool @@ -154,6 +156,30 @@ } } +// If the constant pool entry for invokespecial is InterfaceMethodref, +// we need to add a separate cpCache entry for its resolution, because it is +// different than the resolution for invokeinterface with InterfaceMethodref. +// These cannot share cpCache entries. It's unclear if all invokespecial to +// InterfaceMethodrefs would resolve to the same thing so a new cpCache entry +// is created for each one. This was added with lambda. +void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, bool* invokespecial_error) { + address p = bcp + offset; + if (!reverse) { + int cp_index = Bytes::get_Java_u2(p); + if (_pool->tag_at(cp_index).is_interface_method()) { + int cache_index = add_invokespecial_cp_cache_entry(cp_index); + if (cache_index != (int)(jushort) cache_index) { + *invokespecial_error = true; + } + Bytes::put_native_u2(p, cache_index); + } else { + rewrite_member_reference(bcp, offset, reverse); + } + } else { + rewrite_member_reference(bcp, offset, reverse); + } +} + // Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.) void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) { @@ -203,7 +229,7 @@ if (!reverse) { int cp_index = Bytes::get_Java_u2(p); int cache_index = add_invokedynamic_cp_cache_entry(cp_index); - add_invokedynamic_resolved_references_entries(cp_index, cache_index); + int resolved_index = add_invokedynamic_resolved_references_entries(cp_index, cache_index); // Replace the trailing four bytes with a CPC index for the dynamic // call site. Unlike other CPC entries, there is one per bytecode, // not just one per distinct CP entry. In other words, the @@ -212,13 +238,20 @@ // all these entries. That is the main reason invokedynamic // must have a five-byte instruction format. (Of course, other JVM // implementations can use the bytes for other purposes.) + // Note: We use native_u4 format exclusively for 4-byte indexes. Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index)); - // Note: We use native_u4 format exclusively for 4-byte indexes. 
+ // add the bcp in case we need to patch this bytecode if we also find a + // invokespecial/InterfaceMethodref in the bytecode stream + _patch_invokedynamic_bcps->push(p); + _patch_invokedynamic_refs->push(resolved_index); } else { - // callsite index int cache_index = ConstantPool::decode_invokedynamic_index( Bytes::get_native_u4(p)); - int cp_index = cp_cache_entry_pool_index(cache_index); + // We will reverse the bytecode rewriting _after_ adjusting them. + // Adjust the cache index by offset to the invokedynamic entries in the + // cpCache plus the delta if the invokedynamic bytecodes were adjusted. + cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit; + int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index); assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index"); // zero out 4 bytes Bytes::put_Java_u4(p, 0); @@ -226,6 +259,34 @@ } } +void Rewriter::patch_invokedynamic_bytecodes() { + // If the end of the cp_cache is the same as after initializing with the + // cpool, nothing needs to be done. Invokedynamic bytecodes are at the + // correct offsets. ie. no invokespecials added + int delta = cp_cache_delta(); + if (delta > 0) { + int length = _patch_invokedynamic_bcps->length(); + assert(length == _patch_invokedynamic_refs->length(), + "lengths should match"); + for (int i = 0; i < length; i++) { + address p = _patch_invokedynamic_bcps->at(i); + int cache_index = ConstantPool::decode_invokedynamic_index( + Bytes::get_native_u4(p)); + Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index + delta)); + + // invokedynamic resolved references map also points to cp cache and must + // add delta to each. + int resolved_index = _patch_invokedynamic_refs->at(i); + for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) { + assert(_invokedynamic_references_map[resolved_index+entry] == cache_index, + "should be the same index"); + _invokedynamic_references_map.at_put(resolved_index+entry, + cache_index + delta); + } + } + } +} + // Rewrite some ldc bytecodes to _fast_aldc void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide, @@ -269,7 +330,7 @@ // Rewrites a method given the index_map information -void Rewriter::scan_method(Method* method, bool reverse) { +void Rewriter::scan_method(Method* method, bool reverse, bool* invokespecial_error) { int nof_jsrs = 0; bool has_monitor_bytecodes = false; @@ -329,12 +390,17 @@ #endif break; } + + case Bytecodes::_invokespecial : { + rewrite_invokespecial(bcp, prefix_length+1, reverse, invokespecial_error); + break; + } + case Bytecodes::_getstatic : // fall through case Bytecodes::_putstatic : // fall through case Bytecodes::_getfield : // fall through case Bytecodes::_putfield : // fall through case Bytecodes::_invokevirtual : // fall through - case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokestatic : case Bytecodes::_invokeinterface: case Bytecodes::_invokehandle : // if reverse=true @@ -423,12 +489,26 @@ // rewrite methods, in two passes int len = _methods->length(); + bool invokespecial_error = false; for (int i = len-1; i >= 0; i--) { Method* method = _methods->at(i); - scan_method(method); + scan_method(method, false, &invokespecial_error); + if (invokespecial_error) { + // If you get an error here, there is no reversing bytecodes + // This exception is stored for this class and no further attempt is + // made at verifying or rewriting. 
+ THROW_MSG(vmSymbols::java_lang_InternalError(), + "This classfile overflows invokespecial for interfaces " + "and cannot be loaded"); + return; + } } + // May have to fix invokedynamic bytecodes if invokespecial/InterfaceMethodref + // entries had to be added. + patch_invokedynamic_bytecodes(); + // allocate constant pool cache, now that we've seen all the bytecodes make_constant_pool_cache(THREAD);
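The error path above exists because rewrite_invokespecial() writes the new cache index back into the two operand bytes of the bytecode; an index that does not survive a 16-bit round trip cannot be encoded. A standalone version of that check, with jushort replaced by uint16_t:

#include <cstdint>
#include <cstdio>

// The overflow test from rewrite_invokespecial(): the rewritten operand is a
// native u2, so any index that does not survive a 16-bit round trip cannot
// be encoded and the class is rejected with InternalError.
static bool fits_in_u2(int cache_index) {
  return cache_index == (int)(uint16_t)cache_index;
}

int main() {
  printf("65535 fits: %d\n", fits_in_u2(65535));  // 1: largest encodable index
  printf("65536 fits: %d\n", fits_in_u2(65536));  // 0: would trigger the error path
  return 0;
}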
--- a/src/share/vm/interpreter/rewriter.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/rewriter.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,55 +46,102 @@ intArray _method_handle_invokers; int _resolved_reference_limit; + // For mapping invokedynamic bytecodes, which are discovered during method + // scanning. The invokedynamic entries are added at the end of the cpCache. + // If there are any invokespecial/InterfaceMethodref special case bytecodes, + // these entries are added before invokedynamic entries so that the + // invokespecial bytecode 16 bit index doesn't overflow. + intStack _invokedynamic_cp_cache_map; + + // For patching. + GrowableArray<address>* _patch_invokedynamic_bcps; + GrowableArray<int>* _patch_invokedynamic_refs; + void init_maps(int length) { _cp_map.initialize(length, -1); // Choose an initial value large enough that we don't get frequent // calls to grow(). - _cp_cache_map.initialize(length / 2); + _cp_cache_map.initialize(length/2); // Also cache resolved objects, in another different cache. _reference_map.initialize(length, -1); - _resolved_references_map.initialize(length / 2); - _invokedynamic_references_map.initialize(length / 2); + _resolved_references_map.initialize(length/2); + _invokedynamic_references_map.initialize(length/2); _resolved_reference_limit = -1; - DEBUG_ONLY(_cp_cache_index_limit = -1); + _first_iteration_cp_cache_limit = -1; + + // invokedynamic specific fields + _invokedynamic_cp_cache_map.initialize(length/4); + _patch_invokedynamic_bcps = new GrowableArray<address>(length/4); + _patch_invokedynamic_refs = new GrowableArray<int>(length/4); } - int _cp_cache_index_limit; + int _first_iteration_cp_cache_limit; void record_map_limits() { -#ifdef ASSERT - // Record initial size of the two arrays generated for the CP cache: - _cp_cache_index_limit = _cp_cache_map.length(); -#endif //ASSERT + // Record initial size of the two arrays generated for the CP cache + // relative to walking the constant pool. + _first_iteration_cp_cache_limit = _cp_cache_map.length(); _resolved_reference_limit = _resolved_references_map.length(); } + int cp_cache_delta() { + // How many cp cache entries were added since recording map limits after + // cp cache initialization? 
+ assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration"); + return _cp_cache_map.length() - _first_iteration_cp_cache_limit; + } + int cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; } bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; } + int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) { + assert(cp_map->at(cp_index) == -1, "not twice on same cp_index"); + int cache_index = cp_cache_map->append(cp_index); + cp_map->at_put(cp_index, cache_index); + return cache_index; + } + int add_cp_cache_entry(int cp_index) { assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version"); - assert(_cp_map[cp_index] == -1, "not twice on same cp_index"); - assert(_cp_cache_index_limit == -1, "do not add cache entries after first iteration"); - int cache_index = _cp_cache_map.append(cp_index); - _cp_map.at_put(cp_index, cache_index); + assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration"); + int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map); assert(cp_entry_to_cp_cache(cp_index) == cache_index, ""); assert(cp_cache_entry_pool_index(cache_index) == cp_index, ""); return cache_index; } - // add a new CP cache entry beyond the normal cache (for invokedynamic only) int add_invokedynamic_cp_cache_entry(int cp_index) { assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version"); - assert(_cp_map[cp_index] == -1, "do not map from cp_index"); - assert(_cp_cache_index_limit >= 0, "add indy cache entries after first iteration"); + assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration"); + // add to the invokedynamic index map. + int cache_index = _invokedynamic_cp_cache_map.append(cp_index); + // do not update _cp_map, since the mapping is one-to-many + assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, ""); + // this index starts at one but in the bytecode it's appended to the end. + return cache_index + _first_iteration_cp_cache_limit; + } + + int invokedynamic_cp_cache_entry_pool_index(int cache_index) { + int cp_index = _invokedynamic_cp_cache_map[cache_index]; + return cp_index; + } + + // add a new CP cache entry beyond the normal cache for the special case of + // invokespecial with InterfaceMethodref as cpool operand. + int add_invokespecial_cp_cache_entry(int cp_index) { + assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration"); + // Don't add InterfaceMethodref if it already exists at the end. 
+ for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) { + if (cp_cache_entry_pool_index(i) == cp_index) { + return i; + } + } int cache_index = _cp_cache_map.append(cp_index); - assert(cache_index >= _cp_cache_index_limit, ""); + assert(cache_index >= _first_iteration_cp_cache_limit, ""); // do not update _cp_map, since the mapping is one-to-many assert(cp_cache_entry_pool_index(cache_index) == cp_index, ""); return cache_index; } - // fix duplicated code later int cp_entry_to_resolved_references(int cp_index) const { assert(has_entry_in_resolved_references(cp_index), "oob"); return _reference_map[cp_index]; @@ -105,10 +152,7 @@ // add a new entry to the resolved_references map int add_resolved_references_entry(int cp_index) { - assert(_reference_map[cp_index] == -1, "not twice on same cp_index"); - assert(_resolved_reference_limit == -1, "do not add CP refs after first iteration"); - int ref_index = _resolved_references_map.append(cp_index); - _reference_map.at_put(cp_index, ref_index); + int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map); assert(cp_entry_to_resolved_references(cp_index) == ref_index, ""); return ref_index; } @@ -137,7 +181,7 @@ // Access the contents of _cp_cache_map to determine CP cache layout. int cp_cache_entry_pool_index(int cache_index) { int cp_index = _cp_cache_map[cache_index]; - return cp_index; + return cp_index; } // All the work goes in here: @@ -145,12 +189,16 @@ void compute_index_maps(); void make_constant_pool_cache(TRAPS); - void scan_method(Method* m, bool reverse = false); + void scan_method(Method* m, bool reverse, bool* invokespecial_error); void rewrite_Object_init(methodHandle m, TRAPS); - void rewrite_member_reference(address bcp, int offset, bool reverse = false); - void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse = false); - void rewrite_invokedynamic(address bcp, int offset, bool reverse = false); - void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false); + void rewrite_member_reference(address bcp, int offset, bool reverse); + void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse); + void rewrite_invokedynamic(address bcp, int offset, bool reverse); + void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse); + void rewrite_invokespecial(address bcp, int offset, bool reverse, bool* invokespecial_error); + + void patch_invokedynamic_bytecodes(); + // Revert bytecodes in case of an exception. void restore_bytecodes();
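The new rewriter.hpp bookkeeping partitions the cpCache index space: constant-pool-walk entries first, then any invokespecial/InterfaceMethodref entries, then invokedynamic entries at the end. A toy walkthrough of the index arithmetic behind add_invokedynamic_cp_cache_entry() and cp_cache_delta(); the concrete numbers are made up:

#include <cstdio>

int main() {
  // Index space layout sketched from the comments above:
  //   [0 .. limit)            entries added while walking the constant pool
  //   [limit .. limit+delta)  invokespecial/InterfaceMethodref entries
  //   [limit+delta .. )       invokedynamic entries, always at the end
  int first_iteration_cp_cache_limit = 10;  // _first_iteration_cp_cache_limit
  int invokespecial_entries = 2;            // found during method scanning
  int delta = invokespecial_entries;        // cp_cache_delta()

  // add_invokedynamic_cp_cache_entry() hands out limit + local index; if any
  // invokespecial entries arrive afterwards, patch_invokedynamic_bytecodes()
  // must shift every already-written operand by delta.
  int indy_local_index = 3;
  int written = first_iteration_cp_cache_limit + indy_local_index;
  int patched = written + delta;
  printf("invokedynamic cache index: written %d, patched to %d\n", written, patched);
  return 0;
}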
--- a/src/share/vm/interpreter/templateInterpreter.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/templateInterpreter.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -184,8 +184,9 @@ EntryPoint TemplateInterpreter::_continuation_entry; EntryPoint TemplateInterpreter::_safept_entry; -address TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs]; -address TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs]; +address TemplateInterpreter::_invoke_return_entry[TemplateInterpreter::number_of_return_addrs]; +address TemplateInterpreter::_invokeinterface_return_entry[TemplateInterpreter::number_of_return_addrs]; +address TemplateInterpreter::_invokedynamic_return_entry[TemplateInterpreter::number_of_return_addrs]; DispatchTable TemplateInterpreter::_active_table; DispatchTable TemplateInterpreter::_normal_table; @@ -237,22 +238,37 @@ #endif // !PRODUCT { CodeletMark cm(_masm, "return entry points"); + const int index_size = sizeof(u2); for (int i = 0; i < Interpreter::number_of_return_entries; i++) { Interpreter::_return_entry[i] = EntryPoint( - generate_return_entry_for(itos, i), - generate_return_entry_for(itos, i), - generate_return_entry_for(itos, i), - generate_return_entry_for(atos, i), - generate_return_entry_for(itos, i), - generate_return_entry_for(ltos, i), - generate_return_entry_for(ftos, i), - generate_return_entry_for(dtos, i), - generate_return_entry_for(vtos, i) + generate_return_entry_for(itos, i, index_size), + generate_return_entry_for(itos, i, index_size), + generate_return_entry_for(itos, i, index_size), + generate_return_entry_for(atos, i, index_size), + generate_return_entry_for(itos, i, index_size), + generate_return_entry_for(ltos, i, index_size), + generate_return_entry_for(ftos, i, index_size), + generate_return_entry_for(dtos, i, index_size), + generate_return_entry_for(vtos, i, index_size) ); } } + { CodeletMark cm(_masm, "invoke return entry points"); + const TosState states[] = {itos, itos, itos, itos, ltos, ftos, dtos, atos, vtos}; + const int invoke_length = Bytecodes::length_for(Bytecodes::_invokestatic); + const int invokeinterface_length = Bytecodes::length_for(Bytecodes::_invokeinterface); + const int invokedynamic_length = Bytecodes::length_for(Bytecodes::_invokedynamic); + + for (int i = 0; i < Interpreter::number_of_return_addrs; i++) { + TosState state = states[i]; + Interpreter::_invoke_return_entry[i] = generate_return_entry_for(state, invoke_length, sizeof(u2)); + Interpreter::_invokeinterface_return_entry[i] = generate_return_entry_for(state, invokeinterface_length, sizeof(u2)); + Interpreter::_invokedynamic_return_entry[i] = generate_return_entry_for(state, invokedynamic_length, sizeof(u4)); + } + } + { CodeletMark cm(_masm, "earlyret entry points"); Interpreter::_earlyret_entry = EntryPoint( @@ -298,13 +314,6 @@ } } - for (int j = 0; j < number_of_states; j++) { - const TosState states[] = {btos, ctos, stos, itos, ltos, ftos, dtos, atos, vtos}; - int index = Interpreter::TosState_as_index(states[j]); - Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3); - Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5); - } - { CodeletMark cm(_masm, "continuation entry points"); Interpreter::_continuation_entry = EntryPoint( @@ -534,9 +543,46 @@ //------------------------------------------------------------------------------------------------------------------------ // Entry points -address 
TemplateInterpreter::return_entry(TosState state, int length) { +/** + * Returns the return entry table for the given invoke bytecode. + */ +address* TemplateInterpreter::invoke_return_entry_table_for(Bytecodes::Code code) { + switch (code) { + case Bytecodes::_invokestatic: + case Bytecodes::_invokespecial: + case Bytecodes::_invokevirtual: + case Bytecodes::_invokehandle: + return Interpreter::invoke_return_entry_table(); + case Bytecodes::_invokeinterface: + return Interpreter::invokeinterface_return_entry_table(); + case Bytecodes::_invokedynamic: + return Interpreter::invokedynamic_return_entry_table(); + default: + fatal(err_msg("invalid bytecode: %s", Bytecodes::name(code))); + return NULL; + } +} + +/** + * Returns the return entry address for the given top-of-stack state and bytecode. + */ +address TemplateInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) { guarantee(0 <= length && length < Interpreter::number_of_return_entries, "illegal length"); - return _return_entry[length].entry(state); + const int index = TosState_as_index(state); + switch (code) { + case Bytecodes::_invokestatic: + case Bytecodes::_invokespecial: + case Bytecodes::_invokevirtual: + case Bytecodes::_invokehandle: + return _invoke_return_entry[index]; + case Bytecodes::_invokeinterface: + return _invokeinterface_return_entry[index]; + case Bytecodes::_invokedynamic: + return _invokedynamic_return_entry[index]; + default: + assert(!Bytecodes::is_invoke(code), err_msg("invoke instructions should be handled separately: %s", Bytecodes::name(code))); + return _return_entry[length].entry(state); + } }
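return_entry() and invoke_return_entry_table_for() above replace the old per-length tables with three per-TosState tables keyed by invoke family. A compact analogue of that selection; the table contents are placeholder strings standing in for generated stub addresses:

#include <cstdio>

enum TosState { ITOS, LTOS, FTOS, DTOS, ATOS, VTOS, NUM_STATES };
enum Code { INVOKESTATIC, INVOKESPECIAL, INVOKEVIRTUAL, INVOKEHANDLE,
            INVOKEINTERFACE, INVOKEDYNAMIC };

// Placeholder "entry points"; in the VM these are addresses of generated stubs.
static const char* invoke_entry[NUM_STATES]          = {"inv_i","inv_l","inv_f","inv_d","inv_a","inv_v"};
static const char* invokeinterface_entry[NUM_STATES] = {"ifc_i","ifc_l","ifc_f","ifc_d","ifc_a","ifc_v"};
static const char* invokedynamic_entry[NUM_STATES]   = {"dyn_i","dyn_l","dyn_f","dyn_d","dyn_a","dyn_v"};

static const char* const* table_for(Code code) {
  switch (code) {
    case INVOKESTATIC: case INVOKESPECIAL:
    case INVOKEVIRTUAL: case INVOKEHANDLE: return invoke_entry;
    case INVOKEINTERFACE:                  return invokeinterface_entry;
    case INVOKEDYNAMIC:                    return invokedynamic_entry;
  }
  return 0;  // unreachable for invoke bytecodes, mirroring the fatal() above
}

int main() {
  printf("invokedynamic/atos entry: %s\n", table_for(INVOKEDYNAMIC)[ATOS]);
  return 0;
}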
--- a/src/share/vm/interpreter/templateInterpreter.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/templateInterpreter.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -120,8 +120,9 @@ static EntryPoint _continuation_entry; static EntryPoint _safept_entry; - static address _return_3_addrs_by_index[number_of_return_addrs]; // for invokevirtual return entries - static address _return_5_addrs_by_index[number_of_return_addrs]; // for invokeinterface return entries + static address _invoke_return_entry[number_of_return_addrs]; // for invokestatic, invokespecial, invokevirtual return entries + static address _invokeinterface_return_entry[number_of_return_addrs]; // for invokeinterface return entries + static address _invokedynamic_return_entry[number_of_return_addrs]; // for invokedynamic return entries static DispatchTable _active_table; // the active dispatch table (used by the interpreter for dispatch) static DispatchTable _normal_table; // the normal dispatch table (used to set the active table in normal mode) @@ -161,12 +162,15 @@ static address* normal_table() { return _normal_table.table_for(); } // Support for invokes - static address* return_3_addrs_by_index_table() { return _return_3_addrs_by_index; } - static address* return_5_addrs_by_index_table() { return _return_5_addrs_by_index; } - static int TosState_as_index(TosState state); // computes index into return_3_entry_by_index table + static address* invoke_return_entry_table() { return _invoke_return_entry; } + static address* invokeinterface_return_entry_table() { return _invokeinterface_return_entry; } + static address* invokedynamic_return_entry_table() { return _invokedynamic_return_entry; } + static int TosState_as_index(TosState state); - static address return_entry (TosState state, int length); - static address deopt_entry (TosState state, int length); + static address* invoke_return_entry_table_for(Bytecodes::Code code); + + static address deopt_entry(TosState state, int length); + static address return_entry(TosState state, int length, Bytecodes::Code code); // Safepoint support static void notice_safepoints(); // stops the thread when reaching a safepoint
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -53,7 +53,7 @@ address generate_ClassCastException_handler(); address generate_ArrayIndexOutOfBounds_handler(const char* name); address generate_continuation_for(TosState state); - address generate_return_entry_for(TosState state, int step); + address generate_return_entry_for(TosState state, int step, size_t index_size); address generate_earlyret_entry_for(TosState state); address generate_deopt_entry_for(TosState state, int step); address generate_safept_entry_for(TosState state, address runtime_entry);
--- a/src/share/vm/memory/collectorPolicy.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/memory/collectorPolicy.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -47,54 +47,107 @@ // CollectorPolicy methods. +CollectorPolicy::CollectorPolicy() : + _space_alignment(0), + _heap_alignment(0), + _initial_heap_byte_size(InitialHeapSize), + _max_heap_byte_size(MaxHeapSize), + _min_heap_byte_size(Arguments::min_heap_size()), + _max_heap_size_cmdline(false), + _size_policy(NULL), + _should_clear_all_soft_refs(false), + _all_soft_refs_clear(false) +{} + +#ifdef ASSERT +void CollectorPolicy::assert_flags() { + assert(InitialHeapSize <= MaxHeapSize, "Ergonomics decided on incompatible initial and maximum heap sizes"); + assert(InitialHeapSize % _heap_alignment == 0, "InitialHeapSize alignment"); + assert(MaxHeapSize % _heap_alignment == 0, "MaxHeapSize alignment"); +} + +void CollectorPolicy::assert_size_info() { + assert(InitialHeapSize == _initial_heap_byte_size, "Discrepancy between InitialHeapSize flag and local storage"); + assert(MaxHeapSize == _max_heap_byte_size, "Discrepancy between MaxHeapSize flag and local storage"); + assert(_max_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible minimum and maximum heap sizes"); + assert(_initial_heap_byte_size >= _min_heap_byte_size, "Ergonomics decided on incompatible initial and minimum heap sizes"); + assert(_max_heap_byte_size >= _initial_heap_byte_size, "Ergonomics decided on incompatible initial and maximum heap sizes"); + assert(_min_heap_byte_size % _heap_alignment == 0, "min_heap_byte_size alignment"); + assert(_initial_heap_byte_size % _heap_alignment == 0, "initial_heap_byte_size alignment"); + assert(_max_heap_byte_size % _heap_alignment == 0, "max_heap_byte_size alignment"); +} +#endif // ASSERT + void CollectorPolicy::initialize_flags() { - assert(_max_alignment >= _min_alignment, - err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT, - _max_alignment, _min_alignment)); - assert(_max_alignment % _min_alignment == 0, - err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT, - _max_alignment, _min_alignment)); + assert(_space_alignment != 0, "Space alignment not set up properly"); + assert(_heap_alignment != 0, "Heap alignment not set up properly"); + assert(_heap_alignment >= _space_alignment, + err_msg("heap_alignment: " SIZE_FORMAT " less than space_alignment: " SIZE_FORMAT, + _heap_alignment, _space_alignment)); + assert(_heap_alignment % _space_alignment == 0, + err_msg("heap_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT, + _heap_alignment, _space_alignment)); - if (MaxHeapSize < InitialHeapSize) { - vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); + if (FLAG_IS_CMDLINE(MaxHeapSize)) { + if (FLAG_IS_CMDLINE(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { + vm_exit_during_initialization("Initial heap size set to a larger value than the maximum heap size"); + } + if (_min_heap_byte_size != 0 && MaxHeapSize < _min_heap_byte_size) { + vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); + } + _max_heap_size_cmdline = true; } - MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment); + // Check heap parameter properties + if (InitialHeapSize < M) { + vm_exit_during_initialization("Too small initial heap"); + } + if (_min_heap_byte_size < M) { + vm_exit_during_initialization("Too small minimum heap"); + } + + // User inputs from -Xmx and -Xms must be 
aligned + _min_heap_byte_size = align_size_up(_min_heap_byte_size, _heap_alignment); + uintx aligned_initial_heap_size = align_size_up(InitialHeapSize, _heap_alignment); + uintx aligned_max_heap_size = align_size_up(MaxHeapSize, _heap_alignment); + + // Write back to flags if the values changed + if (aligned_initial_heap_size != InitialHeapSize) { + FLAG_SET_ERGO(uintx, InitialHeapSize, aligned_initial_heap_size); + } + if (aligned_max_heap_size != MaxHeapSize) { + FLAG_SET_ERGO(uintx, MaxHeapSize, aligned_max_heap_size); + } + + if (FLAG_IS_CMDLINE(InitialHeapSize) && _min_heap_byte_size != 0 && + InitialHeapSize < _min_heap_byte_size) { + vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); + } + if (!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) { + FLAG_SET_ERGO(uintx, MaxHeapSize, InitialHeapSize); + } else if (!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize) { + FLAG_SET_ERGO(uintx, InitialHeapSize, MaxHeapSize); + if (InitialHeapSize < _min_heap_byte_size) { + _min_heap_byte_size = InitialHeapSize; + } + } + + _initial_heap_byte_size = InitialHeapSize; + _max_heap_byte_size = MaxHeapSize; + + FLAG_SET_ERGO(uintx, MinHeapDeltaBytes, align_size_up(MinHeapDeltaBytes, _space_alignment)); + + DEBUG_ONLY(CollectorPolicy::assert_flags();) } void CollectorPolicy::initialize_size_info() { - // User inputs from -mx and ms must be aligned - _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment); - _initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment); - _max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment); - - // Check heap parameter properties - if (_initial_heap_byte_size < M) { - vm_exit_during_initialization("Too small initial heap"); - } - // Check heap parameter properties - if (_min_heap_byte_size < M) { - vm_exit_during_initialization("Too small minimum heap"); - } - if (_initial_heap_byte_size <= NewSize) { - // make sure there is at least some room in old space - vm_exit_during_initialization("Too small initial heap for new size specified"); - } - if (_max_heap_byte_size < _min_heap_byte_size) { - vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified"); - } - if (_initial_heap_byte_size < _min_heap_byte_size) { - vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified"); - } - if (_max_heap_byte_size < _initial_heap_byte_size) { - vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified"); - } - if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT " Initial heap " SIZE_FORMAT " Maximum heap " SIZE_FORMAT, _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size); } + + DEBUG_ONLY(CollectorPolicy::assert_size_info();) } bool CollectorPolicy::use_should_clear_all_soft_refs(bool v) { @@ -105,7 +158,6 @@ GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap, int max_covered_regions) { - assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name"); return new CardTableRS(whole_heap, max_covered_regions); } @@ -119,7 +171,7 @@ _all_soft_refs_clear = true; } -size_t CollectorPolicy::compute_max_alignment() { +size_t CollectorPolicy::compute_heap_alignment() { // The card marking array and the offset arrays for old generations are // committed in os pages as well. Make sure they are entirely full (to // avoid partial page problems), e.g. 
if 512 bytes heap corresponds to 1 @@ -146,18 +198,21 @@ // GenCollectorPolicy methods. +GenCollectorPolicy::GenCollectorPolicy() : + _min_gen0_size(0), + _initial_gen0_size(0), + _max_gen0_size(0), + _gen_alignment(0), + _generations(NULL) +{} + size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) { - size_t x = base_size / (NewRatio+1); - size_t new_gen_size = x > _min_alignment ? - align_size_down(x, _min_alignment) : - _min_alignment; - return new_gen_size; + return align_size_down_bounded(base_size / (NewRatio + 1), _gen_alignment); } size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size, size_t maximum_size) { - size_t alignment = _min_alignment; - size_t max_minus = maximum_size - alignment; + size_t max_minus = maximum_size - _gen_alignment; return desired_size < max_minus ? desired_size : max_minus; } @@ -165,7 +220,7 @@ void GenCollectorPolicy::initialize_size_policy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size) { - const double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0; + const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0; _size_policy = new AdaptiveSizePolicy(init_eden_size, init_promo_size, init_survivor_size, @@ -173,100 +228,181 @@ GCTimeRatio); } +size_t GenCollectorPolicy::young_gen_size_lower_bound() { + // The young generation must be aligned and have room for eden + two survivors + return align_size_up(3 * _space_alignment, _gen_alignment); +} + +#ifdef ASSERT +void GenCollectorPolicy::assert_flags() { + CollectorPolicy::assert_flags(); + assert(NewSize >= _min_gen0_size, "Ergonomics decided on a too small young gen size"); + assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); + assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes"); + assert(NewSize % _gen_alignment == 0, "NewSize alignment"); + assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % _gen_alignment == 0, "MaxNewSize alignment"); +} + +void TwoGenerationCollectorPolicy::assert_flags() { + GenCollectorPolicy::assert_flags(); + assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes"); + assert(OldSize % _gen_alignment == 0, "OldSize alignment"); +} + +void GenCollectorPolicy::assert_size_info() { + CollectorPolicy::assert_size_info(); + // GenCollectorPolicy::initialize_size_info may update the MaxNewSize + assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes"); + assert(NewSize == _initial_gen0_size, "Discrepancy between NewSize flag and local storage"); + assert(MaxNewSize == _max_gen0_size, "Discrepancy between MaxNewSize flag and local storage"); + assert(_min_gen0_size <= _initial_gen0_size, "Ergonomics decided on incompatible minimum and initial young gen sizes"); + assert(_initial_gen0_size <= _max_gen0_size, "Ergonomics decided on incompatible initial and maximum young gen sizes"); + assert(_min_gen0_size % _gen_alignment == 0, "_min_gen0_size alignment"); + assert(_initial_gen0_size % _gen_alignment == 0, "_initial_gen0_size alignment"); + assert(_max_gen0_size % _gen_alignment == 0, "_max_gen0_size alignment"); +} + +void TwoGenerationCollectorPolicy::assert_size_info() { + GenCollectorPolicy::assert_size_info(); + assert(OldSize == _initial_gen1_size, "Discrepancy between OldSize flag and local storage"); + assert(_min_gen1_size <= _initial_gen1_size, "Ergonomics decided on incompatible 
minimum and initial old gen sizes"); + assert(_initial_gen1_size <= _max_gen1_size, "Ergonomics decided on incompatible initial and maximum old gen sizes"); + assert(_max_gen1_size % _gen_alignment == 0, "_max_gen1_size alignment"); + assert(_initial_gen1_size % _gen_alignment == 0, "_initial_gen1_size alignment"); + assert(_max_heap_byte_size <= (_max_gen0_size + _max_gen1_size), "Total maximum heap sizes must be sum of generation maximum sizes"); +} +#endif // ASSERT + void GenCollectorPolicy::initialize_flags() { - // All sizes must be multiples of the generation granularity. - _min_alignment = (uintx) Generation::GenGrain; - _max_alignment = compute_max_alignment(); - CollectorPolicy::initialize_flags(); - // All generational heaps have a youngest gen; handle those flags here. + assert(_gen_alignment != 0, "Generation alignment not set up properly"); + assert(_heap_alignment >= _gen_alignment, + err_msg("heap_alignment: " SIZE_FORMAT " less than gen_alignment: " SIZE_FORMAT, + _heap_alignment, _gen_alignment)); + assert(_gen_alignment % _space_alignment == 0, + err_msg("gen_alignment: " SIZE_FORMAT " not aligned by space_alignment: " SIZE_FORMAT, + _gen_alignment, _space_alignment)); + assert(_heap_alignment % _gen_alignment == 0, + err_msg("heap_alignment: " SIZE_FORMAT " not aligned by gen_alignment: " SIZE_FORMAT, + _heap_alignment, _gen_alignment)); + + // All generational heaps have a youngest gen; handle those flags here - // Adjust max size parameters - if (NewSize > MaxNewSize) { - MaxNewSize = NewSize; + // Make sure the heap is large enough for two generations + uintx smallest_new_size = young_gen_size_lower_bound(); + uintx smallest_heap_size = align_size_up(smallest_new_size + align_size_up(_space_alignment, _gen_alignment), + _heap_alignment); + if (MaxHeapSize < smallest_heap_size) { + FLAG_SET_ERGO(uintx, MaxHeapSize, smallest_heap_size); + _max_heap_byte_size = MaxHeapSize; } - NewSize = align_size_down(NewSize, _min_alignment); - MaxNewSize = align_size_down(MaxNewSize, _min_alignment); + // If needed, synchronize _min_heap_byte size and _initial_heap_byte_size + if (_min_heap_byte_size < smallest_heap_size) { + _min_heap_byte_size = smallest_heap_size; + if (InitialHeapSize < _min_heap_byte_size) { + FLAG_SET_ERGO(uintx, InitialHeapSize, smallest_heap_size); + _initial_heap_byte_size = smallest_heap_size; + } + } - // Check validity of heap flags - assert(NewSize % _min_alignment == 0, "eden space alignment"); - assert(MaxNewSize % _min_alignment == 0, "survivor space alignment"); + // Now take the actual NewSize into account. We will silently increase NewSize + // if the user specified a smaller value. + smallest_new_size = MAX2(smallest_new_size, (uintx)align_size_down(NewSize, _gen_alignment)); + if (smallest_new_size != NewSize) { + FLAG_SET_ERGO(uintx, NewSize, smallest_new_size); + } + _initial_gen0_size = NewSize; + + if (!FLAG_IS_DEFAULT(MaxNewSize)) { + uintx min_new_size = MAX2(_gen_alignment, _min_gen0_size); - if (NewSize < 3 * _min_alignment) { - // make sure there room for eden and two survivor spaces - vm_exit_during_initialization("Too small new size specified"); + if (MaxNewSize >= MaxHeapSize) { + // Make sure there is room for an old generation + uintx smaller_max_new_size = MaxHeapSize - _gen_alignment; + if (FLAG_IS_CMDLINE(MaxNewSize)) { + warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or greater than the entire " + "heap (" SIZE_FORMAT "k). 
A new max generation size of " SIZE_FORMAT "k will be used.", + MaxNewSize/K, MaxHeapSize/K, smaller_max_new_size/K); + } + FLAG_SET_ERGO(uintx, MaxNewSize, smaller_max_new_size); + if (NewSize > MaxNewSize) { + FLAG_SET_ERGO(uintx, NewSize, MaxNewSize); + _initial_gen0_size = NewSize; + } + } else if (MaxNewSize < min_new_size) { + FLAG_SET_ERGO(uintx, MaxNewSize, min_new_size); + } else if (!is_size_aligned(MaxNewSize, _gen_alignment)) { + FLAG_SET_ERGO(uintx, MaxNewSize, align_size_down(MaxNewSize, _gen_alignment)); + } + _max_gen0_size = MaxNewSize; } + + if (NewSize > MaxNewSize) { + // At this point this should only happen if the user specifies a large NewSize and/or + // a small (but not too small) MaxNewSize. + if (FLAG_IS_CMDLINE(MaxNewSize)) { + warning("NewSize (" SIZE_FORMAT "k) is greater than the MaxNewSize (" SIZE_FORMAT "k). " + "A new max generation size of " SIZE_FORMAT "k will be used.", + NewSize/K, MaxNewSize/K, NewSize/K); + } + FLAG_SET_ERGO(uintx, MaxNewSize, NewSize); + _max_gen0_size = MaxNewSize; + } + if (SurvivorRatio < 1 || NewRatio < 1) { vm_exit_during_initialization("Invalid young gen ratio specified"); } + + DEBUG_ONLY(GenCollectorPolicy::assert_flags();) } void TwoGenerationCollectorPolicy::initialize_flags() { GenCollectorPolicy::initialize_flags(); - OldSize = align_size_down(OldSize, _min_alignment); + if (!is_size_aligned(OldSize, _gen_alignment)) { + FLAG_SET_ERGO(uintx, OldSize, align_size_down(OldSize, _gen_alignment)); + } - if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) { + if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(MaxHeapSize)) { // NewRatio will be used later to set the young generation size so we use // it to calculate how big the heap should be based on the requested OldSize // and NewRatio. assert(NewRatio > 0, "NewRatio should have been set up earlier"); size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1); - calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment); - MaxHeapSize = calculated_heapsize; - InitialHeapSize = calculated_heapsize; + calculated_heapsize = align_size_up(calculated_heapsize, _heap_alignment); + FLAG_SET_ERGO(uintx, MaxHeapSize, calculated_heapsize); + _max_heap_byte_size = MaxHeapSize; + FLAG_SET_ERGO(uintx, InitialHeapSize, calculated_heapsize); + _initial_heap_byte_size = InitialHeapSize; } - MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); // adjust max heap size if necessary if (NewSize + OldSize > MaxHeapSize) { - if (FLAG_IS_CMDLINE(MaxHeapSize)) { + if (_max_heap_size_cmdline) { // somebody set a maximum heap size with the intention that we should not // exceed it. Adjust New/OldSize as necessary. uintx calculated_size = NewSize + OldSize; double shrink_factor = (double) MaxHeapSize / calculated_size; - // align - NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment); + uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment); + FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size)); + _initial_gen0_size = NewSize; + // OldSize is already aligned because above we aligned MaxHeapSize to - // _max_alignment, and we just made sure that NewSize is aligned to - // _min_alignment. In initialize_flags() we verified that _max_alignment - // is a multiple of _min_alignment. - OldSize = MaxHeapSize - NewSize; + // _heap_alignment, and we just made sure that NewSize is aligned to + // _gen_alignment. 
In initialize_flags() we verified that _heap_alignment + // is a multiple of _gen_alignment. + FLAG_SET_ERGO(uintx, OldSize, MaxHeapSize - NewSize); } else { - MaxHeapSize = NewSize + OldSize; + FLAG_SET_ERGO(uintx, MaxHeapSize, align_size_up(NewSize + OldSize, _heap_alignment)); + _max_heap_byte_size = MaxHeapSize; } } - // need to do this again - MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); - - // adjust max heap size if necessary - if (NewSize + OldSize > MaxHeapSize) { - if (FLAG_IS_CMDLINE(MaxHeapSize)) { - // somebody set a maximum heap size with the intention that we should not - // exceed it. Adjust New/OldSize as necessary. - uintx calculated_size = NewSize + OldSize; - double shrink_factor = (double) MaxHeapSize / calculated_size; - // align - NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment); - // OldSize is already aligned because above we aligned MaxHeapSize to - // _max_alignment, and we just made sure that NewSize is aligned to - // _min_alignment. In initialize_flags() we verified that _max_alignment - // is a multiple of _min_alignment. - OldSize = MaxHeapSize - NewSize; - } else { - MaxHeapSize = NewSize + OldSize; - } - } - // need to do this again - MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment); always_do_update_barrier = UseConcMarkSweepGC; - // Check validity of heap flags - assert(OldSize % _min_alignment == 0, "old space alignment"); - assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment"); + DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_flags();) } // Values set on the command line win over any ergonomically @@ -281,7 +417,7 @@ void GenCollectorPolicy::initialize_size_info() { CollectorPolicy::initialize_size_info(); - // _min_alignment is used for alignment within a generation. + // _space_alignment is used for alignment within a generation. // There is additional alignment done down stream for some // collectors that sometimes causes unwanted rounding up of // generations sizes. @@ -289,35 +425,8 @@ // Determine maximum size of gen0 size_t max_new_size = 0; - if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) { - if (MaxNewSize < _min_alignment) { - max_new_size = _min_alignment; - } - if (MaxNewSize >= _max_heap_byte_size) { - max_new_size = align_size_down(_max_heap_byte_size - _min_alignment, - _min_alignment); - warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or " - "greater than the entire heap (" SIZE_FORMAT "k). A " - "new generation size of " SIZE_FORMAT "k will be used.", - MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K); - } else { - max_new_size = align_size_down(MaxNewSize, _min_alignment); - } - - // The case for FLAG_IS_ERGO(MaxNewSize) could be treated - // specially at this point to just use an ergonomically set - // MaxNewSize to set max_new_size. For cases with small - // heaps such a policy often did not work because the MaxNewSize - // was larger than the entire heap. The interpretation given - // to ergonomically set flags is that the flags are set - // by different collectors for their own special needs but - // are not allowed to badly shape the heap. This allows the - // different collectors to decide what's best for themselves - // without having to factor in the overall heap shape. It - // can be the case in the future that the collectors would - // only make "wise" ergonomics choices and this policy could - // just accept those choices. The choices currently made are - // not always "wise". 
+ if (!FLAG_IS_DEFAULT(MaxNewSize)) { + max_new_size = MaxNewSize; } else { max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size); // Bound the maximum size by NewSize below (since it historically @@ -386,11 +495,22 @@ _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size); } + // Write back to flags if necessary + if (NewSize != _initial_gen0_size) { + FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size); + } + + if (MaxNewSize != _max_gen0_size) { + FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size); + } + if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT " Initial gen0 " SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, _min_gen0_size, _initial_gen0_size, _max_gen0_size); } + + DEBUG_ONLY(GenCollectorPolicy::assert_size_info();) } // Call this method during the sizing of the gen1 to make @@ -403,23 +523,18 @@ // keeping it simple also seems a worthwhile goal. bool TwoGenerationCollectorPolicy::adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr, - const size_t heap_size, - const size_t min_gen1_size) { + const size_t heap_size) { bool result = false; - if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) { - if ((heap_size < (*gen0_size_ptr + min_gen1_size)) && - (heap_size >= min_gen1_size + _min_alignment)) { - // Adjust gen0 down to accommodate min_gen1_size - *gen0_size_ptr = heap_size - min_gen1_size; - *gen0_size_ptr = - MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment); - assert(*gen0_size_ptr > 0, "Min gen0 is too large"); + if ((*gen0_size_ptr + *gen1_size_ptr) > heap_size) { + uintx smallest_new_size = young_gen_size_lower_bound(); + if ((heap_size < (*gen0_size_ptr + _min_gen1_size)) && + (heap_size >= _min_gen1_size + smallest_new_size)) { + // Adjust gen0 down to accommodate _min_gen1_size + *gen0_size_ptr = align_size_down_bounded(heap_size - _min_gen1_size, _gen_alignment); result = true; } else { - *gen1_size_ptr = heap_size - *gen0_size_ptr; - *gen1_size_ptr = - MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment); + *gen1_size_ptr = align_size_down_bounded(heap_size - *gen0_size_ptr, _gen_alignment); } } return result; @@ -440,41 +555,36 @@ // The maximum gen1 size can be determined from the maximum gen0 // and maximum heap size since no explicit flags exist // for setting the gen1 maximum. - _max_gen1_size = _max_heap_byte_size - _max_gen0_size; - _max_gen1_size = - MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment); + _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment); + // If no explicit command line flag has been set for the // gen1 size, use what is left for gen1. - if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) { - // The user has not specified any value or ergonomics - // has chosen a value (which may or may not be consistent + if (!FLAG_IS_CMDLINE(OldSize)) { + // The user has not specified any value but the ergonomics + // may have chosen a value (which may or may not be consistent // with the overall heap size). In either case make // the minimum, maximum and initial sizes consistent // with the gen0 sizes and the overall heap sizes.
- assert(_min_heap_byte_size > _min_gen0_size, - "gen0 has an unexpected minimum size"); - _min_gen1_size = _min_heap_byte_size - _min_gen0_size; - _min_gen1_size = - MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment); - _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size; - _initial_gen1_size = - MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment); + _min_gen1_size = MAX2(_min_heap_byte_size - _min_gen0_size, _gen_alignment); + _initial_gen1_size = MAX2(_initial_heap_byte_size - _initial_gen0_size, _gen_alignment); + // _max_gen1_size has already been made consistent above + FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size); } else { // It's been explicitly set on the command line. Use the // OldSize and then determine the consequences. - _min_gen1_size = OldSize; + _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size); _initial_gen1_size = OldSize; // If the user has explicitly set an OldSize that is inconsistent // with other command line flags, issue a warning. // The generation minimums and the overall heap minimum should - be within one heap alignment. + be within one generation alignment. - if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) { + if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) { warning("Inconsistency between minimum heap size and minimum " "generation sizes: using minimum heap = " SIZE_FORMAT, _min_heap_byte_size); } - if ((OldSize > _max_gen1_size)) { + if (OldSize > _max_gen1_size) { warning("Inconsistency between maximum heap size and maximum " "generation sizes: using maximum heap = " SIZE_FORMAT " -XX:OldSize flag is being ignored", @@ -482,8 +592,7 @@ } // If there is an inconsistency between the OldSize and the minimum and/or // initial size of gen0, since OldSize was explicitly set, OldSize wins.
- if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, - _min_heap_byte_size, OldSize)) { + if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size, _min_heap_byte_size)) { if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT " Initial gen0 " SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, @@ -492,7 +601,7 @@ } // Initial size if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size, - _initial_heap_byte_size, OldSize)) { + _initial_heap_byte_size)) { if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT " Initial gen0 " SIZE_FORMAT " Maximum gen0 " SIZE_FORMAT, @@ -507,11 +616,26 @@ _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size); _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size); + // Write back to flags if necessary + if (NewSize != _initial_gen0_size) { + FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size); + } + + if (MaxNewSize != _max_gen0_size) { + FLAG_SET_ERGO(uintx, MaxNewSize, _max_gen0_size); + } + + if (OldSize != _initial_gen1_size) { + FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size); + } + if (PrintGCDetails && Verbose) { gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT " Initial gen1 " SIZE_FORMAT " Maximum gen1 " SIZE_FORMAT, _min_gen1_size, _initial_gen1_size, _max_gen1_size); } + + DEBUG_ONLY(TwoGenerationCollectorPolicy::assert_size_info();) } HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, @@ -605,9 +729,7 @@ gc_count_before = Universe::heap()->total_collections(); } - VM_GenCollectForAllocation op(size, - is_tlab, - gc_count_before); + VM_GenCollectForAllocation op(size, is_tlab, gc_count_before); VMThread::execute(&op); if (op.prologue_succeeded()) { result = op.result(); @@ -836,14 +958,16 @@ // MarkSweepPolicy methods // -MarkSweepPolicy::MarkSweepPolicy() { - initialize_all(); +void MarkSweepPolicy::initialize_alignments() { + _space_alignment = _gen_alignment = (uintx)Generation::GenGrain; + _heap_alignment = compute_heap_alignment(); } void MarkSweepPolicy::initialize_generations() { _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, 0, AllocFailStrategy::RETURN_NULL); - if (_generations == NULL) + if (_generations == NULL) { vm_exit_during_initialization("Unable to allocate gen spec"); + } if (UseParNewGC) { _generations[0] = new GenerationSpec(Generation::ParNew, _initial_gen0_size, _max_gen0_size); @@ -852,8 +976,9 @@ } _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_gen1_size, _max_gen1_size); - if (_generations[0] == NULL || _generations[1] == NULL) + if (_generations[0] == NULL || _generations[1] == NULL) { vm_exit_during_initialization("Unable to allocate gen spec"); + } } void MarkSweepPolicy::initialize_gc_policy_counters() {
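The collectorPolicy.cpp hunks above center on one piece of arithmetic: when an explicitly requested MaxHeapSize is smaller than NewSize + OldSize, both generations are shrunk proportionally, re-aligned, and written back through FLAG_SET_ERGO. A minimal standalone sketch of that arithmetic; align_down and the sample sizes are local stand-ins for the HotSpot helpers and globals, not code from this changeset:

#include <cstdint>
#include <cstdio>

static uint64_t align_down(uint64_t size, uint64_t alignment) {
  return size - (size % alignment);
}

int main() {
  const uint64_t M = 1024 * 1024;
  uint64_t gen_alignment = 2 * M;   // assumed generation alignment
  uint64_t new_size = 512 * M;      // as if -XX:NewSize=512m
  uint64_t old_size = 1024 * M;     // as if -XX:OldSize=1g
  uint64_t max_heap = 1024 * M;     // as if -XX:MaxHeapSize=1g; command line wins

  if (new_size + old_size > max_heap) {
    // Shrink the young generation proportionally, keep it aligned,
    // and give the old generation whatever remains.
    double shrink_factor = (double) max_heap / (new_size + old_size);
    new_size = align_down((uint64_t)(new_size * shrink_factor), gen_alignment);
    old_size = max_heap - new_size;
  }
  printf("NewSize=%lluk OldSize=%lluk\n",
         (unsigned long long)(new_size / 1024),
         (unsigned long long)(old_size / 1024));
  return 0;
}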
--- a/src/share/vm/memory/collectorPolicy.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/memory/collectorPolicy.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -61,17 +61,23 @@ protected: GCPolicyCounters* _gc_policy_counters; - // Requires that the concrete subclass sets the alignment constraints - // before calling. + virtual void initialize_alignments() = 0; virtual void initialize_flags(); virtual void initialize_size_info(); + DEBUG_ONLY(virtual void assert_flags();) + DEBUG_ONLY(virtual void assert_size_info();) + size_t _initial_heap_byte_size; size_t _max_heap_byte_size; size_t _min_heap_byte_size; - size_t _min_alignment; - size_t _max_alignment; + size_t _space_alignment; + size_t _heap_alignment; + + // Needed to keep information if MaxHeapSize was set on the command line + // when the flag value is aligned etc by ergonomics + bool _max_heap_size_cmdline; // The sizing of the heap are controlled by a sizing policy. AdaptiveSizePolicy* _size_policy; @@ -79,6 +85,7 @@ // Set to true when policy wants soft refs cleared. // Reset to false by gc after it clears all soft refs. bool _should_clear_all_soft_refs; + // Set to true by the GC if the just-completed gc cleared all // softrefs. This is set to true whenever a gc clears all softrefs, and // set to false each time gc returns to the mutator. For example, in the @@ -86,23 +93,20 @@ // mem_allocate() where it returns op.result() bool _all_soft_refs_clear; - CollectorPolicy() : - _min_alignment(1), - _max_alignment(1), - _initial_heap_byte_size(0), - _max_heap_byte_size(0), - _min_heap_byte_size(0), - _size_policy(NULL), - _should_clear_all_soft_refs(false), - _all_soft_refs_clear(false) - {} + CollectorPolicy(); public: - // Return maximum heap alignment that may be imposed by the policy - static size_t compute_max_alignment(); + virtual void initialize_all() { + initialize_alignments(); + initialize_flags(); + initialize_size_info(); + } - size_t min_alignment() { return _min_alignment; } - size_t max_alignment() { return _max_alignment; } + // Return maximum heap alignment that may be imposed by the policy + static size_t compute_heap_alignment(); + + size_t space_alignment() { return _space_alignment; } + size_t heap_alignment() { return _heap_alignment; } size_t initial_heap_byte_size() { return _initial_heap_byte_size; } size_t max_heap_byte_size() { return _max_heap_byte_size; } @@ -151,7 +155,6 @@ virtual BarrierSet::Name barrier_set_name() = 0; - virtual GenRemSet::Name rem_set_name() = 0; // Create the remembered set (to cover the given reserved region, // allowing breaking up into at most "max_covered_regions"). @@ -195,6 +198,9 @@ return false; } + // Do any updates required to global flags that are due to heap initialization + // changes + virtual void post_heap_initialize() = 0; }; class ClearedAllSoftRefs : public StackObj { @@ -219,6 +225,10 @@ size_t _initial_gen0_size; size_t _max_gen0_size; + // _gen_alignment and _space_alignment will have the same value most of the + // time. When using large pages they can differ. + size_t _gen_alignment; + GenerationSpec **_generations; // Return true if an allocation should be attempted in the older @@ -229,41 +239,50 @@ void initialize_flags(); void initialize_size_info(); + DEBUG_ONLY(void assert_flags();) + DEBUG_ONLY(void assert_size_info();) + // Try to allocate space by expanding the heap. 
virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab); - // Scale the base_size by NewRation according to + // Compute max heap alignment + size_t compute_max_alignment(); + + // Scale the base_size by NewRatio according to // result = base_size / (NewRatio + 1) // and align by min_alignment() size_t scale_by_NewRatio_aligned(size_t base_size); - // Bound the value by the given maximum minus the - // min_alignment. + // Bound the value by the given maximum minus the min_alignment size_t bound_minus_alignment(size_t desired_size, size_t maximum_size); public: + GenCollectorPolicy(); + // Accessors size_t min_gen0_size() { return _min_gen0_size; } size_t initial_gen0_size() { return _initial_gen0_size; } size_t max_gen0_size() { return _max_gen0_size; } + size_t gen_alignment() { return _gen_alignment; } virtual int number_of_generations() = 0; - virtual GenerationSpec **generations() { + virtual GenerationSpec **generations() { assert(_generations != NULL, "Sanity check"); return _generations; } virtual GenCollectorPolicy* as_generation_policy() { return this; } - virtual void initialize_generations() = 0; + virtual void initialize_generations() { }; virtual void initialize_all() { - initialize_flags(); - initialize_size_info(); + CollectorPolicy::initialize_all(); initialize_generations(); } + size_t young_gen_size_lower_bound(); + HeapWord* mem_allocate_work(size_t size, bool is_tlab, bool* gc_overhead_limit_was_exceeded); @@ -274,6 +293,10 @@ virtual void initialize_size_policy(size_t init_eden_size, size_t init_promo_size, size_t init_survivor_size); + + virtual void post_heap_initialize() { + assert(_max_gen0_size == MaxNewSize, "Should be taken care of by initialize_size_info"); + } }; // All of hotspot's current collectors are subtypes of this @@ -290,9 +313,14 @@ void initialize_flags(); void initialize_size_info(); - void initialize_generations() { ShouldNotReachHere(); } + + DEBUG_ONLY(void assert_flags();) + DEBUG_ONLY(void assert_size_info();) public: + TwoGenerationCollectorPolicy() : GenCollectorPolicy(), _min_gen1_size(0), + _initial_gen1_size(0), _max_gen1_size(0) {} + // Accessors size_t min_gen1_size() { return _min_gen1_size; } size_t initial_gen1_size() { return _initial_gen1_size; } @@ -301,25 +329,25 @@ // Inherited methods TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; } - int number_of_generations() { return 2; } - BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; } - GenRemSet::Name rem_set_name() { return GenRemSet::CardTable; } + int number_of_generations() { return 2; } + BarrierSet::Name barrier_set_name() { return BarrierSet::CardTableModRef; } virtual CollectorPolicy::Name kind() { return CollectorPolicy::TwoGenerationCollectorPolicyKind; } - // Returns true is gen0 sizes were adjusted + // Returns true if gen0 sizes were adjusted bool adjust_gen0_sizes(size_t* gen0_size_ptr, size_t* gen1_size_ptr, - const size_t heap_size, const size_t min_gen1_size); + const size_t heap_size); }; class MarkSweepPolicy : public TwoGenerationCollectorPolicy { protected: + void initialize_alignments(); void initialize_generations(); public: - MarkSweepPolicy(); + MarkSweepPolicy() {} MarkSweepPolicy* as_mark_sweep_policy() { return this; }
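The header now encodes a fixed initialization order, with alignments chosen before flags and sizes, and subclasses overriding the individual hooks instead of calling initialize_all() from their constructors. A toy model of that template-method structure; the class and member names below are illustrative, not the HotSpot declarations:

#include <cstdio>

struct Policy {
  virtual ~Policy() {}
  virtual void initialize_alignments() = 0;   // subclass picks alignments first
  virtual void initialize_flags()     { printf("flags\n"); }
  virtual void initialize_size_info() { printf("sizes\n"); }
  virtual void initialize_all() {
    initialize_alignments();                  // must run before flag/size setup
    initialize_flags();
    initialize_size_info();
  }
};

struct MarkSweepLike : Policy {
  void initialize_alignments() { printf("alignments\n"); }
};

int main() {
  MarkSweepLike p;
  p.initialize_all();   // prints: alignments, flags, sizes -- in that order
  return 0;
}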
--- a/src/share/vm/memory/defNewGeneration.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/memory/defNewGeneration.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -204,7 +204,7 @@ // Compute the maximum eden and survivor space sizes. These sizes // are computed assuming the entire reserved space is committed. // These values are exported as performance counters. - uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); + uintx alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment(); uintx size = _virtual_space.reserved_size(); _max_survivor_size = compute_survivor_size(size, alignment); _max_eden_size = size - (2*_max_survivor_size); @@ -235,7 +235,7 @@ bool clear_space, bool mangle_space) { uintx alignment = - GenCollectedHeap::heap()->collector_policy()->min_alignment(); + GenCollectedHeap::heap()->collector_policy()->space_alignment(); // If the spaces are being cleared (only done at heap initialization // currently), the survivor spaces need not be empty. @@ -473,7 +473,7 @@ } size_t DefNewGeneration::max_capacity() const { - const size_t alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment(); + const size_t alignment = GenCollectedHeap::heap()->collector_policy()->space_alignment(); const size_t reserved_bytes = reserved().byte_size(); return reserved_bytes - compute_survivor_size(reserved_bytes, alignment); }
--- a/src/share/vm/memory/genCollectedHeap.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/memory/genCollectedHeap.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -111,7 +111,7 @@ int n_covered_regions = 0; ReservedSpace heap_rs; - size_t heap_alignment = collector_policy()->max_alignment(); + size_t heap_alignment = collector_policy()->heap_alignment(); heap_address = allocate(heap_alignment, &total_reserved, &n_covered_regions, &heap_rs); @@ -1053,12 +1053,6 @@ } } -void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) { - for (int i = 0; i <= collectedGen; i++) { - _gens[i]->compute_new_size(); - } -} - GenCollectedHeap* GenCollectedHeap::heap() { assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()"); assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
--- a/src/share/vm/memory/genCollectedHeap.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/memory/genCollectedHeap.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -86,10 +86,6 @@ NOT_PRODUCT(static size_t _skip_header_HeapWords;) protected: - // Directs each generation up to and including "collectedGen" to recompute - // its desired size. - void compute_new_generation_sizes(int collectedGen); - // Helper functions for allocation HeapWord* attempt_allocation(size_t size, bool is_tlab,
--- a/src/share/vm/memory/metaspace.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/memory/metaspace.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -56,7 +56,7 @@ MetaWord* last_allocated = 0; -size_t Metaspace::_class_metaspace_size; +size_t Metaspace::_compressed_class_space_size; // Used in declarations in SpaceManager and ChunkManager enum ChunkIndex { @@ -2843,6 +2843,8 @@ #define VIRTUALSPACEMULTIPLIER 2 #ifdef _LP64 +static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); + void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { // Figure out the narrow_klass_base and the narrow_klass_shift. The // narrow_klass_base is the lower of the metaspace base and the cds base @@ -2852,14 +2854,22 @@ address higher_address; if (UseSharedSpaces) { higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), - (address)(metaspace_base + class_metaspace_size())); + (address)(metaspace_base + compressed_class_space_size())); lower_base = MIN2(metaspace_base, cds_base); } else { - higher_address = metaspace_base + class_metaspace_size(); + higher_address = metaspace_base + compressed_class_space_size(); lower_base = metaspace_base; + + uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; + // If compressed class space fits in lower 32G, we don't need a base. + if (higher_address <= (address)klass_encoding_max) { + lower_base = 0; // effectively lower base is zero. + } } + Universe::set_narrow_klass_base(lower_base); - if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) { + + if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { Universe::set_narrow_klass_shift(0); } else { assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); @@ -2874,24 +2884,24 @@ assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); address lower_base = MIN2((address)metaspace_base, cds_base); address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), - (address)(metaspace_base + class_metaspace_size())); - return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint); + (address)(metaspace_base + compressed_class_space_size())); + return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); } // Try to allocate the metaspace at the requested addr. void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { assert(using_class_space(), "called improperly"); assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); - assert(class_metaspace_size() < KlassEncodingMetaspaceMax, + assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, "Metaspace size is too big"); - assert_is_ptr_aligned(requested_addr, _reserve_alignment); - assert_is_ptr_aligned(cds_base, _reserve_alignment); - assert_is_size_aligned(class_metaspace_size(), _reserve_alignment); + assert_is_ptr_aligned(requested_addr, _reserve_alignment); + assert_is_ptr_aligned(cds_base, _reserve_alignment); + assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); // Don't use large pages for the class space. 
bool large_pages = false; - ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(), + ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), _reserve_alignment, large_pages, requested_addr, 0); @@ -2906,7 +2916,7 @@ while (!metaspace_rs.is_reserved() && (addr + increment > addr) && can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { addr = addr + increment; - metaspace_rs = ReservedSpace(class_metaspace_size(), + metaspace_rs = ReservedSpace(compressed_class_space_size(), _reserve_alignment, large_pages, addr, 0); } } @@ -2917,11 +2927,11 @@ // initialization has happened that depends on UseCompressedClassPointers. // So, UseCompressedClassPointers cannot be turned off at this point. if (!metaspace_rs.is_reserved()) { - metaspace_rs = ReservedSpace(class_metaspace_size(), + metaspace_rs = ReservedSpace(compressed_class_space_size(), _reserve_alignment, large_pages); if (!metaspace_rs.is_reserved()) { vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", - class_metaspace_size())); + compressed_class_space_size())); } } } @@ -2943,8 +2953,8 @@ if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT, Universe::narrow_klass_base(), Universe::narrow_klass_shift()); - gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, - class_metaspace_size(), metaspace_rs.base(), requested_addr); + gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, + compressed_class_space_size(), metaspace_rs.base(), requested_addr); } } @@ -2965,11 +2975,6 @@ #endif -// Align down. If the aligning result in 0, return 'alignment'. -static size_t restricted_align_down(size_t size, size_t alignment) { - return MAX2(alignment, align_size_down_(size, alignment)); -} - void Metaspace::ergo_initialize() { if (DumpSharedSpaces) { // Using large pages when dumping the shared archive is currently not implemented. @@ -2992,13 +2997,13 @@ // Ideally, we would be able to set the default value of MaxMetaspaceSize in // globals.hpp to the aligned value, but this is not possible, since the // alignment depends on other flags being parsed. 
- MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment); + MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment); if (MetaspaceSize > MaxMetaspaceSize) { MetaspaceSize = MaxMetaspaceSize; } - MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment); + MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment); assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); @@ -3006,11 +3011,11 @@ vm_exit_during_initialization("Too small initial Metaspace size"); } - MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment); - MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment); - - CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment); - set_class_metaspace_size(CompressedClassSpaceSize); + MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment); + MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment); + + CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment); + set_compressed_class_space_size(CompressedClassSpaceSize); } void Metaspace::global_initialize() { @@ -3039,12 +3044,12 @@ } #ifdef _LP64 - if (cds_total + class_metaspace_size() > (uint64_t)max_juint) { + if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) { vm_exit_during_initialization("Unable to dump shared archive.", err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " - "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(), - cds_total + class_metaspace_size(), (size_t)max_juint)); + "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(), + cds_total + compressed_class_space_size(), UnscaledClassSpaceMax)); } // Set the compressed klass pointer base so that decoding of these pointers works @@ -3092,7 +3097,8 @@ cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); } else { - allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0); + char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment); + allocate_metaspace_compressed_klass_ptrs(base, 0); } } #endif @@ -3354,6 +3360,11 @@ return result; } +size_t Metaspace::class_chunk_size(size_t word_size) { + assert(using_class_space(), "Has to use class space"); + return class_vsm()->calc_chunk_size(word_size); +} + void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) { // If result is still null, we are out of memory. if (Verbose && TraceMetadataChunkAllocation) { @@ -3365,9 +3376,19 @@ MetaspaceAux::dump(gclog_or_tty); } + bool out_of_compressed_class_space = false; + if (is_class_space_allocation(mdtype)) { + Metaspace* metaspace = loader_data->metaspace_non_null(); + out_of_compressed_class_space = + MetaspaceAux::committed_bytes(Metaspace::ClassType) + + (metaspace->class_chunk_size(word_size) * BytesPerWord) > + CompressedClassSpaceSize; + } + // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support - const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" : - "Metadata space"; + const char* space_string = out_of_compressed_class_space ? 
+ "Compressed class space" : "Metaspace"; + report_java_out_of_memory(space_string); if (JvmtiExport::should_post_resource_exhausted()) { @@ -3380,7 +3401,7 @@ vm_exit_during_initialization("OutOfMemoryError", space_string); } - if (is_class_space_allocation(mdtype)) { + if (out_of_compressed_class_space) { THROW_OOP(Universe::out_of_memory_error_class_metaspace()); } else { THROW_OOP(Universe::out_of_memory_error_metaspace());
--- a/src/share/vm/memory/metaspace.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/memory/metaspace.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -115,13 +115,13 @@ static size_t align_word_size_up(size_t); // Aligned size of the metaspace. - static size_t _class_metaspace_size; + static size_t _compressed_class_space_size; - static size_t class_metaspace_size() { - return _class_metaspace_size; + static size_t compressed_class_space_size() { + return _compressed_class_space_size; } - static void set_class_metaspace_size(size_t metaspace_size) { - _class_metaspace_size = metaspace_size; + static void set_compressed_class_space_size(size_t size) { + _compressed_class_space_size = size; } static size_t _first_chunk_word_size; @@ -192,6 +192,8 @@ AllocRecord * _alloc_record_head; AllocRecord * _alloc_record_tail; + size_t class_chunk_size(size_t word_size); + public: Metaspace(Mutex* lock, MetaspaceType type); @@ -252,6 +254,7 @@ static bool is_class_space_allocation(MetadataType mdType) { return mdType == ClassType && using_class_space(); } + }; class MetaspaceAux : AllStatic {
--- a/src/share/vm/memory/sharedHeap.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/memory/sharedHeap.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -247,6 +247,7 @@ } void SharedHeap::post_initialize() { + CollectedHeap::post_initialize(); ref_processing_init(); }
--- a/src/share/vm/memory/universe.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/memory/universe.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -677,13 +677,13 @@ // HeapBased - Use compressed oops with heap base + encoding. // 4Gb -static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1); +static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1); // 32Gb -// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes; +// OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes; char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) { assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be"); - assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be"); + assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be"); assert(is_size_aligned(heap_size, alignment), "Must be"); uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment); @@ -702,20 +702,40 @@ // If the total size is small enough to allow UnscaledNarrowOop then // just use UnscaledNarrowOop. } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) { - if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) && + if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) && (Universe::narrow_oop_shift() == 0)) { // Use 32-bits oops without encoding and // place heap's top on the 4Gb boundary - base = (NarrowOopHeapMax - heap_size); + base = (UnscaledOopHeapMax - heap_size); } else { // Can't reserve with NarrowOopShift == 0 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); + if (mode == UnscaledNarrowOop || - mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) { + mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) { + // Use zero based compressed oops with encoding and // place heap's top on the 32Gb boundary in case // total_size > 4Gb or failed to reserve below 4Gb. - base = (OopEncodingHeapMax - heap_size); + uint64_t heap_top = OopEncodingHeapMax; + + // For small heaps, save some space for compressed class pointer + // space so it can be decoded with no base. + if (UseCompressedClassPointers && !UseSharedSpaces && + OopEncodingHeapMax <= 32*G) { + + uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment); + assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space, + alignment), "difference must be aligned too"); + uint64_t new_top = OopEncodingHeapMax-class_space; + + if (total_size <= new_top) { + heap_top = new_top; + } + } + + // Align base to the adjusted top of the heap + base = heap_top - heap_size; } } } else { @@ -737,7 +757,7 @@ // Set to a non-NULL value so the ReservedSpace ctor computes // the correct no-access prefix. // The final value will be set in initialize_heap() below. 
- Universe::set_narrow_oop_base((address)NarrowOopHeapMax); + Universe::set_narrow_oop_base((address)UnscaledOopHeapMax); #ifdef _WIN64 if (UseLargePages) { // Cannot allocate guard pages for implicit checks in indexed @@ -765,6 +785,7 @@ } else if (UseG1GC) { #if INCLUDE_ALL_GCS G1CollectorPolicy* g1p = new G1CollectorPolicy(); + g1p->initialize_all(); G1CollectedHeap* g1h = new G1CollectedHeap(g1p); Universe::_collectedHeap = g1h; #else // INCLUDE_ALL_GCS @@ -789,6 +810,7 @@ } else { // default old generation gc_policy = new MarkSweepPolicy(); } + gc_policy->initialize_all(); Universe::_collectedHeap = new GenCollectedHeap(gc_policy); } @@ -833,7 +855,7 @@ Universe::set_narrow_oop_use_implicit_null_checks(true); } #endif // _WIN64 - if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) { + if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) { // Can't reserve heap below 4Gb. Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); } else { @@ -1021,7 +1043,7 @@ Universe::_virtual_machine_error_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false); - Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false); + Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false); if (!DumpSharedSpaces) { // These are the only Java fields that are currently set during shared space dumping. @@ -1029,7 +1051,7 @@ Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg()); - msg = java_lang_String::create_from_str("Metadata space", CHECK_false); + msg = java_lang_String::create_from_str("Metaspace", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg()); msg = java_lang_String::create_from_str("Compressed class space", CHECK_false); java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
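The new universe.cpp logic reserves the compressed class space directly above small heaps so that, like the heap itself, it can be decoded zero-based. A back-of-the-envelope sketch of that placement; the sizes are illustrative, and the real code also folds in HeapBaseMinAddress and alignment constraints:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t G = 1024ULL * 1024 * 1024;
  uint64_t oop_encoding_max = 32 * G;   // zero-based limit with shift = 3
  uint64_t class_space = 1 * G;         // aligned CompressedClassSpaceSize
  uint64_t heap_size = 4 * G;

  uint64_t heap_top = oop_encoding_max;
  if (heap_size + class_space <= oop_encoding_max) {
    heap_top = oop_encoding_max - class_space;   // leave room above the heap
  }
  uint64_t heap_base = heap_top - heap_size;     // align base to the adjusted top
  printf("heap base at %lluG, class space starts at %lluG\n",
         (unsigned long long)(heap_base / G),
         (unsigned long long)(heap_top / G));
  return 0;
}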
--- a/src/share/vm/oops/cpCache.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/oops/cpCache.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -554,24 +554,37 @@ // Implementation of ConstantPoolCache ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data, - int length, const intStack& index_map, + const intStack& invokedynamic_index_map, const intStack& invokedynamic_map, TRAPS) { + + const int length = index_map.length() + invokedynamic_index_map.length(); int size = ConstantPoolCache::size(length); return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD) - ConstantPoolCache(length, index_map, invokedynamic_map); + ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map); } void ConstantPoolCache::initialize(const intArray& inverse_index_map, + const intArray& invokedynamic_inverse_index_map, const intArray& invokedynamic_references_map) { - assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache"); - for (int i = 0; i < length(); i++) { + for (int i = 0; i < inverse_index_map.length(); i++) { ConstantPoolCacheEntry* e = entry_at(i); int original_index = inverse_index_map[i]; e->initialize_entry(original_index); assert(entry_at(i) == e, "sanity"); } + + // Append invokedynamic entries at the end + int invokedynamic_offset = inverse_index_map.length(); + for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) { + int offset = i + invokedynamic_offset; + ConstantPoolCacheEntry* e = entry_at(offset); + int original_index = invokedynamic_inverse_index_map[i]; + e->initialize_entry(original_index); + assert(entry_at(offset) == e, "sanity"); + } + for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) { const int cpci = invokedynamic_references_map[ref]; if (cpci >= 0) {
--- a/src/share/vm/oops/cpCache.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/oops/cpCache.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -31,6 +31,10 @@ class PSPromotionManager; +// The ConstantPoolCache is not a cache! It is the resolution table that the +// interpreter uses to avoid going into the runtime and a way to access resolved +// values. + // A ConstantPoolCacheEntry describes an individual entry of the constant // pool cache. There's 2 principal kinds of entries: field entries for in- // stance & static field access, and method entries for invokes. Some of @@ -392,26 +396,33 @@ friend class MetadataFactory; private: int _length; - ConstantPool* _constant_pool; // the corresponding constant pool + ConstantPool* _constant_pool; // the corresponding constant pool // Sizing debug_only(friend class ClassVerifier;) // Constructor - ConstantPoolCache(int length, const intStack& inverse_index_map, + ConstantPoolCache(int length, + const intStack& inverse_index_map, + const intStack& invokedynamic_inverse_index_map, const intStack& invokedynamic_references_map) : - _length(length), _constant_pool(NULL) { - initialize(inverse_index_map, invokedynamic_references_map); + _length(length), + _constant_pool(NULL) { + initialize(inverse_index_map, invokedynamic_inverse_index_map, + invokedynamic_references_map); for (int i = 0; i < length; i++) { assert(entry_at(i)->is_f1_null(), "Failed to clear?"); } } // Initialization - void initialize(const intArray& inverse_index_map, const intArray& invokedynamic_references_map); + void initialize(const intArray& inverse_index_map, + const intArray& invokedynamic_inverse_index_map, + const intArray& invokedynamic_references_map); public: - static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length, - const intStack& inverse_index_map, + static ConstantPoolCache* allocate(ClassLoaderData* loader_data, + const intStack& cp_cache_map, + const intStack& invokedynamic_cp_cache_map, const intStack& invokedynamic_references_map, TRAPS); bool is_constantPoolCache() const { return true; }
--- a/src/share/vm/oops/instanceKlass.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/oops/instanceKlass.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -2211,6 +2211,10 @@ data = mdo->next_data(data)) { data->clean_weak_klass_links(is_alive); } + ParametersTypeData* parameters = mdo->parameters_type_data(); + if (parameters != NULL) { + parameters->clean_weak_klass_links(is_alive); + } } } } @@ -2393,15 +2397,38 @@ const char* InstanceKlass::signature_name() const { + int hash_len = 0; + char hash_buf[40]; + + // If this is an anonymous class, append a hash to make the name unique + if (is_anonymous()) { + assert(EnableInvokeDynamic, "EnableInvokeDynamic was not set."); + intptr_t hash = (java_mirror() != NULL) ? java_mirror()->identity_hash() : 0; + sprintf(hash_buf, "/" UINTX_FORMAT, (uintx)hash); + hash_len = (int)strlen(hash_buf); + } + + // Get the internal name as a c string const char* src = (const char*) (name()->as_C_string()); const int src_length = (int)strlen(src); - char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3); - int src_index = 0; + + char* dest = NEW_RESOURCE_ARRAY(char, src_length + hash_len + 3); + + // Add L as type indicator int dest_index = 0; dest[dest_index++] = 'L'; - while (src_index < src_length) { + + // Add the actual class name + for (int src_index = 0; src_index < src_length; ) { dest[dest_index++] = src[src_index++]; } + + // If we have a hash, append it + for (int hash_index = 0; hash_index < hash_len; ) { + dest[dest_index++] = hash_buf[hash_index++]; + } + + // Add the semicolon and the NULL dest[dest_index++] = ';'; dest[dest_index] = '\0'; return dest;
--- a/src/share/vm/oops/klassVtable.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/oops/klassVtable.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -86,7 +86,11 @@ get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces); *num_new_mirandas = new_mirandas.length(); - vtable_length += *num_new_mirandas * vtableEntry::size(); + // Interfaces do not need interface methods in their vtables + // This includes miranda methods and during later processing, default methods + if (!class_flags.is_interface()) { + vtable_length += *num_new_mirandas * vtableEntry::size(); + } if (Universe::is_bootstrapping() && vtable_length == 0) { // array classes don't have their superclass set correctly during @@ -224,7 +228,11 @@ } // add miranda methods; it will also return the updated initialized - initialized = fill_in_mirandas(initialized); + // Interfaces do not need interface methods in their vtables + // This includes miranda methods and during later processing, default methods + if (!ik()->is_interface()) { + initialized = fill_in_mirandas(initialized); + } // In class hierarchies where the accessibility is not increasing (i.e., going from private -> // package_private -> public/protected), the vtable might actually be smaller than our initial @@ -264,12 +272,12 @@ _klass->internal_name(), sig, vtable_index); super_method->access_flags().print_on(tty); if (super_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } tty->print("overriders flags: "); target_method->access_flags().print_on(tty); if (target_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } } #endif /*PRODUCT*/ @@ -332,9 +340,15 @@ // An interface never allocates new vtable slots, only inherits old ones. // This method will either be assigned its own itable index later, // or be assigned an inherited vtable index in the loop below. 
- // default methods store their vtable indices in the inheritors default_vtable_indices - assert (default_index == -1, "interfaces don't store resolved default methods"); - target_method()->set_vtable_index(Method::pending_itable_index); + // default methods inherited by classes store their vtable indices + // in the inheritor's default_vtable_indices + // default methods inherited by interfaces may already have a + // valid itable index, if so, don't change it + // overpass methods in an interface will be assigned an itable index later + // by an inheriting class + if (!is_default || !target_method()->has_itable_index()) { + target_method()->set_vtable_index(Method::pending_itable_index); + } } // we need a new entry if there is no superclass @@ -441,7 +455,7 @@ target_klass->internal_name(), sig, i); super_method->access_flags().print_on(tty); if (super_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (super_method->is_overpass()) { tty->print("overpass"); @@ -449,7 +463,7 @@ tty->print("overriders flags: "); target_method->access_flags().print_on(tty); if (target_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (target_method->is_overpass()) { tty->print("overpass"); @@ -468,7 +482,7 @@ target_klass->internal_name(), sig,i); super_method->access_flags().print_on(tty); if (super_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (super_method->is_overpass()) { tty->print("overpass"); @@ -476,7 +490,7 @@ tty->print("overriders flags: "); target_method->access_flags().print_on(tty); if (target_method->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (target_method->is_overpass()) { tty->print("overpass"); @@ -494,8 +508,18 @@ #ifndef PRODUCT if (PrintVtables && Verbose) { ResourceMark rm; - tty->print_cr("adding %s::%s at index %d", _klass->internal_name(), - (m != NULL) ? m->name()->as_C_string() : "<NULL>", index); + const char* sig = (m != NULL) ? 
m->name_and_sig_as_C_string() : "<NULL>"; + tty->print("adding %s at index %d, flags: ", sig, index); + if (m != NULL) { + m->access_flags().print_on(tty); + if (m->is_default_method()) { + tty->print("default "); + } + if (m->is_overpass()) { + tty->print("overpass"); + } + } + tty->cr(); } #endif table()[index].set(m); @@ -631,8 +655,10 @@ if (mhk->is_interface()) { assert(m->is_public(), "should be public"); assert(ik()->implements_interface(method_holder) , "this class should implement the interface"); - assert(is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super()), "should be a miranda_method"); - return true; + // the search could find a miranda or a default method + if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) { + return true; + } } return false; } @@ -644,9 +670,10 @@ // the caller must make sure that the method belongs to an interface implemented by the class // Miranda methods only include public interface instance methods // Not private methods, not static methods, not default == concrete abstract +// Miranda methods also do not include overpass methods in interfaces bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Array<Method*>* default_methods, Klass* super) { - if (m->is_static() || m->is_private()) { + if (m->is_static() || m->is_private() || m->is_overpass()) { return false; } Symbol* name = m->name(); @@ -744,6 +771,8 @@ // Discover miranda methods ("miranda" = "interface abstract, no binding"), // and append them into the vtable starting at index initialized, // return the new value of initialized. +// Miranda methods use vtable entries, but do not get assigned a vtable_index +// The vtable_index is discovered by searching from the end of the vtable int klassVtable::fill_in_mirandas(int initialized) { GrowableArray<Method*> mirandas(20); get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(), @@ -758,7 +787,7 @@ sig, initialized); meth->access_flags().print_on(tty); if (meth->is_default_method()) { - tty->print("default"); + tty->print("default "); } tty->cr(); } @@ -858,7 +887,7 @@ tty->print(" (%5d) ", i); m->access_flags().print_on(tty); if (m->is_default_method()) { - tty->print("default"); + tty->print("default "); } if (m->is_overpass()) { tty->print("overpass"); @@ -977,6 +1006,25 @@ if (interface_method_needs_itable_index(m)) { assert(!m->is_final_method(), "no final interface methods"); // If m is already assigned a vtable index, do not disturb it. + if (TraceItables && Verbose) { + ResourceMark rm; + const char* sig = (m != NULL) ? 
m->name_and_sig_as_C_string() : "<NULL>"; + if (m->has_vtable_index()) { + tty->print("itable index %d for method: %s, flags: ", m->vtable_index(), sig); + } else { + tty->print("itable index %d for method: %s, flags: ", ime_num, sig); + } + if (m != NULL) { + m->access_flags().print_on(tty); + if (m->is_default_method()) { + tty->print("default "); + } + if (m->is_overpass()) { + tty->print("overpass"); + } + } + tty->cr(); + } if (!m->has_vtable_index()) { assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable"); m->set_itable_index(ime_num); @@ -1079,7 +1127,7 @@ tty->print("target_method flags: "); target()->access_flags().print_on(tty); if (target()->is_default_method()) { - tty->print("default"); + tty->print("default "); } tty->cr(); } @@ -1158,7 +1206,7 @@ tty->print(" (%5d) ", i); m->access_flags().print_on(tty); if (m->is_default_method()) { - tty->print("default"); + tty->print("default "); } tty->print(" -- "); m->print_name(tty);
--- a/src/share/vm/oops/method.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/oops/method.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1515,7 +1515,10 @@ return bp->orig_bytecode(); } } - ShouldNotReachHere(); + { + ResourceMark rm; + fatal(err_msg("no original bytecode found in %s at bci %d", name_and_sig_as_C_string(), bci)); + } return Bytecodes::_shouldnotreachhere; }
--- a/src/share/vm/oops/methodData.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/oops/methodData.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -275,23 +275,23 @@ } bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) { - return !is_type_none(p) && - !((Klass*)klass_part(p))->is_loader_alive(is_alive_cl); + Klass* k = (Klass*)klass_part(p); + return k != NULL && k->is_loader_alive(is_alive_cl); } void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) { for (int i = 0; i < _number_of_entries; i++) { intptr_t p = type(i); - if (is_loader_alive(is_alive_cl, p)) { - set_type(i, type_none()); + if (!is_loader_alive(is_alive_cl, p)) { + set_type(i, with_status((Klass*)NULL, p)); } } } void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) { intptr_t p = type(); - if (is_loader_alive(is_alive_cl, p)) { - set_type(type_none()); + if (!is_loader_alive(is_alive_cl, p)) { + set_type(with_status((Klass*)NULL, p)); } }
--- a/src/share/vm/oops/methodData.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/oops/methodData.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -690,7 +690,6 @@ // recorded type: cell without bit 0 and 1 static intptr_t klass_part(intptr_t v) { intptr_t r = v & type_klass_mask; - assert (r != 0, "invalid"); return r; } @@ -698,7 +697,9 @@ static Klass* valid_klass(intptr_t k) { if (!is_type_none(k) && !is_type_unknown(k)) { - return (Klass*)klass_part(k); + Klass* res = (Klass*)klass_part(k); + assert(res != NULL, "invalid"); + return res; } else { return NULL; }
--- a/src/share/vm/opto/bytecodeInfo.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/bytecodeInfo.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -389,6 +389,10 @@ return false; } if (inline_level() > _max_inline_level) { + if (callee_method->force_inline() && inline_level() > MaxForceInlineLevel) { + set_msg("MaxForceInlineLevel"); + return false; + } if (!callee_method->force_inline() || !IncrementalInline) { set_msg("inlining too deep"); return false;
--- a/src/share/vm/opto/callGenerator.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/callGenerator.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -776,7 +776,7 @@ guarantee(!target->is_method_handle_intrinsic(), "should not happen"); // XXX remove const int vtable_index = Method::invalid_vtable_index; CallGenerator* cg = C->call_generator(target, vtable_index, false, jvms, true, PROB_ALWAYS, NULL, true, true); - assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); + assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); if (cg != NULL && cg->is_inline()) return cg; } @@ -846,7 +846,7 @@ } CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms, true, PROB_ALWAYS, speculative_receiver_type, true, true); - assert(!cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); + assert(cg == NULL || !cg->is_late_inline() || cg->is_mh_late_inline(), "no late inline here"); if (cg != NULL && cg->is_inline()) return cg; }
--- a/src/share/vm/opto/compile.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/compile.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -848,6 +848,7 @@ } #endif + NOT_PRODUCT( verify_barriers(); ) // Now that we know the size of all the monitors we can add a fixed slot // for the original deopt pc. @@ -3018,12 +3019,17 @@ // Phi nodes shouldn't be moved. They would only match below if they // had the same control as the MathExactNode. The only time that // would happen is if the Phi is also an input to the MathExact - if (!out->is_Phi()) { - if (out->in(0) == NULL) { - out->set_req(0, non_throwing); - } else if (out->in(0) == ctrl) { - out->set_req(0, non_throwing); - } + // + // Cmp nodes shouldn't have control set at all. + if (out->is_Phi() || + out->is_Cmp()) { + continue; + } + + if (out->in(0) == NULL) { + out->set_req(0, non_throwing); + } else if (out->in(0) == ctrl) { + out->set_req(0, non_throwing); } } } @@ -3368,6 +3374,72 @@ } } } + +// Verify GC barriers consistency +// Currently supported: +// - G1 pre-barriers (see GraphKit::g1_write_barrier_pre()) +void Compile::verify_barriers() { + if (UseG1GC) { + // Verify G1 pre-barriers + const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()); + + ResourceArea *area = Thread::current()->resource_area(); + Unique_Node_List visited(area); + Node_List worklist(area); + // We're going to walk control flow backwards starting from the Root + worklist.push(_root); + while (worklist.size() > 0) { + Node* x = worklist.pop(); + if (x == NULL || x == top()) continue; + if (visited.member(x)) { + continue; + } else { + visited.push(x); + } + + if (x->is_Region()) { + for (uint i = 1; i < x->req(); i++) { + worklist.push(x->in(i)); + } + } else { + worklist.push(x->in(0)); + // We are looking for the pattern: + // /->ThreadLocal + // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset) + // \->ConI(0) + // We want to verify that the If and the LoadB have the same control + // See GraphKit::g1_write_barrier_pre() + if (x->is_If()) { + IfNode *iff = x->as_If(); + if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) { + CmpNode *cmp = iff->in(1)->in(1)->as_Cmp(); + if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0 + && cmp->in(1)->is_Load()) { + LoadNode* load = cmp->in(1)->as_Load(); + if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal + && load->in(2)->in(3)->is_Con() + && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) { + + Node* if_ctrl = iff->in(0); + Node* load_ctrl = load->in(0); + + if (if_ctrl != load_ctrl) { + // Skip possible CProj->NeverBranch in infinite loops + if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj) + && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) { + if_ctrl = if_ctrl->in(0)->in(0); + } + } + assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match"); + } + } + } + } + } + } + } +} + #endif // The Compile object keeps track of failure reasons separately from the ciEnv.
--- a/src/share/vm/opto/compile.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/compile.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -1148,6 +1148,9 @@ // graph is strongly connected from root in both directions. void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN; + // Verify GC barrier patterns + void verify_barriers() PRODUCT_RETURN; + // End-of-run dumps. static void print_statistics() PRODUCT_RETURN;
--- a/src/share/vm/opto/library_call.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/library_call.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -2006,9 +2006,9 @@ Node* arg2 = NULL; if (is_increment) { - arg2 = intcon(1); + arg2 = intcon(1); } else { - arg2 = argument(1); + arg2 = argument(1); } Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) ); @@ -2056,7 +2056,7 @@ if (is_decrement) { arg2 = longcon(1); } else { - Node* arg2 = argument(2); // type long + arg2 = argument(2); // type long // argument(3) == TOP }
--- a/src/share/vm/opto/loopTransform.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/loopTransform.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -713,6 +713,10 @@ case Op_ModL: body_size += 30; break; case Op_DivL: body_size += 30; break; case Op_MulL: body_size += 10; break; + case Op_FlagsProj: + // Can't handle unrolling of loops containing + // nodes that generate a FlagsProj at the moment + return false; case Op_StrComp: case Op_StrEquals: case Op_StrIndexOf: @@ -1960,7 +1964,7 @@ // Find loads off the surviving projection; remove their control edge for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { Node* cd = dp->fast_out(i); // Control-dependent node - if( cd->is_Load() ) { // Loads can now float around in the loop + if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop // Allow the load to float around in the loop, or before it // but NOT before the pre-loop. _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL
--- a/src/share/vm/opto/loopopts.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/loopopts.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -42,6 +42,13 @@ // so disable this for now return NULL; } + + if (n->is_MathExact()) { + // MathExact has projections that are not correctly handled in the code + // below. + return NULL; + } + int wins = 0; assert(!n->is_CFG(), ""); assert(region->is_Region(), "");
--- a/src/share/vm/opto/matcher.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/matcher.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -464,17 +464,17 @@ C->FIRST_STACK_mask().Clear(); // Add in the incoming argument area - OptoReg::Name init = OptoReg::add(_old_SP, C->out_preserve_stack_slots()); - for (i = init; i < _in_arg_limit; i = OptoReg::add(i,1)) + OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots()); + for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) { C->FIRST_STACK_mask().Insert(i); - + } // Add in all bits past the outgoing argument area guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)), "must be able to represent all call arguments in reg mask"); - init = _out_arg_limit; - for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) + OptoReg::Name init = _out_arg_limit; + for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) { C->FIRST_STACK_mask().Insert(i); - + } // Finally, set the "infinite stack" bit. C->FIRST_STACK_mask().set_AllStack(); @@ -506,16 +506,36 @@ idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask()); } if (Matcher::vector_size_supported(T_FLOAT,2)) { + // For VecD we need dual alignment and 8 bytes (2 slots) for spills. + // RA guarantees such alignment since it is needed for Double and Long values. *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD]; idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask); } if (Matcher::vector_size_supported(T_FLOAT,4)) { + // For VecX we need quadro alignment and 16 bytes (4 slots) for spills. + // + // RA can use input arguments stack slots for spills but until RA + // we don't know frame size and offset of input arg stack slots. + // + // Exclude last input arg stack slots to avoid spilling vectors there + // otherwise vector spills could stomp over stack slots in caller frame. + OptoReg::Name in = OptoReg::add(_in_arg_limit, -1); + for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) { + aligned_stack_mask.Remove(in); + in = OptoReg::add(in, -1); + } aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX); assert(aligned_stack_mask.is_AllStack(), "should be infinite stack"); *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX]; idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask); } if (Matcher::vector_size_supported(T_FLOAT,8)) { + // For VecY we need octo alignment and 32 bytes (8 slots) for spills. + OptoReg::Name in = OptoReg::add(_in_arg_limit, -1); + for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) { + aligned_stack_mask.Remove(in); + in = OptoReg::add(in, -1); + } aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY); assert(aligned_stack_mask.is_AllStack(), "should be infinite stack"); *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
--- a/src/share/vm/opto/mathexactnode.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/mathexactnode.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -49,7 +49,7 @@ virtual Node* Identity(PhaseTransform* phase) { return this; } virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) { return NULL; } virtual const Type* Value(PhaseTransform* phase) const { return bottom_type(); } - virtual uint hash() const { return Node::hash(); } + virtual uint hash() const { return NO_HASH; } virtual bool is_CFG() const { return false; } virtual uint ideal_reg() const { return NotAMachineReg; }
--- a/src/share/vm/opto/memnode.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/memnode.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -204,6 +204,17 @@ protected: const Type* load_array_final_field(const TypeKlassPtr *tkls, ciKlass* klass) const; + // depends_only_on_test is almost always true, and needs to be almost always + // true to enable key hoisting & commoning optimizations. However, for the + // special case of RawPtr loads from TLS top & end, and other loads performed by + // GC barriers, the control edge carries the dependence preventing hoisting past + // a Safepoint instead of the memory edge. (An unfortunate consequence of having + // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes + // which produce results (new raw memory state) inside of loops preventing all + // manner of other optimizations). Basically, it's ugly but so is the alternative. + // See comment in macro.cpp, around line 125 expand_allocate_common(). + virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } + }; //------------------------------LoadBNode-------------------------------------- @@ -370,16 +381,6 @@ virtual uint ideal_reg() const { return Op_RegP; } virtual int store_Opcode() const { return Op_StoreP; } virtual BasicType memory_type() const { return T_ADDRESS; } - // depends_only_on_test is almost always true, and needs to be almost always - // true to enable key hoisting & commoning optimizations. However, for the - // special case of RawPtr loads from TLS top & end, the control edge carries - // the dependence preventing hoisting past a Safepoint instead of the memory - // edge. (An unfortunate consequence of having Safepoints not set Raw - // Memory; itself an unfortunate consequence of having Nodes which produce - // results (new raw memory state) inside of loops preventing all manner of - // other optimizations). Basically, it's ugly but so is the alternative. - // See comment in macro.cpp, around line 125 expand_allocate_common(). - virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } }; @@ -393,16 +394,6 @@ virtual uint ideal_reg() const { return Op_RegN; } virtual int store_Opcode() const { return Op_StoreN; } virtual BasicType memory_type() const { return T_NARROWOOP; } - // depends_only_on_test is almost always true, and needs to be almost always - // true to enable key hoisting & commoning optimizations. However, for the - // special case of RawPtr loads from TLS top & end, the control edge carries - // the dependence preventing hoisting past a Safepoint instead of the memory - // edge. (An unfortunate consequence of having Safepoints not set Raw - // Memory; itself an unfortunate consequence of having Nodes which produce - // results (new raw memory state) inside of loops preventing all manner of - // other optimizations). Basically, it's ugly but so is the alternative. - // See comment in macro.cpp, around line 125 expand_allocate_common(). - virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; } }; //------------------------------LoadKlassNode----------------------------------
--- a/src/share/vm/opto/postaloc.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/postaloc.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -97,7 +97,8 @@ static bool expected_yanked_node(Node *old, Node *orig_old) { // This code is expected only next original nodes: // - load from constant table node which may have next data input nodes: - // MachConstantBase, Phi, MachTemp, MachSpillCopy + // MachConstantBase, MachTemp, MachSpillCopy + // - Phi nodes that are considered Junk // - load constant node which may have next data input nodes: // MachTemp, MachSpillCopy // - MachSpillCopy @@ -112,7 +113,9 @@ return (old == orig_old); } else if (old->is_MachTemp()) { return orig_old->is_Con(); - } else if (old->is_Phi() || old->is_MachConstantBase()) { + } else if (old->is_Phi()) { // Junk phi's + return true; + } else if (old->is_MachConstantBase()) { return (orig_old->is_Con() && orig_old->is_MachConstant()); } return false; @@ -522,11 +525,9 @@ u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input } if (u != NodeSentinel) { // Junk Phi. Remove - block->remove_node(j--); + phi->replace_by(u); + j -= yank_if_dead(phi, block, &value, ®nd); phi_dex--; - _cfg.unmap_node_from_block(phi); - phi->replace_by(u); - phi->disconnect_inputs(NULL, C); continue; } // Note that if value[pidx] exists, then we merged no new values here
--- a/src/share/vm/opto/type.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/type.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -2787,13 +2787,11 @@ //-----------------------------filter------------------------------------------ // Do not allow interface-vs.-noninterface joins to collapse to top. -const Type *TypeOopPtr::filter( const Type *kills ) const { +const Type *TypeOopPtr::filter(const Type *kills) const { const Type* ft = join(kills); const TypeInstPtr* ftip = ft->isa_instptr(); const TypeInstPtr* ktip = kills->isa_instptr(); - const TypeKlassPtr* ftkp = ft->isa_klassptr(); - const TypeKlassPtr* ktkp = kills->isa_klassptr(); if (ft->empty()) { // Check for evil case of 'this' being a class and 'kills' expecting an @@ -2807,8 +2805,6 @@ // uplift the type. if (!empty() && ktip != NULL && ktip->is_loaded() && ktip->klass()->is_interface()) return kills; // Uplift to interface - if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface()) - return kills; // Uplift to interface return Type::TOP; // Canonical empty value } @@ -2825,14 +2821,6 @@ assert(!ftip->klass_is_exact(), "interface could not be exact"); return ktip->cast_to_ptr_type(ftip->ptr()); } - // Interface klass type could be exact in opposite to interface type, - // return it here instead of incorrect Constant ptr J/L/Object (6894807). - if (ftkp != NULL && ktkp != NULL && - ftkp->is_loaded() && ftkp->klass()->is_interface() && - !ftkp->klass_is_exact() && // Keep exact interface klass - ktkp->is_loaded() && !ktkp->klass()->is_interface()) { - return ktkp->cast_to_ptr_type(ftkp->ptr()); - } return ft; } @@ -4385,6 +4373,33 @@ return (_offset == 0) && !below_centerline(_ptr); } +// Do not allow interface-vs.-noninterface joins to collapse to top. +const Type *TypeKlassPtr::filter(const Type *kills) const { + // logic here mirrors the one from TypeOopPtr::filter. See comments + // there. + const Type* ft = join(kills); + const TypeKlassPtr* ftkp = ft->isa_klassptr(); + const TypeKlassPtr* ktkp = kills->isa_klassptr(); + + if (ft->empty()) { + if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface()) + return kills; // Uplift to interface + + return Type::TOP; // Canonical empty value + } + + // Interface klass type could be exact in opposite to interface type, + // return it here instead of incorrect Constant ptr J/L/Object (6894807). + if (ftkp != NULL && ktkp != NULL && + ftkp->is_loaded() && ftkp->klass()->is_interface() && + !ftkp->klass_is_exact() && // Keep exact interface klass + ktkp->is_loaded() && !ktkp->klass()->is_interface()) { + return ktkp->cast_to_ptr_type(ftkp->ptr()); + } + + return ft; +} + //----------------------compute_klass------------------------------------------ // Compute the defining klass for this class ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const {
--- a/src/share/vm/opto/type.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/opto/type.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -63,7 +63,7 @@ class TypeOopPtr; class TypeInstPtr; class TypeAryPtr; -class TypeKlassPtr; +class TypeKlassPtr; class TypeMetadataPtr; //------------------------------Type------------------------------------------- @@ -1202,6 +1202,9 @@ virtual intptr_t get_con() const; + // Do not allow interface-vs.-noninterface joins to collapse to top. + virtual const Type *filter( const Type *kills ) const; + // Convenience common pre-built types. static const TypeKlassPtr* OBJECT; // Not-null object klass or below static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
--- a/src/share/vm/prims/jvmtiGetLoadedClasses.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/prims/jvmtiGetLoadedClasses.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -29,8 +29,43 @@ #include "runtime/thread.hpp" +// The closure for GetLoadedClasses +class LoadedClassesClosure : public KlassClosure { +private: + Stack<jclass, mtInternal> _classStack; + JvmtiEnv* _env; -// The closure for GetLoadedClasses and GetClassLoaderClasses +public: + LoadedClassesClosure(JvmtiEnv* env) { + _env = env; + } + + void do_klass(Klass* k) { + // Collect all jclasses + _classStack.push((jclass) _env->jni_reference(k->java_mirror())); + } + + int extract(jclass* result_list) { + // The size of the Stack will be 0 after extract, so get it here + int count = (int)_classStack.size(); + int i = count; + + // Pop all jclasses, fill backwards + while (!_classStack.is_empty()) { + result_list[--i] = _classStack.pop(); + } + + // Return the number of elements written + return count; + } + + // Return current size of the Stack + int get_count() { + return (int)_classStack.size(); + } +}; + +// The closure for GetClassLoaderClasses class JvmtiGetLoadedClassesClosure : public StackObj { // Since the SystemDictionary::classes_do callback // doesn't pass a closureData pointer, @@ -165,19 +200,6 @@ } } - // Finally, the static methods that are the callbacks - static void increment(Klass* k) { - JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); - if (that->get_initiatingLoader() == NULL) { - for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) { - that->set_count(that->get_count() + 1); - } - } else if (k != NULL) { - // if initiating loader not null, just include the instance with 1 dimension - that->set_count(that->get_count() + 1); - } - } - static void increment_with_loader(Klass* k, ClassLoaderData* loader_data) { JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); oop class_loader = loader_data->class_loader(); @@ -196,24 +218,6 @@ } } - static void add(Klass* k) { - JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); - if (that->available()) { - if (that->get_initiatingLoader() == NULL) { - for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) { - oop mirror = l->java_mirror(); - that->set_element(that->get_index(), mirror); - that->set_index(that->get_index() + 1); - } - } else if (k != NULL) { - // if initiating loader not null, just include the instance with 1 dimension - oop mirror = k->java_mirror(); - that->set_element(that->get_index(), mirror); - that->set_index(that->get_index() + 1); - } - } - } - static void add_with_loader(Klass* k, ClassLoaderData* loader_data) { JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); if (that->available()) { @@ -255,39 +259,30 @@ jvmtiError JvmtiGetLoadedClasses::getLoadedClasses(JvmtiEnv *env, jint* classCountPtr, jclass** classesPtr) { - // Since SystemDictionary::classes_do only takes a function pointer - // and doesn't call back with a closure data pointer, - // we can only pass static methods. - JvmtiGetLoadedClassesClosure closure; + LoadedClassesClosure closure(env); { // To get a consistent list of classes we need MultiArray_lock to ensure - // array classes aren't created, and SystemDictionary_lock to ensure that - // classes aren't added to the system dictionary, + // array classes aren't created. 
MutexLocker ma(MultiArray_lock); - MutexLocker sd(SystemDictionary_lock); + + // Iterate through all classes in ClassLoaderDataGraph + // and collect them using the LoadedClassesClosure + ClassLoaderDataGraph::loaded_classes_do(&closure); + } - // First, count the classes - SystemDictionary::classes_do(&JvmtiGetLoadedClassesClosure::increment); - Universe::basic_type_classes_do(&JvmtiGetLoadedClassesClosure::increment); - // Next, fill in the classes - closure.allocate(); - SystemDictionary::classes_do(&JvmtiGetLoadedClassesClosure::add); - Universe::basic_type_classes_do(&JvmtiGetLoadedClassesClosure::add); - // Drop the SystemDictionary_lock, so the results could be wrong from here, - // but we still have a snapshot. + // Return results by extracting the collected contents into a list + // allocated via JvmtiEnv + jclass* result_list; + jvmtiError error = env->Allocate(closure.get_count() * sizeof(jclass), + (unsigned char**)&result_list); + + if (error == JVMTI_ERROR_NONE) { + int count = closure.extract(result_list); + *classCountPtr = count; + *classesPtr = result_list; } - // Post results - jclass* result_list; - jvmtiError err = env->Allocate(closure.get_count() * sizeof(jclass), - (unsigned char**)&result_list); - if (err != JVMTI_ERROR_NONE) { - return err; - } - closure.extract(env, result_list); - *classCountPtr = closure.get_count(); - *classesPtr = result_list; - return JVMTI_ERROR_NONE; + return error; } jvmtiError
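JVMTI GetLoadedClasses is the call that backs Instrumentation.getAllLoadedClasses() in the java.lang.instrument agent machinery, so the switch to walking the ClassLoaderDataGraph is observable from a plain Java agent. A minimal sketch (the agent jar packaging with a Premain-Class manifest entry is assumed):

import java.lang.instrument.Instrumentation;

public class LoadedClassesAgent {
    // getAllLoadedClasses() bottoms out in the JVMTI GetLoadedClasses path
    // rewritten above to collect classes from the ClassLoaderDataGraph.
    public static void premain(String agentArgs, Instrumentation inst) {
        Class<?>[] loaded = inst.getAllLoadedClasses();
        System.out.println("Loaded classes: " + loaded.length);
    }
}

// Run with:  java -javaagent:agent.jar SomeMainClass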
--- a/src/share/vm/prims/jvmtiImpl.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/prims/jvmtiImpl.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -210,6 +210,14 @@ } } +void GrowableCache::metadata_do(void f(Metadata*)) { + int len = _elements->length(); + for (int i=0; i<len; i++) { + GrowableElement *e = _elements->at(i); + e->metadata_do(f); + } +} + void GrowableCache::gc_epilogue() { int len = _elements->length(); for (int i=0; i<len; i++) { @@ -224,20 +232,20 @@ JvmtiBreakpoint::JvmtiBreakpoint() { _method = NULL; _bci = 0; - _class_loader = NULL; + _class_holder = NULL; } JvmtiBreakpoint::JvmtiBreakpoint(Method* m_method, jlocation location) { _method = m_method; - _class_loader = _method->method_holder()->class_loader_data()->class_loader(); + _class_holder = _method->method_holder()->klass_holder(); #ifdef CHECK_UNHANDLED_OOPS - // _class_loader can't be wrapped in a Handle, because JvmtiBreakpoint:s are - // eventually allocated on the heap. + // _class_holder can't be wrapped in a Handle, because JvmtiBreakpoints are + // sometimes allocated on the heap. // - // The code handling JvmtiBreakpoint:s allocated on the stack can't be - // interrupted by a GC until _class_loader is reachable by the GC via the + // The code handling JvmtiBreakpoints allocated on the stack can't be + // interrupted by a GC until _class_holder is reachable by the GC via the // oops_do method. - Thread::current()->allow_unhandled_oop(&_class_loader); + Thread::current()->allow_unhandled_oop(&_class_holder); #endif // CHECK_UNHANDLED_OOPS assert(_method != NULL, "_method != NULL"); _bci = (int) location; @@ -247,7 +255,7 @@ void JvmtiBreakpoint::copy(JvmtiBreakpoint& bp) { _method = bp._method; _bci = bp._bci; - _class_loader = bp._class_loader; + _class_holder = bp._class_holder; } bool JvmtiBreakpoint::lessThan(JvmtiBreakpoint& bp) { @@ -365,6 +373,13 @@ } } +void VM_ChangeBreakpoints::metadata_do(void f(Metadata*)) { + // Walk metadata in breakpoints to keep from being deallocated with RedefineClasses + if (_bp != NULL) { + _bp->metadata_do(f); + } +} + // // class JvmtiBreakpoints // @@ -381,6 +396,10 @@ _bps.oops_do(f); } +void JvmtiBreakpoints::metadata_do(void f(Metadata*)) { + _bps.metadata_do(f); +} + void JvmtiBreakpoints::gc_epilogue() { _bps.gc_epilogue(); } @@ -499,6 +518,12 @@ } } +void JvmtiCurrentBreakpoints::metadata_do(void f(Metadata*)) { + if (_jvmti_breakpoints != NULL) { + _jvmti_breakpoints->metadata_do(f); + } +} + void JvmtiCurrentBreakpoints::gc_epilogue() { if (_jvmti_breakpoints != NULL) { _jvmti_breakpoints->gc_epilogue();
--- a/src/share/vm/prims/jvmtiImpl.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/prims/jvmtiImpl.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -69,6 +69,7 @@ virtual bool lessThan(GrowableElement *e)=0; virtual GrowableElement *clone() =0; virtual void oops_do(OopClosure* f) =0; + virtual void metadata_do(void f(Metadata*)) =0; }; class GrowableCache VALUE_OBJ_CLASS_SPEC { @@ -115,6 +116,8 @@ void clear(); // apply f to every element and update the cache void oops_do(OopClosure* f); + // walk metadata to preserve for RedefineClasses + void metadata_do(void f(Metadata*)); // update the cache after a full gc void gc_epilogue(); }; @@ -148,6 +151,7 @@ void remove (int index) { _cache.remove(index); } void clear() { _cache.clear(); } void oops_do(OopClosure* f) { _cache.oops_do(f); } + void metadata_do(void f(Metadata*)) { _cache.metadata_do(f); } void gc_epilogue() { _cache.gc_epilogue(); } }; @@ -169,7 +173,7 @@ Method* _method; int _bci; Bytecodes::Code _orig_bytecode; - oop _class_loader; + oop _class_holder; // keeps _method memory from being deallocated public: JvmtiBreakpoint(); @@ -191,9 +195,15 @@ bool lessThan(GrowableElement* e) { Unimplemented(); return false; } bool equals(GrowableElement* e) { return equals((JvmtiBreakpoint&) *e); } void oops_do(OopClosure* f) { - // Mark the method loader as live - f->do_oop(&_class_loader); + // Mark the method loader as live so the Method* class loader doesn't get + // unloaded and Method* memory reclaimed. + f->do_oop(&_class_holder); } + void metadata_do(void f(Metadata*)) { + // walk metadata to preserve for RedefineClasses + f(_method); + } + GrowableElement *clone() { JvmtiBreakpoint *bp = new JvmtiBreakpoint(); bp->copy(*this); @@ -239,6 +249,7 @@ int length(); void oops_do(OopClosure* f); + void metadata_do(void f(Metadata*)); void print(); int set(JvmtiBreakpoint& bp); @@ -288,6 +299,7 @@ static inline bool is_breakpoint(address bcp); static void oops_do(OopClosure* f); + static void metadata_do(void f(Metadata*)); static void gc_epilogue(); }; @@ -332,6 +344,7 @@ VMOp_Type type() const { return VMOp_ChangeBreakpoints; } void doit(); void oops_do(OopClosure* f); + void metadata_do(void f(Metadata*)); };
--- a/src/share/vm/prims/whitebox.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/prims/whitebox.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -53,6 +53,8 @@ #include "compiler/compileBroker.hpp" #include "runtime/compilationPolicy.hpp" +#define SIZE_T_MAX_VALUE ((size_t) -1) + bool WhiteBox::_used = false; WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj)) @@ -105,10 +107,116 @@ gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap " SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT, p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(), - p->min_alignment(), p->max_alignment()); + p->space_alignment(), p->heap_alignment()); } WB_END +#ifndef PRODUCT +// Forward declaration +void TestReservedSpace_test(); +void TestReserveMemorySpecial_test(); +void TestVirtualSpace_test(); +void TestMetaspaceAux_test(); +#endif + +WB_ENTRY(void, WB_RunMemoryUnitTests(JNIEnv* env, jobject o)) +#ifndef PRODUCT + TestReservedSpace_test(); + TestReserveMemorySpecial_test(); + TestVirtualSpace_test(); + TestMetaspaceAux_test(); +#endif +WB_END + +WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o)) + size_t granularity = os::vm_allocation_granularity(); + ReservedHeapSpace rhs(100 * granularity, granularity, false, NULL); + VirtualSpace vs; + vs.initialize(rhs, 50 * granularity); + + //Check if constraints are complied + if (!( UseCompressedOops && rhs.base() != NULL && + Universe::narrow_oop_base() != NULL && + Universe::narrow_oop_use_implicit_null_checks() )) { + tty->print_cr("WB_ReadFromNoaccessArea method is useless:\n " + "\tUseCompressedOops is %d\n" + "\trhs.base() is "PTR_FORMAT"\n" + "\tUniverse::narrow_oop_base() is "PTR_FORMAT"\n" + "\tUniverse::narrow_oop_use_implicit_null_checks() is %d", + UseCompressedOops, + rhs.base(), + Universe::narrow_oop_base(), + Universe::narrow_oop_use_implicit_null_checks()); + return; + } + tty->print_cr("Reading from no access area... "); + tty->print_cr("*(vs.low_boundary() - rhs.noaccess_prefix() / 2 ) = %c", + *(vs.low_boundary() - rhs.noaccess_prefix() / 2 )); +WB_END + +static jint wb_stress_virtual_space_resize(size_t reserved_space_size, + size_t magnitude, size_t iterations) { + size_t granularity = os::vm_allocation_granularity(); + ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, false, NULL); + VirtualSpace vs; + if (!vs.initialize(rhs, 0)) { + tty->print_cr("Failed to initialize VirtualSpace. 
Can't proceed."); + return 3; + } + + long seed = os::random(); + tty->print_cr("Random seed is %ld", seed); + os::init_random(seed); + + for (size_t i = 0; i < iterations; i++) { + + // Whether we will shrink or grow + bool shrink = os::random() % 2L == 0; + + // Get random delta to resize virtual space + size_t delta = (size_t)os::random() % magnitude; + + // If we are about to shrink virtual space below zero, then expand instead + if (shrink && vs.committed_size() < delta) { + shrink = false; + } + + // Resizing by delta + if (shrink) { + vs.shrink_by(delta); + } else { + // If expanding fails expand_by will silently return false + vs.expand_by(delta, true); + } + } + return 0; +} + +WB_ENTRY(jint, WB_StressVirtualSpaceResize(JNIEnv* env, jobject o, + jlong reserved_space_size, jlong magnitude, jlong iterations)) + tty->print_cr("reservedSpaceSize="JLONG_FORMAT", magnitude="JLONG_FORMAT", " + "iterations="JLONG_FORMAT"\n", reserved_space_size, magnitude, + iterations); + if (reserved_space_size < 0 || magnitude < 0 || iterations < 0) { + tty->print_cr("One of variables printed above is negative. Can't proceed.\n"); + return 1; + } + + // sizeof(size_t) depends on whether OS is 32bit or 64bit. sizeof(jlong) is + // always 8 byte. That's why we should avoid overflow in case of 32bit platform. + if (sizeof(size_t) < sizeof(jlong)) { + jlong size_t_max_value = (jlong) SIZE_T_MAX_VALUE; + if (reserved_space_size > size_t_max_value || magnitude > size_t_max_value + || iterations > size_t_max_value) { + tty->print_cr("One of variables printed above overflows size_t. Can't proceed.\n"); + return 2; + } + } + + return wb_stress_virtual_space_resize((size_t) reserved_space_size, + (size_t) magnitude, (size_t) iterations); +WB_END + #if INCLUDE_ALL_GCS WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj)) G1CollectedHeap* g1 = G1CollectedHeap::heap(); @@ -445,6 +553,9 @@ {CC"getCompressedOopsMaxHeapSize", CC"()J", (void*)&WB_GetCompressedOopsMaxHeapSize}, {CC"printHeapSizes", CC"()V", (void*)&WB_PrintHeapSizes }, + {CC"runMemoryUnitTests", CC"()V", (void*)&WB_RunMemoryUnitTests}, + {CC"readFromNoaccessArea",CC"()V", (void*)&WB_ReadFromNoaccessArea}, + {CC"stressVirtualSpaceResize",CC"(JJJ)I", (void*)&WB_StressVirtualSpaceResize}, #if INCLUDE_ALL_GCS {CC"g1InConcurrentMark", CC"()Z", (void*)&WB_G1InConcurrentMark}, {CC"g1IsHumongous", CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous },
--- a/src/share/vm/runtime/arguments.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/arguments.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1132,9 +1132,6 @@ Tier3InvokeNotifyFreqLog = 0; Tier4InvocationThreshold = 0; } - if (FLAG_IS_DEFAULT(NmethodSweepFraction)) { - FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M)); - } } #if INCLUDE_ALL_GCS @@ -1408,7 +1405,7 @@ // NULL page is located before the heap, we pad the NULL page to the conservative // maximum alignment that the GC may ever impose upon the heap. size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(), - Arguments::conservative_max_heap_alignment()); + _conservative_max_heap_alignment); LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page); NOT_LP64(ShouldNotReachHere(); return 0); @@ -1505,7 +1502,7 @@ } #endif // INCLUDE_ALL_GCS _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(), - CollectorPolicy::compute_max_alignment()); + CollectorPolicy::compute_heap_alignment()); } void Arguments::set_ergonomics_flags() { @@ -1988,6 +1985,15 @@ warning("DefaultMaxRAMFraction is deprecated and will likely be removed in a future release. " "Use MaxRAMFraction instead."); } + if (FLAG_IS_CMDLINE(UseCMSCompactAtFullCollection)) { + warning("UseCMSCompactAtFullCollection is deprecated and will likely be removed in a future release."); + } + if (FLAG_IS_CMDLINE(CMSFullGCsBeforeCompaction)) { + warning("CMSFullGCsBeforeCompaction is deprecated and will likely be removed in a future release."); + } + if (FLAG_IS_CMDLINE(UseCMSCollectionPassing)) { + warning("UseCMSCollectionPassing is deprecated and will likely be removed in a future release."); + } } // Check stack pages settings @@ -2156,6 +2162,10 @@ #if INCLUDE_ALL_GCS if (UseG1GC) { + status = status && verify_percentage(G1NewSizePercent, "G1NewSizePercent"); + status = status && verify_percentage(G1MaxNewSizePercent, "G1MaxNewSizePercent"); + status = status && verify_interval(G1NewSizePercent, 0, G1MaxNewSizePercent, "G1NewSizePercent"); + status = status && verify_percentage(InitiatingHeapOccupancyPercent, "InitiatingHeapOccupancyPercent"); status = status && verify_min_value(G1RefProcDrainInterval, 1, @@ -2672,9 +2682,10 @@ describe_range_error(errcode); return JNI_EINVAL; } - FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size); + set_min_heap_size((uintx)long_initial_heap_size); // Currently the minimum size and the initial heap sizes are the same. - set_min_heap_size(InitialHeapSize); + // Can be overridden with -XX:InitialHeapSize. + FLAG_SET_CMDLINE(uintx, InitialHeapSize, (uintx)long_initial_heap_size); // -Xmx } else if (match_option(option, "-Xmx", &tail) || match_option(option, "-XX:MaxHeapSize=", &tail)) { julong long_max_heap_size = 0; @@ -3634,6 +3645,11 @@ "Incompatible compilation policy selected", NULL); } } + // Set NmethodSweepFraction after the size of the code cache is adapted (in case of tiered) + if (FLAG_IS_DEFAULT(NmethodSweepFraction)) { + FLAG_SET_DEFAULT(NmethodSweepFraction, 1 + ReservedCodeCacheSize / (16 * M)); + } + // Set heap size based on available physical memory set_heap_size();
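The -Xms reordering pins the minimum heap size first and only then sets InitialHeapSize as an ordinary command-line flag, so a later -XX:InitialHeapSize=... can override the initial size while -Xms keeps governing the minimum. A small observer program (the flag interplay is as described in the hunk; the program just reports what the VM chose):

public class HeapSizeProbe {
    public static void main(String[] args) {
        Runtime rt = Runtime.getRuntime();
        // e.g.  java -Xms64m -XX:InitialHeapSize=128m HeapSizeProbe
        // InitialHeapSize wins for the initial size; -Xms still sets the minimum.
        System.out.println("total = " + rt.totalMemory());
        System.out.println("max   = " + rt.maxMemory());
    }
}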
--- a/src/share/vm/runtime/globals.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/globals.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -321,6 +321,8 @@ { KIND_PRODUCT, "product" }, { KIND_MANAGEABLE, "manageable" }, { KIND_DIAGNOSTIC, "diagnostic" }, + { KIND_EXPERIMENTAL, "experimental" }, + { KIND_COMMERCIAL, "commercial" }, { KIND_NOT_PRODUCT, "notproduct" }, { KIND_DEVELOP, "develop" }, { KIND_LP64_PRODUCT, "lp64_product" },
--- a/src/share/vm/runtime/globals.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/globals.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -2954,6 +2954,9 @@ product(intx, MaxRecursiveInlineLevel, 1, \ "maximum number of nested recursive calls that are inlined") \ \ + develop(intx, MaxForceInlineLevel, 100, \ + "maximum number of nested @ForceInline calls that are inlined") \ + \ product_pd(intx, InlineSmallCode, \ "Only inline already compiled methods if their code size is " \ "less than this") \ @@ -3019,9 +3022,6 @@ notproduct(intx, ZombieALotInterval, 5, \ "Number of exits until ZombieALot kicks in") \ \ - develop(bool, StressNonEntrant, false, \ - "Mark nmethods non-entrant at registration") \ - \ diagnostic(intx, MallocVerifyInterval, 0, \ "If non-zero, verify C heap after every N calls to " \ "malloc/realloc/free") \ @@ -3289,7 +3289,7 @@ "Exit the VM if we fill the code cache") \ \ product(bool, UseCodeCacheFlushing, true, \ - "Attempt to clean the code cache before shutting off compiler") \ + "Remove cold/old nmethods from the code cache") \ \ /* interpreter debugging */ \ develop(intx, BinarySwitchThreshold, 5, \ @@ -3622,9 +3622,6 @@ "Temporary flag for transition to AbstractMethodError wrapped " \ "in InvocationTargetException. See 6531596") \ \ - develop(bool, VerifyLambdaBytecodes, false, \ - "Force verification of jdk 8 lambda metafactory bytecodes") \ - \ develop(intx, FastSuperclassLimit, 8, \ "Depth of hardwired instanceof accelerator array") \ \
--- a/src/share/vm/runtime/handles.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/handles.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -45,7 +45,7 @@ oop* HandleArea::allocate_handle(oop obj) { assert(_handle_mark_nesting > 1, "memory leak: allocating handle outside HandleMark"); assert(_no_handle_mark_nesting == 0, "allocating handle inside NoHandleMark"); - assert(obj->is_oop(), "sanity check"); + assert(obj->is_oop(), err_msg("not an oop: " INTPTR_FORMAT, (intptr_t*) obj)); return real_allocate_handle(obj); }
--- a/src/share/vm/runtime/reflection.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/reflection.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -470,12 +470,6 @@ return true; } - // Also allow all accesses from - // java/lang/invoke/MagicLambdaImpl subclasses to succeed trivially. - if (current_class->is_subclass_of(SystemDictionary::lambda_MagicLambdaImpl_klass())) { - return true; - } - return can_relax_access_check_for(current_class, new_class, classloader_only); } @@ -570,12 +564,6 @@ return true; } - // Also allow all accesses from - // java/lang/invoke/MagicLambdaImpl subclasses to succeed trivially. - if (current_class->is_subclass_of(SystemDictionary::lambda_MagicLambdaImpl_klass())) { - return true; - } - return can_relax_access_check_for( current_class, field_class, classloader_only); }
--- a/src/share/vm/runtime/sharedRuntime.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/sharedRuntime.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -84,6 +84,7 @@ // Shared stub locations RuntimeStub* SharedRuntime::_wrong_method_blob; +RuntimeStub* SharedRuntime::_wrong_method_abstract_blob; RuntimeStub* SharedRuntime::_ic_miss_blob; RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob; RuntimeStub* SharedRuntime::_resolve_virtual_call_blob; @@ -101,11 +102,12 @@ //----------------------------generate_stubs----------------------------------- void SharedRuntime::generate_stubs() { - _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub"); - _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub"); - _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call"); - _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call"); - _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call"); + _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub"); + _wrong_method_abstract_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub"); + _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub"); + _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call"); + _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call"); + _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call"); #ifdef COMPILER2 // Vectors are generated only by C2. @@ -1345,6 +1347,11 @@ return callee_method->verified_code_entry(); JRT_END +// Handle abstract method call +JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* thread)) + return StubRoutines::throw_AbstractMethodError_entry(); +JRT_END + // resolve a static call and patch code JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread )) @@ -2341,12 +2348,13 @@ // Create a special handler for abstract methods. Abstract methods // are never compiled so an i2c entry is somewhat meaningless, but - // fill it in with something appropriate just in case. Pass handle - // wrong method for the c2i transitions. - address wrong_method = SharedRuntime::get_handle_wrong_method_stub(); + // throw AbstractMethodError just in case. + // Pass wrong_method_abstract for the c2i transitions to return + // AbstractMethodError for invalid invocations. + address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub(); _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL), StubRoutines::throw_AbstractMethodError_entry(), - wrong_method, wrong_method); + wrong_method_abstract, wrong_method_abstract); } AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
--- a/src/share/vm/runtime/sharedRuntime.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/sharedRuntime.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -56,6 +56,7 @@ // Shared stub locations static RuntimeStub* _wrong_method_blob; + static RuntimeStub* _wrong_method_abstract_blob; static RuntimeStub* _ic_miss_blob; static RuntimeStub* _resolve_opt_virtual_call_blob; static RuntimeStub* _resolve_virtual_call_blob; @@ -206,6 +207,11 @@ return _wrong_method_blob->entry_point(); } + static address get_handle_wrong_method_abstract_stub() { + assert(_wrong_method_abstract_blob!= NULL, "oops"); + return _wrong_method_abstract_blob->entry_point(); + } + #ifdef COMPILER2 static void generate_uncommon_trap_blob(void); static UncommonTrapBlob* uncommon_trap_blob() { return _uncommon_trap_blob; } @@ -481,6 +487,7 @@ // handle ic miss with caller being compiled code // wrong method handling (inline cache misses, zombie methods) static address handle_wrong_method(JavaThread* thread); + static address handle_wrong_method_abstract(JavaThread* thread); static address handle_wrong_method_ic_miss(JavaThread* thread); #ifndef PRODUCT
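The new stub routes a call that lands on an abstract method straight to an AbstractMethodError throw instead of the generic wrong-method handler. The Java-level situation that reaches this path is binary-incompatible evolution; the classic recipe is sketched in the comments below (file layout and class names are illustrative):

// Step 1: compile all three classes as shown and run: prints 1.
// Step 2: change Base.m() to "public abstract int m();" (making Base abstract),
//         recompile ONLY Base, and keep the old Impl.class.
// Step 3: run again: dispatch lands on an abstract method and the VM raises
//         AbstractMethodError via the wrong_method_abstract entry above.
class Base {
    public int m() { return 1; }   // becomes abstract in step 2
}

class Impl extends Base { }        // inherits m(); never recompiled

public class AbstractCallSketch {
    public static void main(String[] args) {
        Base b = new Impl();
        System.out.println(b.m());
    }
}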
--- a/src/share/vm/runtime/sweeper.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/sweeper.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -112,14 +112,13 @@ if (_records != NULL) { _records[_sweep_index].traversal = _traversals; _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark; - _records[_sweep_index].invocation = _invocations; + _records[_sweep_index].invocation = _sweep_fractions_left; _records[_sweep_index].compile_id = nm->compile_id(); _records[_sweep_index].kind = nm->compile_kind(); _records[_sweep_index].state = nm->_state; _records[_sweep_index].vep = nm->verified_entry_point(); _records[_sweep_index].uep = nm->entry_point(); _records[_sweep_index].line = line; - _sweep_index = (_sweep_index + 1) % SweeperLogEntries; } } @@ -127,26 +126,29 @@ #define SWEEP(nm) #endif -nmethod* NMethodSweeper::_current = NULL; // Current nmethod -long NMethodSweeper::_traversals = 0; // Nof. stack traversals performed -int NMethodSweeper::_seen = 0; // Nof. nmethods we have currently processed in current pass of CodeCache -int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep -int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep -int NMethodSweeper::_marked_count = 0; // Nof. nmethods marked for reclaim in current sweep - -volatile int NMethodSweeper::_invocations = 0; // Nof. invocations left until we are completed with this pass -volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress. +nmethod* NMethodSweeper::_current = NULL; // Current nmethod +long NMethodSweeper::_traversals = 0; // Stack scan count, also sweep ID. +long NMethodSweeper::_time_counter = 0; // Virtual time used to periodically invoke sweeper +long NMethodSweeper::_last_sweep = 0; // Value of _time_counter when the last sweep happened +int NMethodSweeper::_seen = 0; // Nof. nmethod we have currently processed in current pass of CodeCache +int NMethodSweeper::_flushed_count = 0; // Nof. nmethods flushed in current sweep +int NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep +int NMethodSweeper::_marked_for_reclamation_count = 0; // Nof. nmethods marked for reclaim in current sweep -jint NMethodSweeper::_locked_seen = 0; -jint NMethodSweeper::_not_entrant_seen_on_stack = 0; -bool NMethodSweeper::_request_mark_phase = false; +volatile bool NMethodSweeper::_should_sweep = true; // Indicates if we should invoke the sweeper +volatile int NMethodSweeper::_sweep_fractions_left = 0; // Nof. 
invocations left until we are completed with this pass +volatile int NMethodSweeper::_sweep_started = 0; // Flag to control conc sweeper +volatile int NMethodSweeper::_bytes_changed = 0; // Counts the total nmethod size if the nmethod changed from: + // 1) alive -> not_entrant + // 2) not_entrant -> zombie + // 3) zombie -> marked_for_reclamation -int NMethodSweeper::_total_nof_methods_reclaimed = 0; -jlong NMethodSweeper::_total_time_sweeping = 0; -jlong NMethodSweeper::_total_time_this_sweep = 0; -jlong NMethodSweeper::_peak_sweep_time = 0; -jlong NMethodSweeper::_peak_sweep_fraction_time = 0; -int NMethodSweeper::_hotness_counter_reset_val = 0; +int NMethodSweeper::_total_nof_methods_reclaimed = 0; // Accumulated nof methods flushed +jlong NMethodSweeper::_total_time_sweeping = 0; // Accumulated time sweeping +jlong NMethodSweeper::_total_time_this_sweep = 0; // Total time this sweep +jlong NMethodSweeper::_peak_sweep_time = 0; // Peak time for a full sweep +jlong NMethodSweeper::_peak_sweep_fraction_time = 0; // Peak time sweeping one fraction +int NMethodSweeper::_hotness_counter_reset_val = 0; class MarkActivationClosure: public CodeBlobClosure { @@ -197,13 +199,16 @@ return; } + // Increase time so that we can estimate when to invoke the sweeper again. + _time_counter++; + // Check for restart assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid"); - if (!sweep_in_progress() && need_marking_phase()) { - _seen = 0; - _invocations = NmethodSweepFraction; - _current = CodeCache::first_nmethod(); - _traversals += 1; + if (!sweep_in_progress()) { + _seen = 0; + _sweep_fractions_left = NmethodSweepFraction; + _current = CodeCache::first_nmethod(); + _traversals += 1; _total_time_this_sweep = 0; if (PrintMethodFlushing) { @@ -211,10 +216,6 @@ } Threads::nmethods_do(&mark_activation_closure); - // reset the flags since we started a scan from the beginning. - reset_nmethod_marking(); - _locked_seen = 0; - _not_entrant_seen_on_stack = 0; } else { // Only set hotness counter Threads::nmethods_do(&set_hotness_closure); @@ -222,14 +223,49 @@ OrderAccess::storestore(); } - +/** + * This function invokes the sweeper if at least one of the three conditions is met: + * (1) The code cache is getting full + * (2) There are sufficient state changes in/since the last sweep. + * (3) We have not been sweeping for 'some time' + */ void NMethodSweeper::possibly_sweep() { assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode"); - if (!MethodFlushing || !sweep_in_progress()) { + // Only compiler threads are allowed to sweep + if (!MethodFlushing || !sweep_in_progress() || !Thread::current()->is_Compiler_thread()) { return; } - if (_invocations > 0) { + // If there was no state change while nmethod sweeping, 'should_sweep' will be false. + // This is one of the two places where should_sweep can be set to true. The general + // idea is as follows: If there is enough free space in the code cache, there is no + // need to invoke the sweeper. The following formula (which determines whether to invoke + // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes + // we need less frequent sweeps than for smaller ReservedCodecCacheSizes. Furthermore, + // the formula considers how much space in the code cache is currently used. Here are + // some examples that will (hopefully) help in understanding. 
+ // + // Small ReservedCodeCacheSizes: (e.g., < 16M) We invoke the sweeper every time, since + // the result of the division is 0. This + // keeps the used code cache size small + // (important for embedded Java) + // Large ReservedCodeCacheSize : (e.g., 256M + code cache is 10% full). The formula + // computes: (256 / 16) - 1 = 15 + // As a result, we invoke the sweeper after + // 15 invocations of 'mark_active_nmethods. + // Large ReservedCodeCacheSize: (e.g., 256M + code Cache is 90% full). The formula + // computes: (256 / 16) - 10 = 6. + if (!_should_sweep) { + int time_since_last_sweep = _time_counter - _last_sweep; + double wait_until_next_sweep = (ReservedCodeCacheSize / (16 * M)) - time_since_last_sweep - + CodeCache::reverse_free_ratio(); + + if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) { + _should_sweep = true; + } + } + + if (_should_sweep && _sweep_fractions_left > 0) { // Only one thread at a time will sweep jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 ); if (old != 0) { @@ -242,31 +278,46 @@ memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries); } #endif - if (_invocations > 0) { + + if (_sweep_fractions_left > 0) { sweep_code_cache(); - _invocations--; + _sweep_fractions_left--; + } + + // We are done with sweeping the code cache once. + if (_sweep_fractions_left == 0) { + _last_sweep = _time_counter; + // Reset flag; temporarily disables sweeper + _should_sweep = false; + // If there was enough state change, 'possibly_enable_sweeper()' + // sets '_should_sweep' to true + possibly_enable_sweeper(); + // Reset _bytes_changed only if there was enough state change. _bytes_changed + // can further increase by calls to 'report_state_change'. + if (_should_sweep) { + _bytes_changed = 0; + } } _sweep_started = 0; } } void NMethodSweeper::sweep_code_cache() { - jlong sweep_start_counter = os::elapsed_counter(); - _flushed_count = 0; - _zombified_count = 0; - _marked_count = 0; + _flushed_count = 0; + _zombified_count = 0; + _marked_for_reclamation_count = 0; if (PrintMethodFlushing && Verbose) { - tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations); + tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left); } if (!CompileBroker::should_compile_new_jobs()) { // If we have turned off compilations we might as well do full sweeps // in order to reach the clean state faster. Otherwise the sleeping compiler // threads will slow down sweeping. - _invocations = 1; + _sweep_fractions_left = 1; } // We want to visit all nmethods after NmethodSweepFraction @@ -274,7 +325,7 @@ // remaining number of invocations. This is only an estimate since // the number of nmethods changes during the sweep so the final // stage must iterate until it there are no more nmethods. 
- int todo = (CodeCache::nof_nmethods() - _seen) / _invocations; + int todo = (CodeCache::nof_nmethods() - _seen) / _sweep_fractions_left; int swept_count = 0; @@ -286,11 +337,11 @@ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); // The last invocation iterates until there are no more nmethods - for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) { + for (int i = 0; (i < todo || _sweep_fractions_left == 1) && _current != NULL; i++) { swept_count++; if (SafepointSynchronize::is_synchronizing()) { // Safepoint request if (PrintMethodFlushing && Verbose) { - tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations); + tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _sweep_fractions_left); } MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); @@ -314,19 +365,7 @@ } } - assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache"); - - if (!sweep_in_progress() && !need_marking_phase() && (_locked_seen || _not_entrant_seen_on_stack)) { - // we've completed a scan without making progress but there were - // nmethods we were unable to process either because they were - // locked or were still on stack. We don't have to aggressively - // clean them up so just stop scanning. We could scan once more - // but that complicates the control logic and it's unlikely to - // matter much. - if (PrintMethodFlushing) { - tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep"); - } - } + assert(_sweep_fractions_left > 1 || _current == NULL, "must have scanned the whole cache"); jlong sweep_end_counter = os::elapsed_counter(); jlong sweep_time = sweep_end_counter - sweep_start_counter; @@ -340,21 +379,21 @@ event.set_starttime(sweep_start_counter); event.set_endtime(sweep_end_counter); event.set_sweepIndex(_traversals); - event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1); + event.set_sweepFractionIndex(NmethodSweepFraction - _sweep_fractions_left + 1); event.set_sweptCount(swept_count); event.set_flushedCount(_flushed_count); - event.set_markedCount(_marked_count); + event.set_markedCount(_marked_for_reclamation_count); event.set_zombifiedCount(_zombified_count); event.commit(); } #ifdef ASSERT if(PrintMethodFlushing) { - tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time); + tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _sweep_fractions_left, (jlong)sweep_time); } #endif - if (_invocations == 1) { + if (_sweep_fractions_left == 1) { _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep); log_sweep("finished"); } @@ -368,12 +407,37 @@ // it only makes sense to re-enable compilation if we have actually freed memory. // Note that typically several kB are released for sweeping 16MB of the code // cache. As a result, 'freed_memory' > 0 to restart the compiler. - if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0))) { + if (!CompileBroker::should_compile_new_jobs() && (freed_memory > 0)) { CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); log_sweep("restart_compiler"); } } +/** + * This function updates the sweeper statistics that keep track of nmethods + * state changes. If there is 'enough' state change, the sweeper is invoked + * as soon as possible. There can be data races on _bytes_changed. 
The data + * races are benign, since it does not matter if we loose a couple of bytes. + * In the worst case we call the sweeper a little later. Also, we are guaranteed + * to invoke the sweeper if the code cache gets full. + */ +void NMethodSweeper::report_state_change(nmethod* nm) { + _bytes_changed += nm->total_size(); + possibly_enable_sweeper(); +} + +/** + * Function determines if there was 'enough' state change in the code cache to invoke + * the sweeper again. Currently, we determine 'enough' as more than 1% state change in + * the code cache since the last sweep. + */ +void NMethodSweeper::possibly_enable_sweeper() { + double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100; + if (percent_changed > 1.0) { + _should_sweep = true; + } +} + class NMethodMarker: public StackObj { private: CompilerThread* _thread; @@ -424,9 +488,6 @@ MutexLocker cl(CompiledIC_lock); nm->cleanup_inline_caches(); SWEEP(nm); - } else { - _locked_seen++; - SWEEP(nm); } return freed_memory; } @@ -448,8 +509,9 @@ tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm); } nm->mark_for_reclamation(); - request_nmethod_marking(); - _marked_count++; + // Keep track of code cache state change + _bytes_changed += nm->total_size(); + _marked_for_reclamation_count++; SWEEP(nm); } } else if (nm->is_not_entrant()) { @@ -459,18 +521,14 @@ if (PrintMethodFlushing && Verbose) { tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm); } + // Code cache state change is tracked in make_zombie() nm->make_zombie(); - request_nmethod_marking(); _zombified_count++; SWEEP(nm); } else { // Still alive, clean up its inline caches MutexLocker cl(CompiledIC_lock); nm->cleanup_inline_caches(); - // we coudn't transition this nmethod so don't immediately - // request a rescan. If this method stays on the stack for a - // long time we don't want to keep rescanning the code cache. - _not_entrant_seen_on_stack++; SWEEP(nm); } } else if (nm->is_unloaded()) { @@ -485,8 +543,8 @@ release_nmethod(nm); _flushed_count++; } else { + // Code cache state change is tracked in make_zombie() nm->make_zombie(); - request_nmethod_marking(); _zombified_count++; SWEEP(nm); } @@ -514,7 +572,11 @@ // The second condition ensures that methods are not immediately made not-entrant // after compilation. nm->make_not_entrant(); - request_nmethod_marking(); + // Code cache state change is tracked in make_not_entrant() + if (PrintMethodFlushing && Verbose) { + tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f", + nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold); + } } } }
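The invocation heuristic above is plain arithmetic over the reserved code cache size, the virtual time since the last sweep, and the reverse free ratio. A hedged Java mirror of that computation, assuming reverse_free_ratio behaves like capacity divided by free space as in the jdk8 CodeCache (the values reproduce the worked examples in the comment approximately):

public class SweeperHeuristicSketch {
    static final long M = 1024 * 1024;

    // Mirror of the wait computation in NMethodSweeper::possibly_sweep().
    // fractionFree is the fraction of the code cache still unallocated.
    static double waitUntilNextSweep(long reservedCodeCacheSize,
                                     long timeSinceLastSweep,
                                     double fractionFree) {
        double reverseFreeRatio = 1.0 / fractionFree;   // assumed model
        return (double) (reservedCodeCacheSize / (16 * M))
                - timeSinceLastSweep
                - reverseFreeRatio;
    }

    public static void main(String[] args) {
        // 256M cache, 10% full: roughly 16 - 1 = 15, so sweeps are rare.
        System.out.println(waitUntilNextSweep(256 * M, 0, 0.9));
        // 256M cache, 90% full: roughly 16 - 10 = 6, so sweeps come sooner.
        System.out.println(waitUntilNextSweep(256 * M, 0, 0.1));
        // Small cache (8M): 8/16 == 0, so the sweeper runs essentially always.
        System.out.println(waitUntilNextSweep(8 * M, 0, 0.5));
    }
}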
--- a/src/share/vm/runtime/sweeper.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/sweeper.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -53,22 +53,22 @@ // is full. class NMethodSweeper : public AllStatic { - static long _traversals; // Stack scan count, also sweep ID. - static nmethod* _current; // Current nmethod - static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache - static int _flushed_count; // Nof. nmethods flushed in current sweep - static int _zombified_count; // Nof. nmethods made zombie in current sweep - static int _marked_count; // Nof. nmethods marked for reclaim in current sweep + static long _traversals; // Stack scan count, also sweep ID. + static long _time_counter; // Virtual time used to periodically invoke sweeper + static long _last_sweep; // Value of _time_counter when the last sweep happened + static nmethod* _current; // Current nmethod + static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache + static int _flushed_count; // Nof. nmethods flushed in current sweep + static int _zombified_count; // Nof. nmethods made zombie in current sweep + static int _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep - static volatile int _invocations; // No. of invocations left until we are completed with this pass - static volatile int _sweep_started; // Flag to control conc sweeper - - //The following are reset in mark_active_nmethods and synchronized by the safepoint - static bool _request_mark_phase; // Indicates that a change has happend and we need another mark pahse, - // always checked and reset at a safepoint so memory will be in sync. - static int _locked_seen; // Number of locked nmethods encountered during the scan - static int _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack - + static volatile int _sweep_fractions_left; // Nof. invocations left until we are completed with this pass + static volatile int _sweep_started; // Flag to control conc sweeper + static volatile bool _should_sweep; // Indicates if we should invoke the sweeper + static volatile int _bytes_changed; // Counts the total nmethod size if the nmethod changed from: + // 1) alive -> not_entrant + // 2) not_entrant -> zombie + // 3) zombie -> marked_for_reclamation // Stat counters static int _total_nof_methods_reclaimed; // Accumulated nof methods flushed static jlong _total_time_sweeping; // Accumulated time sweeping @@ -81,9 +81,6 @@ static bool sweep_in_progress(); static void sweep_code_cache(); - static void request_nmethod_marking() { _request_mark_phase = true; } - static void reset_nmethod_marking() { _request_mark_phase = false; } - static bool need_marking_phase() { return _request_mark_phase; } static int _hotness_counter_reset_val; @@ -109,13 +106,8 @@ static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2); static int hotness_counter_reset_val(); - - static void notify() { - // Request a new sweep of the code cache from the beginning. No - // need to synchronize the setting of this flag since it only - // changes to false at safepoint so we can never overwrite it with false. - request_nmethod_marking(); - } + static void report_state_change(nmethod* nm); + static void possibly_enable_sweeper(); }; #endif // SHARE_VM_RUNTIME_SWEEPER_HPP
--- a/src/share/vm/runtime/thread.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/runtime/thread.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1097,7 +1097,7 @@ // General purpose hook into Java code, run once when the VM is initialized. // The Java library method itself may be changed independently from the VM. static void call_postVMInitHook(TRAPS) { - Klass* k = SystemDictionary::PostVMInitHook_klass(); + Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_misc_PostVMInitHook(), THREAD); instanceKlassHandle klass (THREAD, k); if (klass.not_null()) { JavaValue result(T_VOID);
--- a/src/share/vm/services/jmm.h Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/services/jmm.h Tue Dec 03 14:13:06 2013 +0400 @@ -78,6 +78,7 @@ JMM_COMPILE_TOTAL_TIME_MS = 8, /* Total accumulated time spent in compilation */ JMM_GC_TIME_MS = 9, /* Total accumulated time spent in collection */ JMM_GC_COUNT = 10, /* Total number of collections */ + JMM_JVM_UPTIME_MS = 11, /* The JVM uptime in milliseconds */ JMM_INTERNAL_ATTRIBUTE_INDEX = 100, JMM_CLASS_LOADED_BYTES = 101, /* Number of bytes loaded instance classes */
--- a/src/share/vm/services/management.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/services/management.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -1032,6 +1032,9 @@ case JMM_JVM_INIT_DONE_TIME_MS: return Management::vm_init_done_time(); + case JMM_JVM_UPTIME_MS: + return Management::ticks_to_ms(os::elapsed_counter()); + case JMM_COMPILE_TOTAL_TIME_MS: return Management::ticks_to_ms(CompileBroker::total_compilation_ticks());
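JMM_JVM_UPTIME_MS is computed from os::elapsed_counter(), i.e. a monotonic tick source rather than wall-clock subtraction; the natural Java-level consumer is RuntimeMXBean.getUptime(), though that wiring is an assumption about the management library rather than something this hunk shows:

import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;

public class UptimeProbe {
    public static void main(String[] args) throws InterruptedException {
        RuntimeMXBean rt = ManagementFactory.getRuntimeMXBean();
        long first = rt.getUptime();    // milliseconds since VM start
        Thread.sleep(100);
        long second = rt.getUptime();
        // Backed by an elapsed counter, the difference stays non-negative
        // even if the wall clock is adjusted while the VM runs.
        System.out.println((second - first) + " ms elapsed");
    }
}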
--- a/src/share/vm/services/threadService.cpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/services/threadService.cpp Tue Dec 03 14:13:06 2013 +0400 @@ -200,6 +200,12 @@ } } +void ThreadService::metadata_do(void f(Metadata*)) { + for (ThreadDumpResult* dump = _threaddump_list; dump != NULL; dump = dump->next()) { + dump->metadata_do(f); + } +} + void ThreadService::add_thread_dump(ThreadDumpResult* dump) { MutexLocker ml(Management_lock); if (_threaddump_list == NULL) { @@ -451,9 +457,16 @@ } } +void ThreadDumpResult::metadata_do(void f(Metadata*)) { + for (ThreadSnapshot* ts = _snapshots; ts != NULL; ts = ts->next()) { + ts->metadata_do(f); + } +} + StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) { _method = jvf->method(); _bci = jvf->bci(); + _class_holder = _method->method_holder()->klass_holder(); _locked_monitors = NULL; if (with_lock_info) { ResourceMark rm; @@ -477,6 +490,11 @@ f->do_oop((oop*) _locked_monitors->adr_at(i)); } } + f->do_oop(&_class_holder); +} + +void StackFrameInfo::metadata_do(void f(Metadata*)) { + f(_method); } void StackFrameInfo::print_on(outputStream* st) const { @@ -620,6 +638,14 @@ } } +void ThreadStackTrace::metadata_do(void f(Metadata*)) { + int length = _frames->length(); + for (int i = 0; i < length; i++) { + _frames->at(i)->metadata_do(f); + } +} + + ConcurrentLocksDump::~ConcurrentLocksDump() { if (_retain_map_on_free) { return; @@ -823,6 +849,13 @@ } } +void ThreadSnapshot::metadata_do(void f(Metadata*)) { + if (_stack_trace != NULL) { + _stack_trace->metadata_do(f); + } +} + + DeadlockCycle::DeadlockCycle() { _is_deadlock = false; _threads = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JavaThread*>(INITIAL_ARRAY_SIZE, true);
--- a/src/share/vm/services/threadService.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/services/threadService.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -113,6 +113,7 @@ // GC support static void oops_do(OopClosure* f); + static void metadata_do(void f(Metadata*)); }; // Per-thread Statistics for synchronization @@ -242,6 +243,7 @@ void dump_stack_at_safepoint(int max_depth, bool with_locked_monitors); void set_concurrent_locks(ThreadConcurrentLocks* l) { _concurrent_locks = l; } void oops_do(OopClosure* f); + void metadata_do(void f(Metadata*)); }; class ThreadStackTrace : public CHeapObj<mtInternal> { @@ -265,6 +267,7 @@ void dump_stack_at_safepoint(int max_depth); Handle allocate_fill_stack_trace_element_array(TRAPS); void oops_do(OopClosure* f); + void metadata_do(void f(Metadata*)); GrowableArray<oop>* jni_locked_monitors() { return _jni_locked_monitors; } int num_jni_locked_monitors() { return (_jni_locked_monitors != NULL ? _jni_locked_monitors->length() : 0); } @@ -280,6 +283,9 @@ Method* _method; int _bci; GrowableArray<oop>* _locked_monitors; // list of object monitors locked by this frame + // We need to save the mirrors in the backtrace to keep the class + // from being unloaded while we still have this stack trace. + oop _class_holder; public: @@ -289,9 +295,10 @@ delete _locked_monitors; } }; - Method* method() const { return _method; } + Method* method() const { return _method; } int bci() const { return _bci; } void oops_do(OopClosure* f); + void metadata_do(void f(Metadata*)); int num_locked_monitors() { return (_locked_monitors != NULL ? _locked_monitors->length() : 0); } GrowableArray<oop>* locked_monitors() { return _locked_monitors; } @@ -354,6 +361,7 @@ int num_snapshots() { return _num_snapshots; } ThreadSnapshot* snapshots() { return _snapshots; } void oops_do(OopClosure* f); + void metadata_do(void f(Metadata*)); }; class DeadlockCycle : public CHeapObj<mtInternal> {
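The added _class_holder oop and metadata_do walkers pin each snapshot frame's defining class, and thus its Method*, while a thread dump result is live, which matters once RedefineClasses can free old metadata. These snapshots are the ones built for ThreadMXBean stack dumps, so the path can be exercised from plain Java (the MXBean connection is the usual one, stated here as background rather than shown by the hunk):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class ThreadDumpProbe {
    public static void main(String[] args) {
        ThreadMXBean tmb = ManagementFactory.getThreadMXBean();
        // dumpAllThreads builds the ThreadSnapshot/StackFrameInfo objects whose
        // Method* metadata the new metadata_do()/_class_holder code keeps alive.
        ThreadInfo[] infos = tmb.dumpAllThreads(true, true);
        for (ThreadInfo ti : infos) {
            System.out.println(ti.getThreadName() + ": "
                    + ti.getStackTrace().length + " frames");
        }
    }
}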
--- a/src/share/vm/utilities/globalDefinitions.hpp Sun Nov 03 07:50:24 2013 +0000 +++ b/src/share/vm/utilities/globalDefinitions.hpp Tue Dec 03 14:13:06 2013 +0400 @@ -368,8 +368,6 @@ // Klass encoding metaspace max size const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlassAlignmentInBytes; -const jlong CompressedKlassPointersBase = NOT_LP64(0) LP64_ONLY(CONST64(0x800000000)); // 32*G - // Machine dependent stuff #ifdef TARGET_ARCH_x86 @@ -458,6 +456,13 @@ return (void*) align_size_up_((uintptr_t)addr, size); } +// Align down with a lower bound. If the aligning results in 0, return 'alignment'. + +inline size_t align_size_down_bounded(size_t size, size_t alignment) { + size_t aligned_size = align_size_down_(size, alignment); + return aligned_size > 0 ? aligned_size : alignment; +} + // Clamp an address to be within a specific page // 1. If addr is on the page it is returned as is // 2. If addr is above the page_address the start of the *next* page will be returned
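The new align_size_down_bounded helper guards callers against an align-down that collapses to zero. Assuming, as HotSpot's align macros do, that the alignment is a power of two, align_size_down_(size, alignment) is simply size & ~(alignment - 1); the bounded variant then substitutes alignment itself for a zero result. A worked Java equivalent of the arithmetic (names are mine, for illustration only):

public class AlignDownBounded {
    // Align 'size' down to a multiple of 'alignment' (a power of two),
    // but never return less than 'alignment' itself.
    static long alignDownBounded(long size, long alignment) {
        long aligned = size & ~(alignment - 1); // same as align_size_down_
        return aligned > 0 ? aligned : alignment;
    }

    public static void main(String[] args) {
        System.out.println(alignDownBounded(17, 8)); // 16: ordinary align-down
        System.out.println(alignDownBounded(5, 8));  // 8: 5 aligns down to 0, so clamp to 8
    }
}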
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/inlining/InlineDefaultMethod.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026735 + * @summary CHA in C1 should make correct decisions about default methods + * @run main/othervm -Xcomp -XX:CompileOnly=InlineDefaultMethod::test -XX:TieredStopAtLevel=1 InlineDefaultMethod + */ + + +interface InterfaceWithDefaultMethod0 { + default public int defaultMethod() { + return 1; + } +} + +interface InterfaceWithDefaultMethod1 extends InterfaceWithDefaultMethod0 { } + +abstract class Subtype implements InterfaceWithDefaultMethod1 { } + +class Decoy extends Subtype { + public int defaultMethod() { + return 2; + } +} + +class Instance extends Subtype { } + +public class InlineDefaultMethod { + public static int test(InterfaceWithDefaultMethod1 x) { + return x.defaultMethod(); + } + public static void main(String[] args) { + InterfaceWithDefaultMethod1 a = new Decoy(); + InterfaceWithDefaultMethod1 b = new Instance(); + if (test(a) != 2 || + test(b) != 1) { + System.err.println("FAILED"); + System.exit(97); + } + System.err.println("PASSED"); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/CompareTest.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026722 + * @summary Verify that the compare after addExact is a signed compare + * @compile CompareTest.java + * @run main CompareTest + * + */ + +public class CompareTest { + public static long store = 0; + public static long addValue = 1231; + + public static void main(String[] args) { + for (int i = 0; i < 20000; ++i) { + runTest(i, i); + runTest(i-1, i); + } + } + + public static long create(long value, int v) { + if ((value | v) == 0) { + return 0; + } + + // C2 turned this test into an unsigned test when a control edge was set on the Cmp + if (value < -31557014167219200L || value > 31556889864403199L) { + throw new RuntimeException("error"); + } + + return value; + } + + public static void runTest(long value, int value2) { + long res = Math.addExact(value, addValue); + store = create(res, Math.floorMod(value2, 100000)); + } +}
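The failure mode behind bug 8026722 is the direction of that range check: interpreted as an unsigned comparison, every negative long looks enormous, so the check fires on values the signed version accepts. A small stand-alone demonstration of the difference, using Long.compareUnsigned (available since JDK 8); the specific numbers are just for illustration:

public class SignedVsUnsigned {
    public static void main(String[] args) {
        long value = -1231L;                  // negative: below the bound in signed terms
        long upperBound = 31556889864403199L; // the upper bound from create() above

        // Signed compare: a negative value is not above the upper bound.
        System.out.println(value > upperBound);                          // false

        // Unsigned compare: -1231L is 0xFFFFFFFFFFFFFB31, nearly 2^64,
        // so it lands far above the bound and the check wrongly fires.
        System.out.println(Long.compareUnsigned(value, upperBound) > 0); // true
    }
}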
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/GVNTest.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8028207 + * @summary Verify that GVN doesn't mess up the two addExacts + * @compile GVNTest.java + * @run main GVNTest + * + */ + +public class GVNTest { + public static int result = 0; + public static int value = 93; + public static void main(String[] args) { + for (int i = 0; i < 50000; ++i) { + result = runTest(value + i); + result = runTest(value + i); + result = runTest(value + i); + result = runTest(value + i); + result = runTest(value + i); + } + } + + public static int runTest(int value) { + int v = value + value; + int sum = 0; + if (v < 4032) { + for (int i = 0; i < 1023; ++i) { + sum += Math.addExact(value, value); + } + } else { + for (int i = 0; i < 321; ++i) { + sum += Math.addExact(value, value); + } + } + return sum + v; + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/NestedMathExactTest.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027444 + * @summary Test nested loops + * @compile NestedMathExactTest.java + * @run main NestedMathExactTest + * + */ + +public class NestedMathExactTest { + public static final int LIMIT = 100; + public static int[] result = new int[LIMIT]; + public static int value = 17; + + public static void main(String[] args) { + for (int i = 0; i < 100; ++i) { + result[i] = runTest(); + } + } + + public static int runTest() { + int sum = 0; + for (int j = 0; j < 100000; j = Math.addExact(j, 1)) { + sum = 1; + for (int i = 0; i < 5; ++i) { + sum *= value; + } + } + return sum; + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/mathexact/SplitThruPhiTest.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8028198 + * @summary Verify that split through phi does the right thing + * @compile SplitThruPhiTest.java + * @run main SplitThruPhiTest + * + */ + +public class SplitThruPhiTest { + public static volatile int value = 19; + public static int store = 0; + public static void main(String[] args) { + for (int i = 0; i < 150000; ++i) { + store = runTest(value); + } + } + + public static int runTest(int val) { + int result = Math.addExact(val, 1); + int total = 0; + for (int i = val; i < 200; i = Math.addExact(i, 1)) { + total += i; + } + return total; + } +}
--- a/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java Sun Nov 03 07:50:24 2013 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java Tue Dec 03 14:13:06 2013 +0400 @@ -24,6 +24,7 @@ /* * @test * @bug 8026844 + * @bug 8027353 * @summary Test constant subtractExact * @compile SubExactLConstantTest.java Verify.java * @run main SubExactLConstantTest
--- a/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java Sun Nov 03 07:50:24 2013 +0000 +++ b/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java Tue Dec 03 14:13:06 2013 +0400 @@ -24,6 +24,7 @@ /* * @test * @bug 8026844 + * @bug 8027353 * @summary Test non constant subtractExact * @compile SubExactLNonConstantTest.java Verify.java * @run main SubExactLNonConstantTest
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/intrinsics/stringequals/TestStringEqualsBadLength.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027445 + * @summary String.equals() may be called with a length whose upper bits are not cleared + * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation TestStringEqualsBadLength + * + */ + +import java.util.Arrays; + +public class TestStringEqualsBadLength { + + int v1; + int v2; + + boolean m(String s1) { + int l = v2 - v1; // 0 - (-1) = 1. On 64 bit: 0xffffffff00000001 + char[] arr = new char[l]; + arr[0] = 'a'; + String s2 = new String(arr); + // The string length is not reloaded but the value computed is + // reused so pointer computation must not use + // 0xffffffff00000001 + return s2.equals(s1); + } + + // Same thing with String.compareTo() + int m2(String s1) { + int l = v2 - v1; + char[] arr = new char[l+1]; + arr[0] = 'a'; + arr[1] = 'b'; + String s2 = new String(arr); + return s2.compareTo(s1); + } + + // Same thing with equals() for arrays + boolean m3(char[] arr1) { + int l = v2 - v1; // 0 - (-1) = 1. On 64 bit: 0xffffffff00000001 + char[] arr2 = new char[l]; + arr2[0] = 'a'; + return Arrays.equals(arr2, arr1); + } + + static public void main(String[] args) { + TestStringEqualsBadLength tse = new TestStringEqualsBadLength(); + tse.v1 = -1; + tse.v2 = 0; + char[] arr = new char[1]; + arr[0] = 'a'; + for (int i = 0; i < 20000; i++) { + tse.m("a"); + tse.m2("ab"); + tse.m3(arr); + } + + System.out.println("TEST PASSED"); + } +}
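The arithmetic in m() is worth spelling out. Evaluated in a 64-bit register without truncating back to 32 bits, v2 - v1 is 0x0000000000000000 - 0x00000000FFFFFFFF, which wraps to 0xFFFFFFFF00000001: the low 32 bits hold the correct length 1, but the upper 32 bits are garbage, and any pointer computation that consumes the full register goes wrong. The same arithmetic reproduced in plain Java, with a long standing in for the register:

public class BadLengthBits {
    public static void main(String[] args) {
        long v1 = 0xFFFFFFFFL; // -1 as a zero-extended 32-bit value in a 64-bit register
        long v2 = 0L;
        long raw = v2 - v1;    // the raw register contents: no truncation to int happens

        System.out.println(Long.toHexString(raw)); // ffffffff00000001
        System.out.println((int) raw);             // 1: only the low 32 bits are the length
    }
}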
--- a/test/compiler/jsr292/ConcurrentClassLoadingTest.java Sun Nov 03 07:50:24 2013 +0000 +++ b/test/compiler/jsr292/ConcurrentClassLoadingTest.java Tue Dec 03 14:13:06 2013 +0400 @@ -172,7 +172,6 @@ "java.lang.invoke.LambdaConversionException", "java.lang.invoke.LambdaForm", "java.lang.invoke.LambdaMetafactory", - "java.lang.invoke.MagicLambdaImpl", "java.lang.invoke.MemberName", "java.lang.invoke.MethodHandle", "java.lang.invoke.MethodHandleImpl",
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/profiling/TestUnexpectedProfilingMismatch.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027631 + * @summary profiling of arguments at calls cannot rely on signature of callee for types + * @run main/othervm -XX:-BackgroundCompilation -XX:TieredStopAtLevel=3 -XX:TypeProfileLevel=111 -XX:Tier3InvocationThreshold=200 -XX:Tier0InvokeNotifyFreqLog=7 TestUnexpectedProfilingMismatch + * + */ + +import java.lang.invoke.*; + +public class TestUnexpectedProfilingMismatch { + + static class A { + } + + static class B { + } + + static void mA(A a) { + } + + static void mB(B b) { + } + + static final MethodHandle mhA; + static final MethodHandle mhB; + static { + MethodHandles.Lookup lookup = MethodHandles.lookup(); + MethodType mt = MethodType.methodType(void.class, A.class); + MethodHandle res = null; + try { + res = lookup.findStatic(TestUnexpectedProfilingMismatch.class, "mA", mt); + } catch(NoSuchMethodException ex) { + } catch(IllegalAccessException ex) { + } + mhA = res; + mt = MethodType.methodType(void.class, B.class); + try { + res = lookup.findStatic(TestUnexpectedProfilingMismatch.class, "mB", mt); + } catch(NoSuchMethodException ex) { + } catch(IllegalAccessException ex) { + } + mhB = res; + } + + void m1(A a, boolean doit) throws Throwable { + if (doit) { + mhA.invoke(a); + } + } + + void m2(B b) throws Throwable { + mhB.invoke(b); + } + + static public void main(String[] args) { + TestUnexpectedProfilingMismatch tih = new TestUnexpectedProfilingMismatch(); + A a = new A(); + B b = new B(); + try { + for (int i = 0; i < 256 - 1; i++) { + tih.m1(a, true); + } + // Will trigger the compilation but will also run once + // more interpreted with a non null MDO which it will + // update. Make it skip the body of the method. + tih.m1(a, false); + // Compile this one as well and do the profiling + for (int i = 0; i < 256; i++) { + tih.m2(b); + } + // Will run and see a conflict + tih.m1(a, true); + } catch(Throwable ex) { + ex.printStackTrace(); + } + System.out.println("TEST PASSED"); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/profiling/unloadingconflict/B.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +public class B { +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/profiling/unloadingconflict/TestProfileConflictClassUnloading.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027572 + * @summary class unloading resets profile, method compiled after the profile is first set and before class loading sets unknown bit with not recorded class + * @build B + * @run main/othervm -XX:TypeProfileLevel=222 -XX:-BackgroundCompilation TestProfileConflictClassUnloading + * + */ + +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.nio.file.Paths; + +public class TestProfileConflictClassUnloading { + static class A { + } + + + static void m1(Object o) { + } + + static void m2(Object o) { + m1(o); + } + + static void m3(A a, boolean do_call) { + if (!do_call) { + return; + } + m2(a); + } + + public static ClassLoader newClassLoader() { + try { + return new URLClassLoader(new URL[] { + Paths.get(System.getProperty("test.classes",".")).toUri().toURL(), + }, null); + } catch (MalformedURLException e){ + throw new RuntimeException("Unexpected URL conversion failure", e); + } + } + + public static void main(String[] args) throws Exception { + ClassLoader loader = newClassLoader(); + Object o = loader.loadClass("B").newInstance(); + // collect conflicting profiles + for (int i = 0; i < 5000; i++) { + m2(o); + } + // prepare for conflict + A a = new A(); + for (int i = 0; i < 5000; i++) { + m3(a, false); + } + // unload class in profile + o = null; + loader = null; + System.gc(); + // record the conflict + m3(a, true); + // trigger another GC + System.gc(); + } +}
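The test above depends on B being reachable only through a throwaway URLClassLoader, so that dropping the loader and calling System.gc() can actually unload the class and reset the recorded profile. If you want to confirm that the unloading really happens, one common technique (not part of the test itself) is to keep the loaded class only behind a WeakReference; a sketch under the same test.classes assumption:

import java.lang.ref.WeakReference;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Paths;

public class ObserveUnload {
    public static void main(String[] args) throws Exception {
        URLClassLoader loader = new URLClassLoader(new URL[] {
            Paths.get(System.getProperty("test.classes", ".")).toUri().toURL(),
        }, null);
        WeakReference<Class<?>> ref = new WeakReference<>(loader.loadClass("B"));

        loader.close(); // drop the strong paths to the class...
        loader = null;
        System.gc();    // ...and give the collector a chance to unload it

        // Prints true once the class (and its loader) have been collected;
        // System.gc() is only a hint, so this may take more than one cycle.
        System.out.println("unloaded: " + (ref.get() == null));
    }
}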
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/regalloc/C1ObjectSpillInLogicOp.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8027751 + * @summary C1 crashes generating G1 post-barrier in Unsafe.getAndSetObject() intrinsic because of the new value spill + * @run main/othervm -XX:+UseG1GC C1ObjectSpillInLogicOp + * + * G1 barriers use logical operators (xor) on T_OBJECT mixed with T_LONG or T_INT. + * The current implementation of logical operations on x86 in C1 doesn't allow long operands to be on the stack. + * There is special code in the register allocator that forces long arguments into registers on x86. However, T_OBJECT + * can be spilled just fine, and in that case the xor emission will fail. + */ + +import java.util.concurrent.atomic.*; +class C1ObjectSpillInLogicOp { + static public void main(String[] args) { + AtomicReferenceArray<Integer> x = new AtomicReferenceArray<>(128); + Integer y = new Integer(0); + for (int i = 0; i < 50000; i++) { + x.getAndSet(i % x.length(), y); + } + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/startup/StartupOutput.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8026949 + * @summary Test ensures correct VM output during startup + * @library ../../testlibrary + * + */ +import com.oracle.java.testlibrary.*; + +public class StartupOutput { + public static void main(String[] args) throws Exception { + ProcessBuilder pb; + OutputAnalyzer out; + + pb = ProcessTools.createJavaProcessBuilder("-Xint", "-XX:+DisplayVMOutputToStdout", "-version"); + out = new OutputAnalyzer(pb.start()); + out.shouldNotContain("no space to run compilers"); + + out.shouldHaveExitValue(0); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/uncommontrap/TestStackBangRbp.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8028308 + * @summary rbp not restored when stack overflow is thrown from deopt/uncommon trap blobs + * @run main/othervm -XX:-BackgroundCompilation -XX:CompileCommand=dontinline,TestStackBangRbp::m1 -XX:CompileCommand=exclude,TestStackBangRbp::m2 -Xss256K -XX:-UseOnStackReplacement TestStackBangRbp + * + */ +public class TestStackBangRbp { + + static class UnloadedClass1 { + } + + static class UnloadedClass2 { + } + + static Object m1(boolean deopt) { + long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, + l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, + l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36, + l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48, + l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60, + l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72, + l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84, + l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96, + l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107, + l108, l109, l110, l111, l112, l113, l114, l115, l116, l117, + l118, l119, l120, l121, l122, l123, l124, l125, l126, l127, + l128, l129, l130, l131, l132, l133, l134, l135, l136, l137, + l138, l139, l140, l141, l142, l143, l144, l145, l146, l147, + l148, l149, l150, l151, l152, l153, l154, l155, l156, l157, + l158, l159, l160, l161, l162, l163, l164, l165, l166, l167, + l168, l169, l170, l171, l172, l173, l174, l175, l176, l177, + l178, l179, l180, l181, l182, l183, l184, l185, l186, l187, + l188, l189, l190, l191, l192, l193, l194, l195, l196, l197, + l198, l199, l200, l201, l202, l203, l204, l205, l206, l207, + l208, l209, l210, l211, l212, l213, l214, l215, l216, l217, + l218, l219, l220, l221, l222, l223, l224, l225, l226, l227, + l228, l229, l230, l231, l232, l233, l234, l235, l236, l237, + l238, l239, l240, l241, l242, l243, l244, l245, l246, l247, + l248, l249, l250, l251, l252, l253, l254, l255, l256, l257, + l258, l259, l260, l261, l262, l263, l264, l265, l266, l267, + l268, l269, l270, l271, l272, l273, l274, l275, l276, l277, + l278, l279, l280, l281, l282, l283, l284, l285, l286, l287, + l288, l289, l290, l291, l292, l293, l294, l295, l296, l297, + l298, l299, l300, l301, l302, l303, l304, l305, l306, l307, + l308, l309, l310, l311, l312, l313, l314, l315, 
l316, l317, + l318, l319, l320, l321, l322, l323, l324, l325, l326, l327, + l328, l329, l330, l331, l332, l333, l334, l335, l336, l337, + l338, l339, l340, l341, l342, l343, l344, l345, l346, l347, + l348, l349, l350, l351, l352, l353, l354, l355, l356, l357, + l358, l359, l360, l361, l362, l363, l364, l365, l366, l367, + l368, l369, l370, l371, l372, l373, l374, l375, l376, l377, + l378, l379, l380, l381, l382, l383, l384, l385, l386, l387, + l388, l389, l390, l391, l392, l393, l394, l395, l396, l397, + l398, l399, l400, l401, l402, l403, l404, l405, l406, l407, + l408, l409, l410, l411, l412, l413, l414, l415, l416, l417, + l418, l419, l420, l421, l422, l423, l424, l425, l426, l427, + l428, l429, l430, l431, l432, l433, l434, l435, l436, l437, + l438, l439, l440, l441, l442, l443, l444, l445, l446, l447, + l448, l449, l450, l451, l452, l453, l454, l455, l456, l457, + l458, l459, l460, l461, l462, l463, l464, l465, l466, l467, + l468, l469, l470, l471, l472, l473, l474, l475, l476, l477, + l478, l479, l480, l481, l482, l483, l484, l485, l486, l487, + l488, l489, l490, l491, l492, l493, l494, l495, l496, l497, + l498, l499, l500, l501, l502, l503, l504, l505, l506, l507, + l508, l509, l510, l511; + + long ll0, ll1, ll2, ll3, ll4, ll5, ll6, ll7, ll8, ll9, ll10, ll11, ll12, + ll13, ll14, ll15, ll16, ll17, ll18, ll19, ll20, ll21, ll22, ll23, ll24, + ll25, ll26, ll27, ll28, ll29, ll30, ll31, ll32, ll33, ll34, ll35, ll36, + ll37, ll38, ll39, ll40, ll41, ll42, ll43, ll44, ll45, ll46, ll47, ll48, + ll49, ll50, ll51, ll52, ll53, ll54, ll55, ll56, ll57, ll58, ll59, ll60, + ll61, ll62, ll63, ll64, ll65, ll66, ll67, ll68, ll69, ll70, ll71, ll72, + ll73, ll74, ll75, ll76, ll77, ll78, ll79, ll80, ll81, ll82, ll83, ll84, + ll85, ll86, ll87, ll88, ll89, ll90, ll91, ll92, ll93, ll94, ll95, ll96, + ll97, ll98, ll99, ll100, ll101, ll102, ll103, ll104, ll105, ll106, ll107, + ll108, ll109, ll110, ll111, ll112, ll113, ll114, ll115, ll116, ll117, + ll118, ll119, ll120, ll121, ll122, ll123, ll124, ll125, ll126, ll127, + ll128, ll129, ll130, ll131, ll132, ll133, ll134, ll135, ll136, ll137, + ll138, ll139, ll140, ll141, ll142, ll143, ll144, ll145, ll146, ll147, + ll148, ll149, ll150, ll151, ll152, ll153, ll154, ll155, ll156, ll157, + ll158, ll159, ll160, ll161, ll162, ll163, ll164, ll165, ll166, ll167, + ll168, ll169, ll170, ll171, ll172, ll173, ll174, ll175, ll176, ll177, + ll178, ll179, ll180, ll181, ll182, ll183, ll184, ll185, ll186, ll187, + ll188, ll189, ll190, ll191, ll192, ll193, ll194, ll195, ll196, ll197, + ll198, ll199, ll200, ll201, ll202, ll203, ll204, ll205, ll206, ll207, + ll208, ll209, ll210, ll211, ll212, ll213, ll214, ll215, ll216, ll217, + ll218, ll219, ll220, ll221, ll222, ll223, ll224, ll225, ll226, ll227, + ll228, ll229, ll230, ll231, ll232, ll233, ll234, ll235, ll236, ll237, + ll238, ll239, ll240, ll241, ll242, ll243, ll244, ll245, ll246, ll247, + ll248, ll249, ll250, ll251, ll252, ll253, ll254, ll255, ll256, ll257, + ll258, ll259, ll260, ll261, ll262, ll263, ll264, ll265, ll266, ll267, + ll268, ll269, ll270, ll271, ll272, ll273, ll274, ll275, ll276, ll277, + ll278, ll279, ll280, ll281, ll282, ll283, ll284, ll285, ll286, ll287, + ll288, ll289, ll290, ll291, ll292, ll293, ll294, ll295, ll296, ll297, + ll298, ll299, ll300, ll301, ll302, ll303, ll304, ll305, ll306, ll307, + ll308, ll309, ll310, ll311, ll312, ll313, ll314, ll315, ll316, ll317, + ll318, ll319, ll320, ll321, ll322, ll323, ll324, ll325, ll326, ll327, + ll328, ll329, ll330, ll331, ll332, ll333, ll334, ll335, ll336, ll337, + ll338, 
ll339, ll340, ll341, ll342, ll343, ll344, ll345, ll346, ll347, + ll348, ll349, ll350, ll351, ll352, ll353, ll354, ll355, ll356, ll357, + ll358, ll359, ll360, ll361, ll362, ll363, ll364, ll365, ll366, ll367, + ll368, ll369, ll370, ll371, ll372, ll373, ll374, ll375, ll376, ll377, + ll378, ll379, ll380, ll381, ll382, ll383, ll384, ll385, ll386, ll387, + ll388, ll389, ll390, ll391, ll392, ll393, ll394, ll395, ll396, ll397, + ll398, ll399, ll400, ll401, ll402, ll403, ll404, ll405, ll406, ll407, + ll408, ll409, ll410, ll411, ll412, ll413, ll414, ll415, ll416, ll417, + ll418, ll419, ll420, ll421, ll422, ll423, ll424, ll425, ll426, ll427, + ll428, ll429, ll430, ll431, ll432, ll433, ll434, ll435, ll436, ll437, + ll438, ll439, ll440, ll441, ll442, ll443, ll444, ll445, ll446, ll447, + ll448, ll449, ll450, ll451, ll452, ll453, ll454, ll455, ll456, ll457, + ll458, ll459, ll460, ll461, ll462, ll463, ll464, ll465, ll466, ll467, + ll468, ll469, ll470, ll471, ll472, ll473, ll474, ll475, ll476, ll477, + ll478, ll479, ll480, ll481, ll482, ll483, ll484, ll485, ll486, ll487, + ll488, ll489, ll490, ll491, ll492, ll493, ll494, ll495, ll496, ll497, + ll498, ll499, ll500, ll501, ll502, ll503, ll504, ll505, ll506, ll507, + ll508, ll509, ll510, ll511; + + int i1 = TestStackBangRbp.i1; + int i2 = TestStackBangRbp.i2; + int i3 = TestStackBangRbp.i3; + int i4 = TestStackBangRbp.i4; + int i5 = TestStackBangRbp.i5; + int i6 = TestStackBangRbp.i6; + int i7 = TestStackBangRbp.i7; + int i8 = TestStackBangRbp.i8; + int i9 = TestStackBangRbp.i9; + int i10 = TestStackBangRbp.i10; + int i11 = TestStackBangRbp.i11; + int i12 = TestStackBangRbp.i12; + int i13 = TestStackBangRbp.i13; + int i14 = TestStackBangRbp.i14; + int i15 = TestStackBangRbp.i15; + int i16 = TestStackBangRbp.i16; + + TestStackBangRbp.i1 = i1; + TestStackBangRbp.i2 = i2; + TestStackBangRbp.i3 = i3; + TestStackBangRbp.i4 = i4; + TestStackBangRbp.i5 = i5; + TestStackBangRbp.i6 = i6; + TestStackBangRbp.i7 = i7; + TestStackBangRbp.i8 = i8; + TestStackBangRbp.i9 = i9; + TestStackBangRbp.i10 = i10; + TestStackBangRbp.i11 = i11; + TestStackBangRbp.i12 = i12; + TestStackBangRbp.i13 = i13; + TestStackBangRbp.i14 = i14; + TestStackBangRbp.i15 = i15; + TestStackBangRbp.i16 = i16; + + if (deopt) { + // deoptimize with integer in rbp + UnloadedClass1 res = new UnloadedClass1(); // forces deopt with c2 + return res; + } + return null; + } + + static boolean m2(boolean deopt) { + // call m2 recursively until stack overflow. 
Then call m3 that + // will call m1 and trigger and deopt in m1 while keeping a + // lot of objects live in registers at the call to m1 + + long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, + l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, + l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36, + l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48, + l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60, + l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72, + l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84, + l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96, + l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107, + l108, l109, l110, l111, l112, l113, l114, l115, l116, l117, + l118, l119, l120, l121, l122, l123, l124, l125, l126, l127, + l128, l129, l130, l131, l132, l133, l134, l135, l136, l137, + l138, l139, l140, l141, l142, l143, l144, l145, l146, l147, + l148, l149, l150, l151, l152, l153, l154, l155, l156, l157, + l158, l159, l160, l161, l162, l163, l164, l165, l166, l167, + l168, l169, l170, l171, l172, l173, l174, l175, l176, l177, + l178, l179, l180, l181, l182, l183, l184, l185, l186, l187, + l188, l189, l190, l191, l192, l193, l194, l195, l196, l197, + l198, l199, l200, l201, l202, l203, l204, l205, l206, l207, + l208, l209, l210, l211, l212, l213, l214, l215, l216, l217, + l218, l219, l220, l221, l222, l223, l224, l225, l226, l227, + l228, l229, l230, l231, l232, l233, l234, l235, l236, l237, + l238, l239, l240, l241, l242, l243, l244, l245, l246, l247, + l248, l249, l250, l251, l252, l253, l254, l255, l256, l257, + l258, l259, l260, l261, l262, l263, l264, l265, l266, l267, + l268, l269, l270, l271, l272, l273, l274, l275, l276, l277, + l278, l279, l280, l281, l282, l283, l284, l285, l286, l287, + l288, l289, l290, l291, l292, l293, l294, l295, l296, l297, + l298, l299, l300, l301, l302, l303, l304, l305, l306, l307, + l308, l309, l310, l311, l312, l313, l314, l315, l316, l317, + l318, l319, l320, l321, l322, l323, l324, l325, l326, l327, + l328, l329, l330, l331, l332, l333, l334, l335, l336, l337, + l338, l339, l340, l341, l342, l343, l344, l345, l346, l347, + l348, l349, l350, l351, l352, l353, l354, l355, l356, l357, + l358, l359, l360, l361, l362, l363, l364, l365, l366, l367, + l368, l369, l370, l371, l372, l373, l374, l375, l376, l377, + l378, l379, l380, l381, l382, l383, l384, l385, l386, l387, + l388, l389, l390, l391, l392, l393, l394, l395, l396, l397, + l398, l399, l400, l401, l402, l403, l404, l405, l406, l407, + l408, l409, l410, l411, l412, l413, l414, l415, l416, l417, + l418, l419, l420, l421, l422, l423, l424, l425, l426, l427, + l428, l429, l430, l431, l432, l433, l434, l435, l436, l437, + l438, l439, l440, l441, l442, l443, l444, l445, l446, l447, + l448, l449, l450, l451, l452, l453, l454, l455, l456, l457, + l458, l459, l460, l461, l462, l463, l464, l465, l466, l467, + l468, l469, l470, l471, l472, l473, l474, l475, l476, l477, + l478, l479, l480, l481, l482, l483, l484, l485, l486, l487, + l488, l489, l490, l491, l492, l493, l494, l495, l496, l497, + l498, l499, l500, l501, l502, l503, l504, l505, l506, l507, + l508, l509, l510, l511; + + boolean do_m3 = false; + try { + do_m3 = m2(deopt); + } catch (StackOverflowError e) { + return true; + } + if (do_m3) { + m3(deopt); + } + return false; + } + + static volatile Object o1 = new Object(); + + static volatile int i1 = 1; + static volatile int i2 = 2; + static volatile int i3 = 3; + static volatile int i4 = 4; + static volatile 
int i5 = 5; + static volatile int i6 = 6; + static volatile int i7 = 7; + static volatile int i8 = 8; + static volatile int i9 = 9; + static volatile int i10 = 10; + static volatile int i11 = 11; + static volatile int i12 = 12; + static volatile int i13 = 13; + static volatile int i14 = 14; + static volatile int i15 = 15; + static volatile int i16 = 16; + + static void m3(boolean deopt) { + Object o1 = TestStackBangRbp.o1; + TestStackBangRbp.o1 = o1; + + try { + m1(deopt); + } catch (StackOverflowError e) { + // deoptimize again. rbp holds an integer. It should have an object. + UnloadedClass2 res = new UnloadedClass2(); // forces deopt with c2 + } + TestStackBangRbp.o1 = o1; + } + + static public void main(String[] args) { + // get m1 & m3 compiled + for (int i = 0; i < 20000; i++) { + m1(false); + m3(false); + } + m2(true); + + System.out.println("TEST PASSED"); + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/compiler/uncommontrap/UncommonTrapStackBang.java Tue Dec 03 14:13:06 2013 +0400 @@ -0,0 +1,10908 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +/** + * @test + * @bug 8026775 + * @summary Uncommon trap blob did not bang all the stack shadow pages + * + * @run main/othervm -server -XX:+IgnoreUnrecognizedVMOptions -XX:+TieredCompilation UncommonTrapStackBang + * @run main/othervm -server -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation UncommonTrapStackBang + * + * + * Note: This test does not reproduce the problem with absolute + * certainty. Empirically the bug reproduces on Windows some 80+% of + * the time. Setting everything up to fail in 100% of the cases turns + * out to be tricky at best. + * + * + * The goal of this test is to set up the following stack: + * + * doIt() + * eatStack() + * ... + * eatStack() + * run() + * + * + * When doIt() gets executed it will hit an uncommon trap and expand + * into a huge interpreter frame. The doIt method then calls the + * compiled version of StringBuilder.<init>() which does a single + * stack bang StackShadowPages down. + * + * The uncommon trap blob is supposed to prepare for the interpreter + * by pre-touching stack pages. The bug was that it didn't correctly + * pre-touch all the stack shadow pages, leaving a "hole" on the stack + * which raises an exception on Windows when the stack bang in + * StringBuilder is performed. 
+ */ +public class UncommonTrapStackBang extends Thread { + class Foo { } + + public static void main(String[] args) throws Exception { + doWarmup(); + + // Fork off a new thread to increase odds of the stack being unmapped + UncommonTrapStackBang htsb = new UncommonTrapStackBang(); + htsb.run(); + htsb.join(); + } + + // Make sure: + // + // a) StringBuilder.<init>()V is compiled + // b) doIt() is compiled with an uncommon trap in the unlikely path + public static void doWarmup() { + for (int i = 0; i < 100_000; i++) { + new StringBuilder(); + + doIt(false); + } + } + + public void run() { + eatStack(200); + } + + // Consume some stack to get down to some unused/unmapped pages, + // then call doIt and provoke the uncommon trap/deoptimization + private void eatStack(int n) { + if (n <= 0) { + doIt(true); + return; + } + + eatStack(n - 1); + } + + static public void doIt(boolean unlikely) { + int i0; + int i1; + int i2; + int i3; + int i4; + int i5; + int i6; + int i7; + int i8; + int i9; + int i10; + int i11; + int i12; + int i13; + int i14; + int i15; + int i16; + int i17; + int i18; + int i19; + int i20; + int i21; + int i22; + int i23; + int i24; + int i25; + int i26; + int i27; + int i28; + int i29; + int i30; + int i31; + int i32; + int i33; + int i34; + int i35; + int i36; + int i37; + int i38; + int i39; + int i40; + int i41; + int i42; + int i43; + int i44; + int i45; + int i46; + int i47; + int i48; + int i49; + int i50; + int i51; + int i52; + int i53; + int i54; + int i55; + int i56; + int i57; + int i58; + int i59; + int i60; + int i61; + int i62; + int i63; + int i64; + int i65; + int i66; + int i67; + int i68; + int i69; + int i70; + int i71; + int i72; + int i73; + int i74; + int i75; + int i76; + int i77; + int i78; + int i79; + int i80; + int i81; + int i82; + int i83; + int i84; + int i85; + int i86; + int i87; + int i88; + int i89; + int i90; + int i91; + int i92; + int i93; + int i94; + int i95; + int i96; + int i97; + int i98; + int i99; + int i100; + int i101; + int i102; + int i103; + int i104; + int i105; + int i106; + int i107; + int i108; + int i109; + int i110; + int i111; + int i112; + int i113; + int i114; + int i115; + int i116; + int i117; + int i118; + int i119; + int i120; + int i121; + int i122; + int i123; + int i124; + int i125; + int i126; + int i127; + int i128; + int i129; + int i130; + int i131; + int i132; + int i133; + int i134; + int i135; + int i136; + int i137; + int i138; + int i139; + int i140; + int i141; + int i142; + int i143; + int i144; + int i145; + int i146; + int i147; + int i148; + int i149; + int i150; + int i151; + int i152; + int i153; + int i154; + int i155; + int i156; + int i157; + int i158; + int i159; + int i160; + int i161; + int i162; + int i163; + int i164; + int i165; + int i166; + int i167; + int i168; + int i169; + int i170; + int i171; + int i172; + int i173; + int i174; + int i175; + int i176; + int i177; + int i178; + int i179; + int i180; + int i181; + int i182; + int i183; + int i184; + int i185; + int i186; + int i187; + int i188; + int i189; + int i190; + int i191; + int i192; + int i193; + int i194; + int i195; + int i196; + int i197; + int i198; + int i199; + int i200; + int i201; + int i202; + int i203; + int i204; + int i205; + int i206; + int i207; + int i208; + int i209; + int i210; + int i211; + int i212; + int i213; + int i214; + int i215; + int i216; + int i217; + int i218; + int i219; + int i220; + int i221; + int i222; + int i223; + int i224; + int i225; + int i226; + int i227; + int i228; + int i229; + 
int i230; + int i231; + int i232; + int i233; + int i234; + int i235; + int i236; + int i237; + int i238; + int i239; + int i240; + int i241; + int i242; + int i243; + int i244; + int i245; + int i246; + int i247; + int i248; + int i249; + int i250; + int i251; + int i252; + int i253; + int i254; + int i255; + int i256; + int i257; + int i258; + int i259; + int i260; + int i261; + int i262; + int i263; + int i264; + int i265; + int i266; + int i267; + int i268; + int i269; + int i270; + int i271; + int i272; + int i273; + int i274; + int i275; + int i276; + int i277; + int i278; + int i279; + int i280; + int i281; + int i282; + int i283; + int i284; + int i285; + int i286; + int i287; + int i288; + int i289; + int i290; + int i291; + int i292; + int i293; + int i294; + int i295; + int i296; + int i297; + int i298; + int i299; + int i300; + int i301; + int i302; + int i303; + int i304; + int i305; + int i306; + int i307; + int i308; + int i309; + int i310; + int i311; + int i312; + int i313; + int i314; + int i315; + int i316; + int i317; + int i318; + int i319; + int i320; + int i321; + int i322; + int i323; + int i324; + int i325; + int i326; + int i327; + int i328; + int i329; + int i330; + int i331; + int i332; + int i333; + int i334; + int i335; + int i336; + int i337; + int i338; + int i339; + int i340; + int i341; + int i342; + int i343; + int i344; + int i345; + int i346; + int i347; + int i348; + int i349; + int i350; + int i351; + int i352; + int i353; + int i354; + int i355; + int i356; + int i357; + int i358; + int i359; + int i360; + int i361; + int i362; + int i363; + int i364; + int i365; + int i366; + int i367; + int i368; + int i369; + int i370; + int i371; + int i372; + int i373; + int i374; + int i375; + int i376; + int i377; + int i378; + int i379; + int i380; + int i381; + int i382; + int i383; + int i384; + int i385; + int i386; + int i387; + int i388; + int i389; + int i390; + int i391; + int i392; + int i393; + int i394; + int i395; + int i396; + int i397; + int i398; + int i399; + int i400; + int i401; + int i402; + int i403; + int i404; + int i405; + int i406; + int i407; + int i408; + int i409; + int i410; + int i411; + int i412; + int i413; + int i414; + int i415; + int i416; + int i417; + int i418; + int i419; + int i420; + int i421; + int i422; + int i423; + int i424; + int i425; + int i426; + int i427; + int i428; + int i429; + int i430; + int i431; + int i432; + int i433; + int i434; + int i435; + int i436; + int i437; + int i438; + int i439; + int i440; + int i441; + int i442; + int i443; + int i444; + int i445; + int i446; + int i447; + int i448; + int i449; + int i450; + int i451; + int i452; + int i453; + int i454; + int i455; + int i456; + int i457; + int i458; + int i459; + int i460; + int i461; + int i462; + int i463; + int i464; + int i465; + int i466; + int i467; + int i468; + int i469; + int i470; + int i471; + int i472; + int i473; + int i474; + int i475; + int i476; + int i477; + int i478; + int i479; + int i480; + int i481; + int i482; + int i483; + int i484; + int i485; + int i486; + int i487; + int i488; + int i489; + int i490; + int i491; + int i492; + int i493; + int i494; + int i495; + int i496; + int i497; + int i498; + int i499; + int i500; + int i501; + int i502; + int i503; + int i504; + int i505; + int i506; + int i507; + int i508; + int i509; + int i510; + int i511; + int i512; + int i513; + int i514; + int i515; + int i516; + int i517; + int i518; + int i519; + int i520; + int i521; + int i522; + int i523; + int i524; + int i525; + 
int i526; + int i527; + int i528; + int i529; + int i530; + int i531; + int i532; + int i533; + int i534; + int i535; + int i536; + int i537; + int i538; + int i539; + int i540; + int i541; + int i542; + int i543; + int i544; + int i545; + int i546; + int i547; + int i548; + int i549; + int i550; + int i551; + int i552; + int i553; + int i554; + int i555; + int i556; + int i557; + int i558; + int i559; + int i560; + int i561; + int i562; + int i563; + int i564; + int i565; + int i566; + int i567; + int i568; + int i569; + int i570; + int i571; + int i572; + int i573; + int i574; + int i575; + int i576; + int i577; + int i578; + int i579; + int i580; + int i581; + int i582; + int i583; + int i584; + int i585; + int i586; + int i587; + int i588; + int i589; + int i590; + int i591; + int i592; + int i593; + int i594; + int i595; + int i596; + int i597; + int i598; + int i599; + int i600; + int i601; + int i602; + int i603; + int i604; + int i605; + int i606; + int i607; + int i608; + int i609; + int i610; + int i611; + int i612; + int i613; + int i614; + int i615; + int i616; + int i617; + int i618; + int i619; + int i620; + int i621; + int i622; + int i623; + int i624; + int i625; + int i626; + int i627; + int i628; + int i629; + int i630; + int i631; + int i632; + int i633; + int i634; + int i635; + int i636; + int i637; + int i638; + int i639; + int i640; + int i641; + int i642; + int i643; + int i644; + int i645; + int i646; + int i647; + int i648; + int i649; + int i650; + int i651; + int i652; + int i653; + int i654; + int i655; + int i656; + int i657; + int i658; + int i659; + int i660; + int i661; + int i662; + int i663; + int i664; + int i665; + int i666; + int i667; + int i668; + int i669; + int i670; + int i671; + int i672; + int i673; + int i674; + int i675; + int i676; + int i677; + int i678; + int i679; + int i680; + int i681; + int i682; + int i683; + int i684; + int i685; + int i686; + int i687; + int i688; + int i689; + int i690; + int i691; + int i692; + int i693; + int i694; + int i695; + int i696; + int i697; + int i698; + int i699; + int i700; + int i701; + int i702; + int i703; + int i704; + int i705; + int i706; + int i707; + int i708; + int i709; + int i710; + int i711; + int i712; + int i713; + int i714; + int i715; + int i716; + int i717; + int i718; + int i719; + int i720; + int i721; + int i722; + int i723; + int i724; + int i725; + int i726; + int i727; + int i728; + int i729; + int i730; + int i731; + int i732; + int i733; + int i734; + int i735; + int i736; + int i737; + int i738; + int i739; + int i740; + int i741; + int i742; + int i743; + int i744; + int i745; + int i746; + int i747; + int i748; + int i749; + int i750; + int i751; + int i752; + int i753; + int i754; + int i755; + int i756; + int i757; + int i758; + int i759; + int i760; + int i761; + int i762; + int i763; + int i764; + int i765; + int i766; + int i767; + int i768; + int i769; + int i770; + int i771; + int i772; + int i773; + int i774; + int i775; + int i776; + int i777; + int i778; + int i779; + int i780; + int i781; + int i782; + int i783; + int i784; + int i785; + int i786; + int i787; + int i788; + int i789; + int i790; + int i791; + int i792; + int i793; + int i794; + int i795; + int i796; + int i797; + int i798; + int i799; + int i800; + int i801; + int i802; + int i803; + int i804; + int i805; + int i806; + int i807; + int i808; + int i809; + int i810; + int i811; + int i812; + int i813; + int i814; + int i815; + int i816; + int i817; + int i818; + int i819; + int i820; + int i821; + 
int i822; + int i823; + int i824; + int i825; + int i826; + int i827; + int i828; + int i829; + int i830; + int i831; + int i832; + int i833; + int i834; + int i835; + int i836; + int i837; + int i838; + int i839; + int i840; + int i841; + int i842; + int i843; + int i844; + int i845; + int i846; + int i847; + int i848; + int i849; + int i850; + int i851; + int i852; + int i853; + int i854; + int i855; + int i856; + int i857; + int i858; + int i859; + int i860; + int i861; + int i862; + int i863; + int i864; + int i865; + int i866; + int i867; + int i868; + int i869; + int i870; + int i871; + int i872; + int i873; + int i874; + int i875; + int i876; + int i877; + int i878; + int i879; + int i880; + int i881; + int i882; + int i883; + int i884; + int i885; + int i886; + int i887; + int i888; + int i889; + int i890; + int i891; + int i892; + int i893; + int i894; + int i895; + int i896; + int i897; + int i898; + int i899; + int i900; + int i901; + int i902; + int i903; + int i904; + int i905; + int i906; + int i907; + int i908; + int i909; + int i910; + int i911; + int i912; + int i913; + int i914; + int i915; + int i916; + int i917; + int i918; + int i919; + int i920; + int i921; + int i922; + int i923; + int i924; + int i925; + int i926; + int i927; + int i928; + int i929; + int i930; + int i931; + int i932; + int i933; + int i934; + int i935; + int i936; + int i937; + int i938; + int i939; + int i940; + int i941; + int i942; + int i943; + int i944; + int i945; + int i946; + int i947; + int i948; + int i949; + int i950; + int i951; + int i952; + int i953; + int i954; + int i955; + int i956; + int i957; + int i958; + int i959; + int i960; + int i961; + int i962; + int i963;