OpenJDK / zgc / zgc
changeset 25427:ad0a3d24d1d6
Merge
author   | duke
date     | Wed, 05 Jul 2017 19:49:46 +0200
parents  | babcd39958ce a69d849bb635
children | 1c53a8dd22ed
files    | hotspot/src/cpu/ppc/vm/bytecodes_ppc.cpp hotspot/src/cpu/ppc/vm/bytecodes_ppc.hpp hotspot/src/cpu/sparc/vm/bytecodes_sparc.cpp hotspot/src/cpu/sparc/vm/bytecodes_sparc.hpp hotspot/src/cpu/x86/vm/bytecodes_x86.cpp hotspot/src/cpu/x86/vm/bytecodes_x86.hpp hotspot/src/cpu/zero/vm/bytecodes_zero.cpp hotspot/src/cpu/zero/vm/bytecodes_zero.hpp hotspot/test/runtime/8001071/Test8001071.java hotspot/test/runtime/8001071/Test8001071.sh jdk/src/share/lib/security/BlacklistedCertsConverter.java jdk/src/share/lib/security/blacklisted.certs jdk/src/share/lib/security/blacklisted.certs.pem nashorn/src/jdk/nashorn/api/scripting/resources/engine.js
diffstat | 434 files changed, 5960 insertions(+), 3678 deletions(-)
--- a/.hgtags-top-repo	Thu Jul 10 12:13:22 2014 -0700
+++ b/.hgtags-top-repo	Wed Jul 05 19:49:46 2017 +0200
@@ -264,3 +264,4 @@
 75a08df650eb3126bab0c4d15241f5886162393c jdk9-b19
 ee4fd72b2ec3d92497f37163352f294aa695c6fb jdk9-b20
 9052803f4d01feda28b3d65f2b64dd457d21c7b6 jdk9-b21
+8e4bdab4c362aadde2d321f968cd503a2f779e2f jdk9-b22
--- a/common/autoconf/hotspot-spec.gmk.in	Thu Jul 10 12:13:22 2014 -0700
+++ b/common/autoconf/hotspot-spec.gmk.in	Wed Jul 05 19:49:46 2017 +0200
@@ -69,8 +69,8 @@
 # Yet another name for arch used for an extra subdir below the jvm lib.
 # Uses i386 and amd64, instead of x86 and x86_64.
 LIBARCH=$(OPENJDK_TARGET_CPU_LEGACY_LIB)
-# Old name for OPENJDK_TARGET_CPU, uses i586 and amd64, instead of x86 and x86_64.
-ARCH=$(OPENJDK_TARGET_CPU_LEGACY)
+# Set the cpu architecture
+ARCH=$(OPENJDK_TARGET_CPU_ARCH)
 # Legacy setting for building for a 64 bit machine.
 # If yes then this expands to _LP64:=1
 @LP64@
--- a/corba/.hgtags	Thu Jul 10 12:13:22 2014 -0700
+++ b/corba/.hgtags	Wed Jul 05 19:49:46 2017 +0200
@@ -264,3 +264,4 @@
 eecc1b6adc7e193d00a0641eb0963add5a4c06e8 jdk9-b19
 87f36eecb1665012d01c5cf102494e591c943ea6 jdk9-b20
 3615a4e7f0542ca7552ad6454b742c73ee211d8e jdk9-b21
+ddc07abf4307855c0dc904cc5c96cc764023a930 jdk9-b22
--- a/get_source.sh Thu Jul 10 12:13:22 2014 -0700 +++ b/get_source.sh Wed Jul 05 19:49:46 2017 +0200 @@ -25,11 +25,34 @@ # questions. # +to_stderr() { + echo "$@" >&2 +} + +error() { + to_stderr "ERROR: $1" + exit ${2:-126} +} + +warning() { + to_stderr "WARNING: $1" +} + +version_field() { + # rev is typically omitted for minor and major releases + field=`echo ${1}.0 | cut -f ${2} -d .` + if expr 1 + $field >/dev/null 2> /dev/null; then + echo $field + else + echo -1 + fi +} + # Version check # required reqdmajor=1 -reqdminor=5 +reqdminor=4 reqdrev=0 # requested @@ -37,34 +60,39 @@ rqstminor=6 rqstrev=3 + # installed -hgwhere="`which hg 2> /dev/null | grep -v '^no hg in '`" +hgwhere="`command -v hg`" if [ "x$hgwhere" = "x" ]; then - echo "ERROR: Could not locate Mercurial command" >&2 - exit 126 + error "Could not locate Mercurial command" fi -hgversion="`hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \(.*\))\$@\1@p'`" +hgversion="`hg --version 2> /dev/null | sed -n -e 's@^Mercurial Distributed SCM (version \([^+]*\).*)\$@\1@p'`" if [ "x${hgversion}" = "x" ] ; then - echo "ERROR: Could not determine Mercurial version" >&2 - exit 126 + error "Could not determine Mercurial version of $hgwhere" fi -hgmajor="`echo $hgversion | cut -f 1 -d .`" -hgminor="`echo $hgversion | cut -f 2 -d .`" -hgrev="`echo $hgversion.0 | cut -f 3 -d .`" # rev is omitted for minor and major releases +hgmajor="`version_field $hgversion 1`" +hgminor="`version_field $hgversion 2`" +hgrev="`version_field $hgversion 3`" + +if [ $hgmajor -eq -1 -o $hgminor -eq -1 -o $hgrev -eq -1 ] ; then + error "Could not determine Mercurial version of $hgwhere from \"$hgversion\"" +fi + # Require if [ $hgmajor -lt $reqdmajor -o \( $hgmajor -eq $reqdmajor -a $hgminor -lt $reqdminor \) -o \( $hgmajor -eq $reqdmajor -a $hgminor -eq $reqdminor -a $hgrev -lt $reqdrev \) ] ; then - echo "ERROR: Mercurial version $reqdmajor.$reqdminor.$reqdrev or later is required. $hgwhere is version $hgversion" >&2 - exit 126 + error "Mercurial version $reqdmajor.$reqdminor.$reqdrev or later is required. $hgwhere is version $hgversion" fi + # Request if [ $hgmajor -lt $rqstmajor -o \( $hgmajor -eq $rqstmajor -a $hgminor -lt $rqstminor \) -o \( $hgmajor -eq $rqstmajor -a $hgminor -eq $rqstminor -a $hgrev -lt $rqstrev \) ] ; then - echo "WARNING: Mercurial version $rqstmajor.$rqstminor.$rqstrev or later is recommended. $hgwhere is version $hgversion" >&2 + warning "Mercurial version $rqstmajor.$rqstminor.$rqstrev or later is recommended. $hgwhere is version $hgversion" fi + # Get clones of all absent nested repositories (harmless if already exist) sh ./common/bin/hgforest.sh clone "$@" || exit $?
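The get_source.sh change above replaces the ad-hoc field extraction with a version_field helper (a missing or non-numeric field becomes -1) and then compares the installed Mercurial version against the required and requested versions field by field, most-significant first. As a reading aid only, here is a minimal Java sketch of that same comparison logic; the class and method names are hypothetical and not part of this changeset.

public class HgVersionCheck {
    // Mirrors version_field(): return the n-th dot-separated field (1-based),
    // appending ".0" so a missing revision defaults to 0, or -1 if the field
    // is absent or not a number.
    static int versionField(String version, int n) {
        String[] parts = (version + ".0").split("\\.");
        if (n > parts.length) return -1;
        try {
            return Integer.parseInt(parts[n - 1]);
        } catch (NumberFormatException e) {
            return -1;
        }
    }

    // True if the installed (major, minor, rev) triple is older than required,
    // comparing the most-significant field first, as the script does.
    static boolean olderThan(int[] installed, int[] required) {
        for (int i = 0; i < 3; i++) {
            if (installed[i] != required[i]) return installed[i] < required[i];
        }
        return false;
    }

    public static void main(String[] args) {
        String hgVersion = "3.0.1"; // e.g. parsed from "hg --version"
        int[] installed = {
            versionField(hgVersion, 1),
            versionField(hgVersion, 2),
            versionField(hgVersion, 3)
        };
        if (olderThan(installed, new int[] {1, 4, 0})) {
            System.err.println("ERROR: Mercurial 1.4.0 or later is required");
        }
    }
}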
--- a/hotspot/.hgtags	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/.hgtags	Wed Jul 05 19:49:46 2017 +0200
@@ -424,3 +424,4 @@
 d4cffb3ae6213c66c7522ebffe0349360a45f0ef jdk9-b19
 c1af79d122ec9f715fa29312b5e91763f3a4dfc4 jdk9-b20
 17b4a5e831b398738feedb0afe75245744510153 jdk9-b21
+518d1fcc0799494f013e00e0a94a91b6f212d54f jdk9-b22
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/G1YCType.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.gc_interface; + +//These definitions should be kept in sync with the definitions in the HotSpot +//code. + +public enum G1YCType { + Normal ("Normal"), + InitialMark ("Initial Mark"), + DuringMark ("During Mark"), + Mixed ("Mixed"), + G1YCTypeEndSentinel ("Unknown"); + + private final String value; + + G1YCType(String val) { + this.value = val; + } + public String value() { + return value; + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCCause.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.gc_interface; + +//These definitions should be kept in sync with the definitions in the HotSpot code. + +public enum GCCause { + _java_lang_system_gc ("System.gc()"), + _full_gc_alot ("FullGCAlot"), + _scavenge_alot ("ScavengeAlot"), + _allocation_profiler ("Allocation Profiler"), + _jvmti_force_gc ("JvmtiEnv ForceGarbageCollection"), + _gc_locker ("GCLocker Initiated GC"), + _heap_inspection ("Heap Inspection Initiated GC"), + _heap_dump ("Heap Dump Initiated GC"), + + _no_gc ("No GC"), + _no_cause_specified ("Unknown GCCause"), + _allocation_failure ("Allocation Failure"), + + _tenured_generation_full ("Tenured Generation Full"), + _metadata_GC_threshold ("Metadata GC Threshold"), + + _cms_generation_full ("CMS Generation Full"), + _cms_initial_mark ("CMS Initial Mark"), + _cms_final_remark ("CMS Final Remark"), + _cms_concurrent_mark ("CMS Concurrent Mark"), + + _old_generation_expanded_on_last_scavenge ("Old Generation Expanded On Last Scavenge"), + _old_generation_too_full_to_scavenge ("Old Generation Too Full To Scavenge"), + _adaptive_size_policy ("Ergonomics"), + + _g1_inc_collection_pause ("G1 Evacuation Pause"), + _g1_humongous_allocation ("G1 Humongous Allocation"), + + _last_ditch_collection ("Last ditch collection"), + _last_gc_cause ("ILLEGAL VALUE - last gc cause - ILLEGAL VALUE"); + + private final String value; + + GCCause(String val) { + this.value = val; + } + public String value() { + return value; + } +}
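The new Serviceability Agent enums added in this changeset (G1YCType above, GCCause here, and GCName, GCWhen, ReferenceType below) all follow the same pattern: one constant per HotSpot value, each carrying a display string exposed through value(). The sketch below shows how such an enum might be consumed; it assumes the declaration order matches the integer numbering used by the VM, and the describe helper is illustrative rather than part of the agent code.

public class GCCauseDemo {
    // Abbreviated copy of the pattern used by the new SA enums.
    enum GCCause {
        _java_lang_system_gc("System.gc()"),
        _full_gc_alot("FullGCAlot"),
        _scavenge_alot("ScavengeAlot");

        private final String value;
        GCCause(String val) { this.value = val; }
        public String value() { return value; }
    }

    // Hypothetical helper: translate a raw cause index read out of the VM
    // into its human-readable description, falling back for unknown values.
    static String describe(int rawCause) {
        GCCause[] causes = GCCause.values();
        if (rawCause < 0 || rawCause >= causes.length) {
            return "Unknown GCCause";
        }
        return causes[rawCause].value();
    }

    public static void main(String[] args) {
        System.out.println(describe(0)); // prints "System.gc()"
    }
}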
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCName.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.gc_interface; + +//These definitions should be kept in sync with the definitions in the HotSpot code. + +public enum GCName { + ParallelOld ("ParallelOld"), + SerialOld ("SerialOld"), + PSMarkSweep ("PSMarkSweep"), + ParallelScavenge ("ParallelScavenge"), + DefNew ("DefNew"), + ParNew ("ParNew"), + G1New ("G1New"), + ConcurrentMarkSweep ("ConcurrentMarkSweep"), + G1Old ("G1Old"), + GCNameEndSentinel ("GCNameEndSentinel"); + + private final String value; + + GCName(String val) { + this.value = val; + } + public String value() { + return value; + } +} +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/GCWhen.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.gc_interface; + +//These definitions should be kept in sync with the definitions in the HotSpot code. + +public enum GCWhen { + BeforeGC ("Before GC"), + AfterGC ("After GC"), + GCWhenEndSentinel ("GCWhenEndSentinel"); + + private final String value; + + GCWhen(String val) { + this.value = val; + } + public String value() { + return value; + } +} + + +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_interface/ReferenceType.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.gc_interface; + +//These definitions should be kept in sync with the definitions in the HotSpot code. + +public enum ReferenceType { + REF_NONE ("None reference"), // Regular class + REF_OTHER ("Other reference"), // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below + REF_SOFT ("Soft reference"), // Subclass of java/lang/ref/SoftReference + REF_WEAK ("Weak reference"), // Subclass of java/lang/ref/WeakReference + REF_FINAL ("Final reference"), // Subclass of java/lang/ref/FinalReference + REF_PHANTOM ("Phantom reference"); // Subclass of java/lang/ref/PhantomReference + + private final String value; + + ReferenceType(String val) { + this.value = val; + } + public String value() { + return value; + } +}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/Universe.java	Wed Jul 05 19:49:46 2017 +0200
@@ -56,6 +56,12 @@
   private static AddressField narrowKlassBaseField;
   private static CIntegerField narrowKlassShiftField;
 
+  public enum NARROW_OOP_MODE {
+    UnscaledNarrowOop,
+    ZeroBasedNarrowOop,
+    HeapBasedNarrowOop
+  }
+
   static {
     VM.registerVMInitializedObserver(new Observer() {
         public void update(Observable o, Object data) {
@@ -94,7 +100,17 @@
 
   public Universe() {
   }
-
+  public static String narrowOopModeToString(NARROW_OOP_MODE mode) {
+    switch (mode) {
+    case UnscaledNarrowOop:
+      return "32-bits Oops";
+    case ZeroBasedNarrowOop:
+      return "zero based Compressed Oops";
+    case HeapBasedNarrowOop:
+      return "Compressed Oops with base";
+    }
+    return "";
+  }
   public CollectedHeap heap() {
     try {
       return (CollectedHeap) heapConstructor.instantiateWrapperFor(collectedHeapField.getValue());
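The Universe change above adds a NARROW_OOP_MODE enum and a narrowOopModeToString mapping. The following self-contained sketch mirrors that mapping; the real method lives in sun.jvm.hotspot.memory.Universe and needs the SA classes on the classpath, so the copy here exists only to show the expected string for each mode.

public class NarrowOopModeDemo {
    // Local stand-in for Universe.NARROW_OOP_MODE.
    enum NarrowOopMode { UnscaledNarrowOop, ZeroBasedNarrowOop, HeapBasedNarrowOop }

    // Same switch as the new Universe.narrowOopModeToString().
    static String narrowOopModeToString(NarrowOopMode mode) {
        switch (mode) {
            case UnscaledNarrowOop:  return "32-bits Oops";
            case ZeroBasedNarrowOop: return "zero based Compressed Oops";
            case HeapBasedNarrowOop: return "Compressed Oops with base";
        }
        return "";
    }

    public static void main(String[] args) {
        for (NarrowOopMode mode : NarrowOopMode.values()) {
            System.out.println(mode + " -> " + narrowOopModeToString(mode));
        }
    }
}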
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java Wed Jul 05 19:49:46 2017 +0200 @@ -55,6 +55,7 @@ layoutHelper = new IntField(type.getJIntField("_layout_helper"), 0); name = type.getAddressField("_name"); accessFlags = new CIntField(type.getCIntegerField("_access_flags"), 0); + traceIDField = type.getField("_trace_id"); subklass = new MetadataField(type.getAddressField("_subklass"), 0); nextSibling = new MetadataField(type.getAddressField("_next_sibling"), 0); @@ -86,6 +87,7 @@ private static CIntField accessFlags; private static MetadataField subklass; private static MetadataField nextSibling; + private static sun.jvm.hotspot.types.Field traceIDField; private Address getValue(AddressField field) { return addr.getAddressAt(field.getOffset()); @@ -106,6 +108,7 @@ public AccessFlags getAccessFlagsObj(){ return new AccessFlags(getAccessFlags()); } public Klass getSubklassKlass() { return (Klass) subklass.getValue(this); } public Klass getNextSiblingKlass() { return (Klass) nextSibling.getValue(this); } + public long traceID() { return traceIDField.getJLong(addr); } // computed access flags - takes care of inner classes etc. // This is closer to actual source level than getAccessFlags() etc.
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java Wed Jul 05 19:49:46 2017 +0200 @@ -54,6 +54,8 @@ private static OopField threadNameField; private static OopField threadGroupField; private static LongField threadEETopField; + //tid field is new since 1.5 + private static LongField threadTIDField; // threadStatus field is new since 1.5 private static IntField threadStatusField; // parkBlocker field is new since 1.6 @@ -220,6 +222,7 @@ threadNameField = (OopField) k.findField("name", "[C"); threadGroupField = (OopField) k.findField("group", "Ljava/lang/ThreadGroup;"); threadEETopField = (LongField) k.findField("eetop", "J"); + threadTIDField = (LongField) k.findField("tid", "J"); threadStatusField = (IntField) k.findField("threadStatus", "I"); threadParkBlockerField = (OopField) k.findField("parkBlocker", "Ljava/lang/Object;"); @@ -268,6 +271,15 @@ return VM.getVM().getThreads().createJavaThreadWrapper(addr); } + public static long threadOopGetTID(Oop threadOop) { + initThreadFields(); + if (threadTIDField != null) { + return threadTIDField.getValue(threadOop); + } else { + return 0; + } + } + /** returns value of java.lang.Thread.threadStatus field */ public static int threadOopGetThreadStatus(Oop threadOop) { initThreadFields();
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/opto/CompilerPhaseType.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.opto; + +//These definitions should be kept in sync with the definitions in the HotSpot code. + +public enum CompilerPhaseType { + PHASE_BEFORE_STRINGOPTS ("Before StringOpts"), + PHASE_AFTER_STRINGOPTS ("After StringOpts"), + PHASE_BEFORE_REMOVEUSELESS ("Before RemoveUseless"), + PHASE_AFTER_PARSING ("After Parsing"), + PHASE_ITER_GVN1 ("Iter GVN 1"), + PHASE_PHASEIDEAL_BEFORE_EA ("PhaseIdealLoop before EA"), + PHASE_ITER_GVN_AFTER_EA ("Iter GVN after EA"), + PHASE_ITER_GVN_AFTER_ELIMINATION ("Iter GVN after eliminating allocations and locks"), + PHASE_PHASEIDEALLOOP1 ("PhaseIdealLoop 1"), + PHASE_PHASEIDEALLOOP2 ("PhaseIdealLoop 2"), + PHASE_PHASEIDEALLOOP3 ("PhaseIdealLoop 3"), + PHASE_CPP1 ("PhaseCPP 1"), + PHASE_ITER_GVN2 ("Iter GVN 2"), + PHASE_PHASEIDEALLOOP_ITERATIONS ("PhaseIdealLoop iterations"), + PHASE_OPTIMIZE_FINISHED ("Optimize finished"), + PHASE_GLOBAL_CODE_MOTION ("Global code motion"), + PHASE_FINAL_CODE ("Final Code"), + PHASE_AFTER_EA ("After Escape Analysis"), + PHASE_BEFORE_CLOOPS ("Before CountedLoop"), + PHASE_AFTER_CLOOPS ("After CountedLoop"), + PHASE_BEFORE_BEAUTIFY_LOOPS ("Before beautify loops"), + PHASE_AFTER_BEAUTIFY_LOOPS ("After beautify loops"), + PHASE_BEFORE_MATCHING ("Before Matching"), + PHASE_INCREMENTAL_INLINE ("Incremental Inline"), + PHASE_INCREMENTAL_BOXING_INLINE ("Incremental Boxing Inline"), + PHASE_END ("End"), + PHASE_FAILURE ("Failure"), + PHASE_NUM_TYPES ("Number of Phase Types"); + + private final String value; + + CompilerPhaseType(String val) { + this.value = val; + } + public String value() { + return value; + } +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Flags.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.runtime; + +//These definitions should be kept in sync with the definitions in the HotSpot code. + +public enum Flags { + // value origin + DEFAULT ("Default"), + COMMAND_LINE ("Command line"), + ENVIRON_VAR ("Environment variable"), + CONFIG_FILE ("Config file"), + MANAGEMENT ("Management"), + ERGONOMIC ("Ergonomic"), + ATTACH_ON_DEMAND ("Attach on demand"), + INTERNAL ("Internal"); + + private final String value; + + Flags(String val) { + this.value = val; + } + public String value() { + return value; + } +}
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/Thread.java Wed Jul 05 19:49:46 2017 +0200 @@ -41,6 +41,8 @@ private static AddressField currentPendingMonitorField; private static AddressField currentWaitingMonitorField; + private static JLongField allocatedBytesField; + static { VM.registerVMInitializedObserver(new Observer() { public void update(Observable o, Object data) { @@ -61,6 +63,7 @@ activeHandlesField = type.getAddressField("_active_handles"); currentPendingMonitorField = type.getAddressField("_current_pending_monitor"); currentWaitingMonitorField = type.getAddressField("_current_waiting_monitor"); + allocatedBytesField = type.getJLongField("_allocated_bytes"); } public Thread(Address addr) { @@ -104,6 +107,10 @@ return new JNIHandleBlock(a); } + public long allocatedBytes() { + return allocatedBytesField.getValue(addr); + } + public boolean isVMThread() { return false; } public boolean isJavaThread() { return false; } public boolean isCompilerThread() { return false; }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VMOps.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +package sun.jvm.hotspot.runtime; + +//These definitions should be kept in sync with the definitions in the HotSpot code. + +public enum VMOps { + Dummy, + ThreadStop, + ThreadDump, + PrintThreads, + FindDeadlocks, + ForceSafepoint, + ForceAsyncSafepoint, + Deoptimize, + DeoptimizeFrame, + DeoptimizeAll, + ZombieAll, + UnlinkSymbols, + Verify, + PrintJNI, + HeapDumper, + DeoptimizeTheWorld, + CollectForMetadataAllocation, + GC_HeapInspection, + GenCollectFull, + GenCollectFullConcurrent, + GenCollectForAllocation, + ParallelGCFailedAllocation, + ParallelGCSystemGC, + CGC_Operation, + CMS_Initial_Mark, + CMS_Final_Remark, + G1CollectFull, + G1CollectForAllocation, + G1IncCollectionPause, + EnableBiasedLocking, + RevokeBias, + BulkRevokeBias, + PopulateDumpSharedSpace, + JNIFunctionTableCopier, + RedefineClasses, + GetOwnedMonitorInfo, + GetObjectMonitorUsage, + GetCurrentContendedMonitor, + GetStackTrace, + GetMultipleStackTraces, + GetAllStackTraces, + GetThreadListStackTraces, + GetFrameCount, + GetFrameLocation, + ChangeBreakpoints, + GetOrSetLocal, + GetCurrentLocation, + EnterInterpOnlyMode, + ChangeSingleStep, + HeapWalkOperation, + HeapIterateOperation, + ReportJavaOutOfMemory, + JFRCheckpoint, + Exit, + LinuxDllLoad, + Terminating +}
--- a/hotspot/make/aix/makefiles/vm.make	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/make/aix/makefiles/vm.make	Wed Jul 05 19:49:46 2017 +0200
@@ -82,14 +82,12 @@
   -DJRE_RELEASE_VERSION="\"$(JRE_RELEASE_VER)\"" \
   $(JDK_VER_DEFS)
 HS_LIB_ARCH = -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\"
-BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
 BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
 VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
 
 CXXFLAGS = \
   ${SYSDEFS} \
   ${INCLUDES} \
-  ${BUILD_TARGET} \
   ${BUILD_USER} \
   ${HS_LIB_ARCH} \
   ${VM_DISTRO}
--- a/hotspot/make/bsd/makefiles/gcc.make Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/make/bsd/makefiles/gcc.make Wed Jul 05 19:49:46 2017 +0200 @@ -280,7 +280,10 @@ # optimization control flags (Used by fastdebug and release variants) OPT_CFLAGS/NOOPT=-O0 -ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" +ifeq ($(USE_CLANG), true) + # Clang does not support -Og + OPT_CFLAGS/DEBUG=-O0 +else ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination) OPT_CFLAGS/DEBUG=-Og else @@ -319,9 +322,20 @@ # Work around some compiler bugs. ifeq ($(USE_CLANG), true) + # Clang 4.2 ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1) OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT) OPT_CFLAGS/unsafe.o += -O1 + # Clang 5.0 + else ifeq ($(shell expr $(CC_VER_MAJOR) = 5 \& $(CC_VER_MINOR) = 0), 1) + OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT) + OPT_CFLAGS/unsafe.o += -O1 + # Clang 5.1 + else ifeq ($(shell expr $(CC_VER_MAJOR) = 5 \& $(CC_VER_MINOR) = 1), 1) + OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT) + OPT_CFLAGS/unsafe.o += -O1 + else + $(error "Update compiler workarounds for Clang $(CC_VER_MAJOR).$(CC_VER_MINOR)") endif else # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation. @@ -443,7 +457,10 @@ CFLAGS += -flimit-debug-info endif -ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" +ifeq ($(USE_CLANG), true) + # Clang does not support -Og + DEBUG_CFLAGS=-O0 +else ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 8 \) \))" "1" # Allow basic optimizations which don't distrupt debugging. (Principally dead code elimination) DEBUG_CFLAGS=-Og else
--- a/hotspot/make/bsd/makefiles/vm.make	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/make/bsd/makefiles/vm.make	Wed Jul 05 19:49:46 2017 +0200
@@ -81,14 +81,12 @@
   -DJRE_RELEASE_VERSION="\"$(JRE_RELEASE_VER)\"" \
   $(JDK_VER_DEFS)
 HS_LIB_ARCH = -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\"
-BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
 BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
 VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
 
 CXXFLAGS = \
   ${SYSDEFS} \
   ${INCLUDES} \
-  ${BUILD_TARGET} \
   ${BUILD_USER} \
   ${HS_LIB_ARCH} \
   ${VM_DISTRO}
--- a/hotspot/make/defs.make	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/make/defs.make	Wed Jul 05 19:49:46 2017 +0200
@@ -135,8 +135,12 @@
 ifeq ($(JDK_MKTG_VERSION),)
   JDK_MKTG_VERSION=$(JDK_MINOR_VERSION).$(JDK_MICRO_VERSION)
 endif
-ifeq ($(JDK_VERSION),)
-  JDK_VERSION=$(JDK_MAJOR_VERSION).$(JDK_MINOR_VERSION).$(JDK_MICRO_VERSION)
+ifeq ($(JDK_VERSION),)
+  ifeq ($(BUILD_FLAVOR), product)
+    JDK_VERSION=$(JDK_MAJOR_VERSION).$(JDK_MINOR_VERSION).$(JDK_MICRO_VERSION)
+  else
+    JDK_VERSION=$(JDK_MAJOR_VERSION).$(JDK_MINOR_VERSION).$(JDK_MICRO_VERSION)-$(BUILD_FLAVOR)
+  endif
 endif
 ifeq ($(FULL_VERSION),)
   FULL_VERSION="$(JDK_VERSION)"
--- a/hotspot/make/linux/makefiles/defs.make Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/make/linux/makefiles/defs.make Wed Jul 05 19:49:46 2017 +0200 @@ -69,7 +69,7 @@ endif # sparc -ifeq ($(ARCH), sparc64) +ifneq (,$(findstring $(ARCH), sparc)) ifeq ($(ARCH_DATA_MODEL), 64) ARCH_DATA_MODEL = 64 MAKE_ARGS += LP64=1 @@ -83,30 +83,20 @@ HS_ARCH = sparc endif -# amd64/x86_64 -ifneq (,$(findstring $(ARCH), amd64 x86_64)) +# i686/i586 and amd64/x86_64 +ifneq (,$(findstring $(ARCH), amd64 x86_64 i686 i586)) ifeq ($(ARCH_DATA_MODEL), 64) ARCH_DATA_MODEL = 64 MAKE_ARGS += LP64=1 PLATFORM = linux-amd64 VM_PLATFORM = linux_amd64 - HS_ARCH = x86 else ARCH_DATA_MODEL = 32 PLATFORM = linux-i586 VM_PLATFORM = linux_i486 - HS_ARCH = x86 - # We have to reset ARCH to i686 since SRCARCH relies on it - ARCH = i686 endif -endif -# i686/i586 ie 32-bit x86 -ifneq (,$(findstring $(ARCH), i686 i586)) - ARCH_DATA_MODEL = 32 - PLATFORM = linux-i586 - VM_PLATFORM = linux_i486 - HS_ARCH = x86 + HS_ARCH = x86 endif # ARM @@ -118,20 +108,18 @@ endif # PPC -ifeq ($(ARCH), ppc) - ARCH_DATA_MODEL = 32 - PLATFORM = linux-ppc - VM_PLATFORM = linux_ppc - HS_ARCH = ppc -endif +ifneq (,$(findstring $(ARCH), ppc)) + ifeq ($(ARCH_DATA_MODEL), 64) + MAKE_ARGS += LP64=1 + PLATFORM = linux-ppc64 + VM_PLATFORM = linux_ppc64 + else + ARCH_DATA_MODEL = 32 + PLATFORM = linux-ppc + VM_PLATFORM = linux_ppc + endif -# PPC64 -ifeq ($(ARCH), ppc64) - ARCH_DATA_MODEL = 64 - MAKE_ARGS += LP64=1 - PLATFORM = linux-ppc64 - VM_PLATFORM = linux_ppc64 - HS_ARCH = ppc + HS_ARCH = ppc endif # On 32 bit linux we build server and client, on 64 bit just server.
--- a/hotspot/make/linux/makefiles/vm.make	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/make/linux/makefiles/vm.make	Wed Jul 05 19:49:46 2017 +0200
@@ -82,14 +82,12 @@
   -DJRE_RELEASE_VERSION="\"$(JRE_RELEASE_VER)\"" \
   $(JDK_VER_DEFS)
 HS_LIB_ARCH = -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\"
-BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
 BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
 VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
 
 CXXFLAGS = \
   ${SYSDEFS} \
   ${INCLUDES} \
-  ${BUILD_TARGET} \
   ${BUILD_USER} \
   ${HS_LIB_ARCH} \
   ${VM_DISTRO}
--- a/hotspot/make/solaris/makefiles/sa.make	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/make/solaris/makefiles/sa.make	Wed Jul 05 19:49:46 2017 +0200
@@ -29,8 +29,12 @@
 # and generate JNI header file for native methods.
 
 include $(GAMMADIR)/make/solaris/makefiles/rules.make
+include $(GAMMADIR)/make/defs.make
 AGENT_DIR = $(GAMMADIR)/agent
 include $(GAMMADIR)/make/sa.files
+
+-include $(HS_ALT_MAKE)/solaris/makefiles/sa.make
+
 GENERATED = ../generated
 
 # tools.jar is needed by the JDI - SA binding
--- a/hotspot/make/solaris/makefiles/vm.make	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/make/solaris/makefiles/vm.make	Wed Jul 05 19:49:46 2017 +0200
@@ -77,14 +77,12 @@
   -DJRE_RELEASE_VERSION="\"$(JRE_RELEASE_VER)\"" \
   $(JDK_VER_DEFS)
 HS_LIB_ARCH = -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\"
-BUILD_TARGET = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
 BUILD_USER = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
 VM_DISTRO = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
 
 CXXFLAGS = \
   ${SYSDEFS} \
   ${INCLUDES} \
-  ${BUILD_TARGET} \
   ${BUILD_USER} \
   ${HS_LIB_ARCH} \
   ${VM_DISTRO}
--- a/hotspot/make/windows/makefiles/sa.make Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/make/windows/makefiles/sa.make Wed Jul 05 19:49:46 2017 +0200 @@ -38,6 +38,22 @@ GENERATED = ../generated +HS_COMMON_SRC_REL = src + +!if "$(OPENJDK)" != "true" +HS_ALT_SRC_REL=src/closed +HS_ALT_SRC = $(WorkSpace)/$(HS_ALT_SRC_REL) +!ifndef HS_ALT_MAKE +HS_ALT_MAKE=$(WorkSpace)/make/closed +!endif +!endif + +HS_COMMON_SRC = $(WorkSpace)/$(HS_COMMON_SRC_REL) + +!ifdef HS_ALT_MAKE +!include $(HS_ALT_MAKE)/windows/makefiles/sa.make +!endif + # tools.jar is needed by the JDI - SA binding SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
--- a/hotspot/make/windows/projectfiles/common/Makefile	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/make/windows/projectfiles/common/Makefile	Wed Jul 05 19:49:46 2017 +0200
@@ -116,7 +116,7 @@
 JDK_MINOR_VERSION="\\\"$(JDK_MINOR_VER)\\\""
 JDK_MICRO_VERSION="\\\"$(JDK_MICRO_VER)\\\""
 
-ReleaseOptions = -define HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) -define JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) -define HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO) -define JDK_MAJOR_VERSION=$(JDK_MAJOR_VERSION) -define JDK_MINOR_VERSION=$(JDK_MINOR_VERSION) -define JDK_MICRO_VERSION=$(JDK_MICRO_VERSION) -define JDK_BUILD_NUMBER=$(JDK_BUILD_NUMBER)
+ReleaseOptions = -define HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) -define JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) -define HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO) -define JDK_MAJOR_VERSION=$(JDK_MAJOR_VERSION) -define JDK_MINOR_VERSION=$(JDK_MINOR_VERSION) -define JDK_MICRO_VERSION=$(JDK_MICRO_VERSION) -define JDK_BUILD_NUMBER=$(JDK_BUILD_NUMBER) -define VISUAL_STUDIO_BUILD=true
 ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) $(ReleaseOptions)
 
 $(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
--- a/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/assembler_ppc.cpp	Wed Jul 05 19:49:46 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -693,7 +693,7 @@
   // PPC 1, section 4.6.7 Floating-Point Compare Instructions
   fcmpu( CCR7, F24, F25);
 
-  tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", code()->insts_begin(), code()->insts_end());
+  tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", p2i(code()->insts_begin()), p2i(code()->insts_end()));
   code()->decode();
 }
--- a/hotspot/src/cpu/ppc/vm/bytecodes_ppc.cpp Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,31 +0,0 @@ -/* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. - * Copyright 2012, 2013 SAP AG. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "interpreter/bytecodes.hpp" - -void Bytecodes::pd_initialize() { - // No ppc specific initialization. -}
--- a/hotspot/src/cpu/ppc/vm/bytecodes_ppc.hpp Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. - * Copyright 2012, 2013 SAP AG. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef CPU_PPC_VM_BYTECODES_PPC_HPP -#define CPU_PPC_VM_BYTECODES_PPC_HPP - -// No ppc64 specific bytecodes - -#endif // CPU_PPC_VM_BYTECODES_PPC_HPP
--- a/hotspot/src/cpu/ppc/vm/compiledIC_ppc.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/cpu/ppc/vm/compiledIC_ppc.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,34 +50,6 @@ return is_icholder_entry(call->destination()); } -//----------------------------------------------------------------------------- -// High-level access to an inline cache. Guaranteed to be MT-safe. - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // Search for the ic_call at the given address. - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; - } -} - // ---------------------------------------------------------------------------- // A PPC CompiledStaticCall looks like this: @@ -203,7 +175,7 @@ if (TraceICs) { ResourceMark rm; tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", - instruction_address(), + p2i(instruction_address()), callee->name_and_sig_as_C_string()); }
--- a/hotspot/src/cpu/ppc/vm/frame_ppc.cpp	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/frame_ppc.cpp	Wed Jul 05 19:49:46 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -140,7 +140,7 @@
 void frame::patch_pc(Thread* thread, address pc) {
   if (TracePcPatching) {
     tty->print_cr("patch_pc at address " PTR_FORMAT " [" PTR_FORMAT " -> " PTR_FORMAT "]",
-                  &((address*) _sp)[-1], ((address*) _sp)[-1], pc);
+                  p2i(&((address*) _sp)[-1]), p2i(((address*) _sp)[-1]), p2i(pc));
   }
   own_abi()->lr = (uint64_t)pc;
   _cb = CodeCache::find_blob(pc);
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Wed Jul 05 19:49:46 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -3099,7 +3099,7 @@
 };
 
 static void stop_on_request(int tp, const char* msg) {
-  tty->print("PPC assembly code requires stop: (%s) %s\n", (void *)stop_types[tp%/*stop_end*/4], msg);
+  tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
   guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
 }
--- a/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/methodHandles_ppc.cpp	Wed Jul 05 19:49:46 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2014 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -464,7 +464,7 @@
                  strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
   const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23";
   tty->print_cr("MH %s %s="INTPTR_FORMAT " sp=" INTPTR_FORMAT,
-                adaptername, mh_reg_name, (intptr_t) mh, entry_sp);
+                adaptername, mh_reg_name, (intptr_t) mh, (intptr_t) entry_sp);
 
   if (Verbose) {
     tty->print_cr("Registers:");
--- a/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,6 +1,6 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. - * Copyright 2012, 2013 SAP AG. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright 2012, 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -147,9 +147,9 @@ address addr = addr_at(0); if (!NativeCall::is_call_at(addr)) { - tty->print_cr("not a NativeCall at " PTR_FORMAT, addr); + tty->print_cr("not a NativeCall at " PTR_FORMAT, p2i(addr)); // TODO: PPC port: Disassembler::decode(addr - 20, addr + 20, tty); - fatal(err_msg("not a NativeCall at " PTR_FORMAT, addr)); + fatal(err_msg("not a NativeCall at " PTR_FORMAT, p2i(addr))); } } #endif // ASSERT @@ -160,9 +160,9 @@ NativeInstruction::verify(); if (!NativeFarCall::is_far_call_at(addr)) { - tty->print_cr("not a NativeFarCall at " PTR_FORMAT, addr); + tty->print_cr("not a NativeFarCall at " PTR_FORMAT, p2i(addr)); // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty); - fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, addr)); + fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, p2i(addr))); } } #endif // ASSERT @@ -306,9 +306,9 @@ if (! (cb != NULL && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) && ! (cb != NULL && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) && ! MacroAssembler::is_bl(*((int*) addr))) { - tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, addr); + tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr)); // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty); - fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, addr)); + fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, p2i(addr))); } } } @@ -344,9 +344,9 @@ NativeInstruction::verify(); if (!NativeJump::is_jump_at(addr)) { - tty->print_cr("not a NativeJump at " PTR_FORMAT, addr); + tty->print_cr("not a NativeJump at " PTR_FORMAT, p2i(addr)); // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty); - fatal(err_msg("not a NativeJump at " PTR_FORMAT, addr)); + fatal(err_msg("not a NativeJump at " PTR_FORMAT, p2i(addr))); } } #endif // ASSERT
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Wed Jul 05 19:49:46 2017 +0200
@@ -1329,7 +1329,7 @@
 
   if (!false /* TODO: PPC port C->is_frameless_method()*/) {
     st->print("save return pc\n\t");
-    st->print("push frame %d\n\t", -framesize);
+    st->print("push frame %ld\n\t", -framesize);
   }
 }
 #endif
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Wed Jul 05 19:49:46 2017 +0200
@@ -352,7 +352,7 @@
 
   if (PrintAssembly) {
     ttyLocker ttyl;
-    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", code);
+    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
     Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
     tty->print_cr("Time loop1 :%f", loop1_seconds);
     tty->print_cr("Time loop2 :%f", loop2_seconds);
@@ -435,7 +435,7 @@
   // Print the detection code.
   if (PrintAssembly) {
     ttyLocker ttyl;
-    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", code);
+    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", p2i(code));
     Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
   }
@@ -468,7 +468,7 @@
   // Print the detection code.
   if (PrintAssembly) {
     ttyLocker ttyl;
-    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", code);
+    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", p2i(code));
     Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
   }
--- a/hotspot/src/cpu/sparc/vm/bytecodes_sparc.cpp Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,35 +0,0 @@ -/* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "interpreter/bytecodes.hpp" - - -void Bytecodes::pd_initialize() { - // (nothing) -} - -Bytecodes::Code Bytecodes::pd_base_code_for(Code code) { - return code; -}
--- a/hotspot/src/cpu/sparc/vm/bytecodes_sparc.hpp Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,37 +0,0 @@ -/* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef CPU_SPARC_VM_BYTECODES_SPARC_HPP -#define CPU_SPARC_VM_BYTECODES_SPARC_HPP - -#ifdef SPARC -#define NLOCALS_IN_REGS 6 -#endif - - -// Sparc specific bytecodes - -// (none) - -#endif // CPU_SPARC_VM_BYTECODES_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/compiledIC_sparc.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -50,34 +50,6 @@ return is_icholder_entry(call->destination()); } -//----------------------------------------------------------------------------- -// High-level access to an inline cache. Guaranteed to be MT-safe. - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // Search for the ic_call at the given address. - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; - } -} - // ---------------------------------------------------------------------------- #define __ _masm.
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -436,32 +436,6 @@ } #endif // CC_INTERP - -#ifdef ASSERT -// Debugging aid -static frame nth_sender(int n) { - frame f = JavaThread::current()->last_frame(); - - for(int i = 0; i < n; ++i) - f = f.sender((RegisterMap*)NULL); - - printf("first frame %d\n", f.is_first_frame() ? 1 : 0); - printf("interpreted frame %d\n", f.is_interpreted_frame() ? 1 : 0); - printf("java frame %d\n", f.is_java_frame() ? 1 : 0); - printf("entry frame %d\n", f.is_entry_frame() ? 1 : 0); - printf("native frame %d\n", f.is_native_frame() ? 1 : 0); - if (f.is_compiled_frame()) { - if (f.is_deoptimized_frame()) - printf("deoptimized frame 1\n"); - else - printf("compiled frame 1\n"); - } - - return f; -} -#endif - - frame frame::sender_for_entry_frame(RegisterMap *map) const { assert(map != NULL, "map must be set"); // Java frame called from C; skip all C frames and return top C
--- a/hotspot/src/cpu/x86/vm/bytecodes_x86.cpp Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,37 +0,0 @@ -/* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "interpreter/bytecodes.hpp" - - -void Bytecodes::pd_initialize() { - // No i486 specific initialization -} - - -Bytecodes::Code Bytecodes::pd_base_code_for(Code code) { - // No i486 specific bytecodes - return code; -}
--- a/hotspot/src/cpu/x86/vm/bytecodes_x86.hpp Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,30 +0,0 @@ -/* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef CPU_X86_VM_BYTECODES_X86_HPP -#define CPU_X86_VM_BYTECODES_X86_HPP - -// No i486 specific bytecodes - -#endif // CPU_X86_VM_BYTECODES_X86_HPP
--- a/hotspot/src/cpu/x86/vm/compiledIC_x86.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/cpu/x86/vm/compiledIC_x86.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -47,34 +47,6 @@ return is_icholder_entry(call->destination()); } -//----------------------------------------------------------------------------- -// High-level access to an inline cache. Guaranteed to be MT-safe. - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // Search for the ic_call at the given address. - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; - } -} - // ---------------------------------------------------------------------------- #define __ _masm.
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -280,6 +280,8 @@ case T_BYTE : __ sign_extend_byte (rax); break; case T_SHORT : __ sign_extend_short(rax); break; case T_INT : /* nothing to do */ break; + case T_LONG : /* nothing to do */ break; + case T_VOID : /* nothing to do */ break; case T_DOUBLE : case T_FLOAT : { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
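The BasicType switch above now names T_LONG and T_VOID explicitly as "nothing to do" cases, so those result types can no longer drift into the floating-point branch or the default. A schematic, stand-alone version of the same switch shape (the enum values and messages here are illustrative, not the HotSpot generator):

#include <iostream>

enum BasicType { T_BOOLEAN, T_BYTE, T_SHORT, T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE };

// Integral results that need work get converted; types already in the right
// place are named explicitly as no-ops instead of relying on fall-through.
void handle_result(BasicType t) {
  switch (t) {
    case T_BOOLEAN: std::cout << "clear upper bits\n";  break;
    case T_BYTE   : std::cout << "sign-extend byte\n";  break;
    case T_SHORT  : std::cout << "sign-extend short\n"; break;
    case T_INT    : /* nothing to do */                 break;
    case T_LONG   : /* nothing to do */                 break;
    case T_VOID   : /* nothing to do */                 break;
    case T_FLOAT  :
    case T_DOUBLE : std::cout << "move FP result\n";    break;
    default       : std::cout << "unexpected type\n";   break;
  }
}

int main() {
  handle_result(T_BYTE);   // prints the conversion step
  handle_result(T_LONG);   // prints nothing: an explicit no-op case
  return 0;
}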
--- a/hotspot/src/cpu/zero/vm/bytecodes_zero.cpp Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007 Red Hat, Inc. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "interpreter/bytecodes.hpp" - -void Bytecodes::pd_initialize() { - // No zero specific initialization -}
--- a/hotspot/src/cpu/zero/vm/bytecodes_zero.hpp Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,31 +0,0 @@ -/* - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. - * Copyright 2009 Red Hat, Inc. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef CPU_ZERO_VM_BYTECODES_ZERO_HPP -#define CPU_ZERO_VM_BYTECODES_ZERO_HPP - -// This file is intentionally empty - -#endif // CPU_ZERO_VM_BYTECODES_ZERO_HPP
--- a/hotspot/src/cpu/zero/vm/compiledIC_zero.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/cpu/zero/vm/compiledIC_zero.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -58,34 +58,6 @@ return is_icholder_entry(call->destination()); } -//----------------------------------------------------------------------------- -// High-level access to an inline cache. Guaranteed to be MT-safe. - -CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) - : _ic_call(call) -{ - address ic_call = call->instruction_address(); - - assert(ic_call != NULL, "ic_call address must be set"); - assert(nm != NULL, "must pass nmethod"); - assert(nm->contains(ic_call), "must be in nmethod"); - - // Search for the ic_call at the given address. - RelocIterator iter(nm, ic_call, ic_call+1); - bool ret = iter.next(); - assert(ret == true, "relocInfo must exist at this address"); - assert(iter.addr() == ic_call, "must find ic_call"); - if (iter.type() == relocInfo::virtual_call_type) { - virtual_call_Relocation* r = iter.virtual_call_reloc(); - _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); - } else { - assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); - _is_optimized = true; - _value = NULL; - } -} - // ---------------------------------------------------------------------------- void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -37,6 +37,7 @@ #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" #include "runtime/interfaceSupport.hpp"
--- a/hotspot/src/os/aix/vm/osThread_aix.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/aix/vm/osThread_aix.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,17 +24,13 @@ */ // no precompiled headers -#include "runtime/atomic.hpp" + #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp" #include "runtime/osThread.hpp" #include "runtime/safepoint.hpp" #include "runtime/vmThread.hpp" -#ifdef TARGET_ARCH_ppc -# include "assembler_ppc.inline.hpp" -#endif - void OSThread::pd_initialize() { assert(this != NULL, "check");
--- a/hotspot/src/os/aix/vm/os_aix.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/aix/vm/os_aix.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -48,6 +48,7 @@ #include "prims/jvm.h" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/extendedPC.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" @@ -2812,13 +2813,6 @@ os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; } -void os::yield_all() { - // Yields to all threads, including threads with lower priorities - // Threads on Linux are all with same priority. The Solaris style - // os::yield_all() with nanosleep(1ms) is not necessary. - sched_yield(); -} - //////////////////////////////////////////////////////////////////////////////// // thread priority support @@ -3075,7 +3069,7 @@ for (int n = 0; !osthread->sr.is_suspended(); n++) { for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) { - os::yield_all(); + os::yield(); } // timeout, try to cancel the request @@ -3109,7 +3103,7 @@ if (sr_notify(osthread) == 0) { for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) { for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) { - os::yield_all(); + os::yield(); } } } else {
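With os::yield_all() removed, the suspend/resume retry loops above simply call os::yield() in a bounded spin until the target thread reaches the expected state. The same wait-by-yielding shape written stand-alone (the atomic flag, bounds, and timing are illustrative only):

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

std::atomic<bool> suspended{false};   // stands in for osthread->sr.is_suspended()

// Spin for a bounded number of rounds, yielding the processor on every
// iteration, much like the nested loops around os::yield() above.
bool wait_until_suspended(long max_rounds) {
  for (long n = 0; n < max_rounds && !suspended.load(); n++) {
    std::this_thread::yield();
  }
  return suspended.load();
}

int main() {
  std::thread target([] {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
    suspended.store(true);            // the target eventually reports "suspended"
  });
  std::cout << (wait_until_suspended(10000000)
                    ? "suspended\n"
                    : "timed out, would cancel the request\n");
  target.join();
  return 0;
}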
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -41,6 +41,7 @@ #include "prims/jvm.h" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/extendedPC.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" @@ -2600,13 +2601,6 @@ os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; } -void os::yield_all() { - // Yields to all threads, including threads with lower priorities - // Threads on Bsd are all with same priority. The Solaris style - // os::yield_all() with nanosleep(1ms) is not necessary. - sched_yield(); -} - //////////////////////////////////////////////////////////////////////////////// // thread priority support
--- a/hotspot/src/os/linux/vm/os_linux.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -41,6 +41,7 @@ #include "prims/jvm.h" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/extendedPC.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" @@ -3795,13 +3796,6 @@ os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; } -void os::yield_all() { - // Yields to all threads, including threads with lower priorities - // Threads on Linux are all with same priority. The Solaris style - // os::yield_all() with nanosleep(1ms) is not necessary. - sched_yield(); -} - //////////////////////////////////////////////////////////////////////////////// // thread priority support
--- a/hotspot/src/os/solaris/vm/osThread_solaris.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/solaris/vm/osThread_solaris.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -23,7 +23,6 @@ */ // no precompiled headers -#include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp"
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -41,6 +41,7 @@ #include "prims/jvm.h" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/extendedPC.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" @@ -3186,11 +3187,6 @@ os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; } -void os::yield_all() { - // Yields to all threads, including threads with lower priorities - os::sleep(Thread::current(), 1, false); -} - // Interface for setting lwp priorities. If we are using T2 libthread, // which forces the use of BoundThreads or we manually set UseBoundThreads, // all of our threads will be assigned to real lwp's. Using the thr_setprio
--- a/hotspot/src/os/solaris/vm/thread_solaris.inline.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/solaris/vm/thread_solaris.inline.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -29,7 +29,6 @@ #error "This file should only be included from thread.inline.hpp" #endif -#include "runtime/atomic.inline.hpp" #include "runtime/thread.hpp" #include "runtime/threadLocalStorage.hpp"
--- a/hotspot/src/os/windows/vm/osThread_windows.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/windows/vm/osThread_windows.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -23,7 +23,6 @@ */ // no precompiled headers -#include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.hpp"
--- a/hotspot/src/os/windows/vm/os_windows.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -44,6 +44,7 @@ #include "prims/jvm.h" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/extendedPC.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" @@ -3526,11 +3527,6 @@ void os::yield() { os::NakedYield(); } -void os::yield_all() { - // Yields to all threads, including threads with lower priorities - Sleep(1); -} - // Win32 only gives you access to seven real priorities at a time, // so we compress Java's ten down to seven. It would be better // if we dynamically adjusted relative priorities.
--- a/hotspot/src/os/windows/vm/threadCritical_windows.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os/windows/vm/threadCritical_windows.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadCritical.hpp"
--- a/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,6 +1,6 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. - * Copyright 2012, 2013 SAP AG. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright 2012, 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -307,7 +307,7 @@ // doesn't work for us. We use: ((NativeInstruction*)pc)->is_safepoint_poll()) { if (TraceTraps) { - tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc); + tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc)); } stub = SharedRuntime::get_poll_stub(pc); } @@ -316,7 +316,7 @@ else if (sig == SIGTRAP && TrapBasedICMissChecks && nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) { if (TraceTraps) { - tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc); + tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc)); } stub = SharedRuntime::get_ic_miss_stub(); } @@ -325,7 +325,7 @@ else if (sig == SIGTRAP && TrapBasedNullChecks && nativeInstruction_at(pc)->is_sigtrap_null_check()) { if (TraceTraps) { - tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc); + tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc)); } stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); } @@ -335,7 +335,7 @@ CodeCache::contains((void*) pc) && !MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) { if (TraceTraps) { - tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc); + tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc)); } stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); } @@ -345,7 +345,7 @@ else if (sig == SIGTRAP && TrapBasedRangeChecks && nativeInstruction_at(pc)->is_sigtrap_range_check()) { if (TraceTraps) { - tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc); + tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", p2i(pc)); } stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); } @@ -572,7 +572,7 @@ st->cr(); intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); - st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp); + st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp)); print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t)); st->cr(); @@ -580,7 +580,7 @@ // point to garbage if entry point in an nmethod is corrupted. Leave // this at the end, and hope for the best. address pc = os::Linux::ucontext_get_pc(uc); - st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc); + st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc)); print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4); st->cr(); }
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -38,6 +38,7 @@ #include "prims/jvm.h" #include "prims/jvm_misc.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/extendedPC.hpp" #include "runtime/frame.inline.hpp" #include "runtime/interfaceSupport.hpp"
--- a/hotspot/src/share/vm/asm/assembler.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/asm/assembler.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,7 +26,6 @@ #include "asm/macroAssembler.hpp" #include "asm/macroAssembler.inline.hpp" #include "asm/codeBuffer.hpp" -#include "runtime/atomic.hpp" #include "runtime/atomic.inline.hpp" #include "runtime/icache.hpp" #include "runtime/os.hpp"
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -47,6 +47,7 @@ #include "memory/resourceArea.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/interfaceSupport.hpp"
--- a/hotspot/src/share/vm/ci/ciConstantPoolCache.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/ci/ciConstantPoolCache.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -38,7 +38,7 @@ int expected_size) { _elements = new (arena) GrowableArray<void*>(arena, expected_size, 0, 0); - _keys = new (arena) GrowableArray<intptr_t>(arena, expected_size, 0, 0); + _keys = new (arena) GrowableArray<int>(arena, expected_size, 0, 0); } // ------------------------------------------------------------------
--- a/hotspot/src/share/vm/ci/ciConstantPoolCache.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/ci/ciConstantPoolCache.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -35,7 +35,7 @@ // Usage note: this klass has nothing to do with ConstantPoolCache*. class ciConstantPoolCache : public ResourceObj { private: - GrowableArray<intptr_t>* _keys; + GrowableArray<int>* _keys; GrowableArray<void*>* _elements; int find(int index);
--- a/hotspot/src/share/vm/ci/ciEnv.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/ci/ciEnv.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -512,24 +512,9 @@ } else { // Check if it's resolved if it's not a symbol constant pool entry. klass = KlassHandle(THREAD, ConstantPool::klass_at_if_loaded(cpool, index)); - + // Try to look it up by name. if (klass.is_null()) { - // The klass has not been inserted into the constant pool. - // Try to look it up by name. - { - // We have to lock the cpool to keep the oop from being resolved - // while we are accessing it. - MonitorLockerEx ml(cpool->lock()); - constantTag tag = cpool->tag_at(index); - if (tag.is_klass()) { - // The klass has been inserted into the constant pool - // very recently. - klass = KlassHandle(THREAD, cpool->resolved_klass_at(index)); - } else { - assert(cpool->tag_at(index).is_unresolved_klass(), "wrong tag"); - klass_name = cpool->unresolved_klass_at(index); - } - } + klass_name = cpool->klass_name_at(index); } }
--- a/hotspot/src/share/vm/ci/ciReplay.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/ci/ciReplay.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,4 +1,4 @@ -/* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. +/* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -686,7 +686,7 @@ switch (cp->tag_at(i).value()) { case JVM_CONSTANT_UnresolvedClass: { if (tag == JVM_CONSTANT_Class) { - tty->print_cr("Resolving klass %s at %d", cp->unresolved_klass_at(i)->as_utf8(), i); + tty->print_cr("Resolving klass %s at %d", cp->klass_name_at(i)->as_utf8(), i); Klass* k = cp->klass_at(i, CHECK); } break;
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -510,7 +510,7 @@ jbyte tag = cp->tag_at(index).value(); switch (tag) { case JVM_CONSTANT_UnresolvedClass: { - Symbol* class_name = cp->unresolved_klass_at(index); + Symbol* class_name = cp->klass_name_at(index); // check the name, even if _cp_patches will overwrite it verify_legal_class_name(class_name, CHECK_(nullHandle)); break; @@ -3161,7 +3161,7 @@ if (_need_verify) is_array = super_klass->oop_is_array(); } else if (_need_verify) { - is_array = (_cp->unresolved_klass_at(super_class_index)->byte_at(0) == JVM_SIGNATURE_ARRAY); + is_array = (_cp->klass_name_at(super_class_index)->byte_at(0) == JVM_SIGNATURE_ARRAY); } if (_need_verify) { guarantee_property(!is_array, @@ -3855,7 +3855,7 @@ "Invalid this class index %u in constant pool in class file %s", this_class_index, CHECK_(nullHandle)); - Symbol* class_name = cp->unresolved_klass_at(this_class_index); + Symbol* class_name = cp->klass_name_at(this_class_index); assert(class_name != NULL, "class_name can't be null"); // It's important to set parsed_name *before* resolving the super class. @@ -4139,8 +4139,8 @@ } // Allocate mirror and initialize static fields - java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle)); - + java_lang_Class::create_mirror(this_klass, class_loader, protection_domain, + CHECK_(nullHandle)); // Generate any default methods - default methods are interface methods // that have a default implementation. This is new with Lambda project.
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -57,6 +57,7 @@ #include "memory/metadataFactory.hpp" #include "memory/metaspaceShared.hpp" #include "memory/oopFactory.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/jniHandles.hpp" #include "runtime/mutex.hpp" #include "runtime/safepoint.hpp" @@ -549,6 +550,7 @@ // GC root of class loader data created. ClassLoaderData* ClassLoaderDataGraph::_head = NULL; ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL; +ClassLoaderData* ClassLoaderDataGraph::_saved_unloading = NULL; ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL; bool ClassLoaderDataGraph::_should_purge = false; @@ -656,7 +658,9 @@ void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!"); - for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) { + // Only walk the head until any clds not purged from prior unloading + // (CMS doesn't purge right away). + for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) { cld->classes_do(f); } } @@ -704,6 +708,11 @@ ClassLoaderData* data = _head; ClassLoaderData* prev = NULL; bool seen_dead_loader = false; + + // Save previous _unloading pointer for CMS which may add to unloading list before + // purging and we don't want to rewalk the previously unloaded class loader data. + _saved_unloading = _unloading; + // mark metadata seen on the stack and code cache so we can delete // unneeded entries. bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
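The new _saved_unloading pointer above snapshots the head of the unloading list before a cycle adds to it, and classes_unloading_do() then walks only from the current head down to that snapshot, so loader data still parked on the list from an earlier CMS cycle is not revisited. Reduced to its essentials this is a linked-list walk that stops at a remembered sentinel; a minimal sketch with hypothetical names:

#include <cstdio>

struct CLD {                     // stand-in for ClassLoaderData
  int  id;
  CLD* next;
};

CLD* unloading       = nullptr;  // new entries are pushed at the head
CLD* saved_unloading = nullptr;  // head as it was at the previous cycle

// Visit only entries added since the snapshot, mirroring the walk that
// stops at _saved_unloading instead of NULL.
void visit_new_unloading(void (*f)(CLD*)) {
  for (CLD* cld = unloading; cld != saved_unloading; cld = cld->next) {
    f(cld);
  }
}

int main() {
  CLD old_entry{1, nullptr};
  unloading = &old_entry;
  saved_unloading = unloading;   // remember where the previous cycle ended

  CLD fresh{2, unloading};       // loader unloaded in the current cycle
  unloading = &fresh;

  visit_new_unloading([](CLD* c) { std::printf("visiting %d\n", c->id); });  // prints only "visiting 2"
  return 0;
}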
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -66,6 +66,7 @@ static ClassLoaderData* _unloading; // CMS support. static ClassLoaderData* _saved_head; + static ClassLoaderData* _saved_unloading; static bool _should_purge; static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS); @@ -187,8 +188,6 @@ JNIHandleBlock* handles() const; void set_handles(JNIHandleBlock* handles); - Mutex* metaspace_lock() const { return _metaspace_lock; } - // GC interface. void clear_claimed() { _claimed = 0; } bool claimed() const { return _claimed == 1; } @@ -216,6 +215,8 @@ return _the_null_class_loader_data; } + Mutex* metaspace_lock() const { return _metaspace_lock; } + bool is_anonymous() const { return _is_anonymous; } static void init_null_class_loader_data() {
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -558,7 +558,7 @@ } } } - create_mirror(k, Handle(NULL), CHECK); + create_mirror(k, Handle(NULL), Handle(NULL), CHECK); } void java_lang_Class::initialize_mirror_fields(KlassHandle k, @@ -578,7 +578,8 @@ InstanceKlass::cast(k())->do_local_static_fields(&initialize_static_field, mirror, CHECK); } -void java_lang_Class::create_mirror(KlassHandle k, Handle protection_domain, TRAPS) { +void java_lang_Class::create_mirror(KlassHandle k, Handle class_loader, + Handle protection_domain, TRAPS) { assert(k->java_mirror() == NULL, "should only assign mirror once"); // Use this moment of initialization to cache modifier_flags also, // to support Class.getModifiers(). Instance classes recalculate @@ -633,6 +634,9 @@ } } + // set the classLoader field in the java_lang_Class instance + set_class_loader(mirror(), class_loader()); + // Setup indirection from klass->mirror last // after any exceptions can happen during allocations. if (!k.is_null()) { @@ -694,6 +698,18 @@ } +void java_lang_Class::set_class_loader(oop java_class, oop loader) { + // jdk7 runs Queens in bootstrapping and jdk8-9 has no coordinated pushes yet. + if (_class_loader_offset != 0) { + java_class->obj_field_put(_class_loader_offset, loader); + } +} + +oop java_lang_Class::class_loader(oop java_class) { + assert(_class_loader_offset != 0, "must be set"); + return java_class->obj_field(_class_loader_offset); +} + oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) { // This should be improved by adding a field at the Java level or by // introducing a new VM klass (see comment in ClassFileParser) @@ -853,6 +869,12 @@ compute_optional_offset(classRedefinedCount_offset, klass_oop, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature()); + // Needs to be optional because the old build runs Queens during bootstrapping + // and jdk8-9 doesn't have coordinated pushes yet. + compute_optional_offset(_class_loader_offset, + klass_oop, vmSymbols::classLoader_name(), + vmSymbols::classloader_signature()); + CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET); } @@ -3073,6 +3095,7 @@ int java_lang_Class::_array_klass_offset; int java_lang_Class::_oop_size_offset; int java_lang_Class::_static_oop_field_count_offset; +int java_lang_Class::_class_loader_offset; int java_lang_Class::_protection_domain_offset; int java_lang_Class::_init_lock_offset; int java_lang_Class::_signers_offset;
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -240,19 +240,23 @@ static int _protection_domain_offset; static int _init_lock_offset; static int _signers_offset; + static int _class_loader_offset; static bool offsets_computed; static int classRedefinedCount_offset; + static GrowableArray<Klass*>* _fixup_mirror_list; static void set_init_lock(oop java_class, oop init_lock); static void set_protection_domain(oop java_class, oop protection_domain); + static void set_class_loader(oop java_class, oop class_loader); static void initialize_mirror_fields(KlassHandle k, Handle mirror, Handle protection_domain, TRAPS); public: static void compute_offsets(); // Instance creation - static void create_mirror(KlassHandle k, Handle protection_domain, TRAPS); + static void create_mirror(KlassHandle k, Handle class_loader, + Handle protection_domain, TRAPS); static void fixup_mirror(KlassHandle k, TRAPS); static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS); // Conversion @@ -290,6 +294,8 @@ static objArrayOop signers(oop java_class); static void set_signers(oop java_class, objArrayOop signers); + static oop class_loader(oop java_class); + static int oop_size(oop java_class); static void set_oop_size(oop java_class, int size); static int static_oop_field_count(oop java_class);
--- a/hotspot/src/share/vm/classfile/stringTable.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/classfile/stringTable.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -33,6 +33,7 @@ #include "memory/gcLocker.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/hashtable.inline.hpp" #if INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/classfile/symbolTable.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -33,6 +33,7 @@ #include "memory/gcLocker.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/mutexLocker.hpp" #include "utilities/hashtable.inline.hpp"
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -572,6 +572,7 @@ template(serializePropertiesToByteArray_signature, "()[B") \ template(serializeAgentPropertiesToByteArray_name, "serializeAgentPropertiesToByteArray") \ template(classRedefinedCount_name, "classRedefinedCount") \ + template(classLoader_name, "classLoader") \ \ /* trace signatures */ \ TRACE_TEMPLATES(template) \
--- a/hotspot/src/share/vm/code/compiledIC.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/code/compiledIC.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -159,6 +159,30 @@ //----------------------------------------------------------------------------- // High-level access to an inline cache. Guaranteed to be MT-safe. +CompiledIC::CompiledIC(nmethod* nm, NativeCall* call) + : _ic_call(call) +{ + address ic_call = call->instruction_address(); + + assert(ic_call != NULL, "ic_call address must be set"); + assert(nm != NULL, "must pass nmethod"); + assert(nm->contains(ic_call), "must be in nmethod"); + + // Search for the ic_call at the given address. + RelocIterator iter(nm, ic_call, ic_call+1); + bool ret = iter.next(); + assert(ret == true, "relocInfo must exist at this address"); + assert(iter.addr() == ic_call, "must find ic_call"); + if (iter.type() == relocInfo::virtual_call_type) { + virtual_call_Relocation* r = iter.virtual_call_reloc(); + _is_optimized = false; + _value = nativeMovConstReg_at(r->cached_value()); + } else { + assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); + _is_optimized = true; + _value = NULL; + } +} bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) { assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
--- a/hotspot/src/share/vm/code/nmethod.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/code/nmethod.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -37,6 +37,7 @@ #include "oops/methodData.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "prims/jvmtiImpl.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/sweeper.hpp" @@ -363,27 +364,30 @@ set_exception_cache(new_entry); } -void nmethod::remove_from_exception_cache(ExceptionCache* ec) { +void nmethod::clean_exception_cache(BoolObjectClosure* is_alive) { ExceptionCache* prev = NULL; ExceptionCache* curr = exception_cache(); - assert(curr != NULL, "nothing to remove"); - // find the previous and next entry of ec - while (curr != ec) { - prev = curr; - curr = curr->next(); - assert(curr != NULL, "ExceptionCache not found"); + + while (curr != NULL) { + ExceptionCache* next = curr->next(); + + Klass* ex_klass = curr->exception_type(); + if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) { + if (prev == NULL) { + set_exception_cache(next); + } else { + prev->set_next(next); + } + delete curr; + // prev stays the same. + } else { + prev = curr; + } + + curr = next; } - // now: curr == ec - ExceptionCache* next = curr->next(); - if (prev == NULL) { - set_exception_cache(next); - } else { - prev->set_next(next); - } - delete curr; } - // public method for accessing the exception cache // These are the public access methods. address nmethod::handler_for_exception_and_pc(Handle exception, address pc) { @@ -668,8 +672,10 @@ _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); code_buffer->copy_values_to(this); - if (ScavengeRootsInCode && detect_scavenge_root_oops()) { - CodeCache::add_scavenge_root_nmethod(this); + if (ScavengeRootsInCode) { + if (detect_scavenge_root_oops()) { + CodeCache::add_scavenge_root_nmethod(this); + } Universe::heap()->register_nmethod(this); } debug_only(verify_scavenge_root_oops()); @@ -753,8 +759,10 @@ _hotness_counter = NMethodSweeper::hotness_counter_reset_val(); code_buffer->copy_values_to(this); - if (ScavengeRootsInCode && detect_scavenge_root_oops()) { - CodeCache::add_scavenge_root_nmethod(this); + if (ScavengeRootsInCode) { + if (detect_scavenge_root_oops()) { + CodeCache::add_scavenge_root_nmethod(this); + } Universe::heap()->register_nmethod(this); } DEBUG_ONLY(verify_scavenge_root_oops();) @@ -869,8 +877,10 @@ code_buffer->copy_values_to(this); debug_info->copy_to(this); dependencies->copy_to(this); - if (ScavengeRootsInCode && detect_scavenge_root_oops()) { - CodeCache::add_scavenge_root_nmethod(this); + if (ScavengeRootsInCode) { + if (detect_scavenge_root_oops()) { + CodeCache::add_scavenge_root_nmethod(this); + } Universe::heap()->register_nmethod(this); } debug_only(verify_scavenge_root_oops()); @@ -1612,15 +1622,7 @@ } // Exception cache - ExceptionCache* ec = exception_cache(); - while (ec != NULL) { - Klass* ex_klass = ec->exception_type(); - ExceptionCache* next_ec = ec->next(); - if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) { - remove_from_exception_cache(ec); - } - ec = next_ec; - } + clean_exception_cache(is_alive); // If class unloading occurred we first iterate over all inline caches and // clear ICs where the cached oop is referring to an unloaded klass or method.
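clean_exception_cache() above replaces the old find-then-unlink helper with a single forward pass: walk the exception cache with a trailing prev pointer, unlink and delete any entry whose exception klass has a dead loader, and advance prev only past survivors. The same unlink pattern in isolation (Node and the dead flag are stand-ins for ExceptionCache and the loader-liveness test):

#include <cassert>

struct Node {
  bool  dead;   // stands in for "exception_type()'s class loader is no longer alive"
  Node* next;
};

// Delete every dead node, exactly like the prev/curr walk in clean_exception_cache().
void clean_list(Node** head) {
  Node* prev = nullptr;
  Node* curr = *head;
  while (curr != nullptr) {
    Node* next = curr->next;
    if (curr->dead) {
      if (prev == nullptr) {
        *head = next;        // unlinking the head entry
      } else {
        prev->next = next;   // unlinking an interior entry
      }
      delete curr;           // prev stays where it is
    } else {
      prev = curr;           // prev only advances past surviving entries
    }
    curr = next;
  }
}

int main() {
  Node* c = new Node{false, nullptr};
  Node* b = new Node{true,  c};
  Node* a = new Node{true,  b};
  Node* head = a;
  clean_list(&head);
  assert(head == c && head->next == nullptr);
  delete c;
  return 0;
}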
--- a/hotspot/src/share/vm/code/nmethod.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/code/nmethod.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -529,7 +529,7 @@ void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; } address handler_for_exception_and_pc(Handle exception, address pc); void add_handler_for_exception_and_pc(Handle exception, address pc, address handler); - void remove_from_exception_cache(ExceptionCache* ec); + void clean_exception_cache(BoolObjectClosure* is_alive); // implicit exceptions support address continuation_for_implicit_exception(address pc);
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -36,6 +36,7 @@ #include "oops/oop.inline.hpp" #include "prims/nativeLookup.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/init.hpp" #include "runtime/interfaceSupport.hpp" @@ -589,11 +590,10 @@ /** - * Add a CompileTask to a CompileQueue + * Add a CompileTask to a CompileQueue. */ void CompileQueue::add(CompileTask* task) { assert(lock()->owned_by_self(), "must own lock"); - assert(!CompileBroker::is_compilation_disabled_forever(), "Do not add task if compilation is turned off forever"); task->set_next(NULL); task->set_prev(NULL); @@ -639,8 +639,11 @@ while (next != NULL) { CompileTask* current = next; next = current->next(); - // Wake up thread that blocks on the compile task. - current->lock()->notify(); + { + // Wake up thread that blocks on the compile task. + MutexLocker ct_lock(current->lock()); + current->lock()->notify(); + } // Put the task back on the freelist. CompileTask::free(current); }
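The queue-draining code above now enters the task's monitor (MutexLocker ct_lock) before calling notify(); HotSpot monitors expect the notifier to own the lock, and signalling under the lock also keeps the notification ordered with the waiter's state check. A rough analogue with standard C++ primitives rather than HotSpot's Monitor (the names and the done flag are illustrative):

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex              task_lock;
std::condition_variable task_cv;
bool                    task_done = false;   // stands in for the blocking compile task's state

void waiting_thread() {
  std::unique_lock<std::mutex> guard(task_lock);
  task_cv.wait(guard, [] { return task_done; });   // like the thread blocked on the compile task
  std::cout << "task observed as done\n";
}

void notifying_thread() {
  std::lock_guard<std::mutex> guard(task_lock);    // analogous to MutexLocker ct_lock(current->lock())
  task_done = true;
  task_cv.notify_one();                            // signal while holding the lock
}

int main() {
  std::thread waiter(waiting_thread);
  notifying_thread();
  waiter.join();
  return 0;
}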
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_HPP #include "memory/genOopClosures.hpp" +#include "memory/iterator.hpp" ///////////////////////////////////////////////////////////////// // Closures used by ConcurrentMarkSweepGeneration's collector @@ -48,33 +49,13 @@ } \ } -// Applies the given oop closure to all oops in all klasses visited. -class CMKlassClosure : public KlassClosure { - friend class CMSOopClosure; - friend class CMSOopsInGenClosure; - - OopClosure* _oop_closure; - - // Used when _oop_closure couldn't be set in an initialization list. - void initialize(OopClosure* oop_closure) { - assert(_oop_closure == NULL, "Should only be called once"); - _oop_closure = oop_closure; - } +// TODO: This duplication of the MetadataAwareOopClosure class is only needed +// because some CMS OopClosures derive from OopsInGenClosure. It would be +// good to get rid of them completely. +class MetadataAwareOopsInGenClosure: public OopsInGenClosure { + KlassToOopClosure _klass_closure; public: - CMKlassClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) { } - - void do_klass(Klass* k); -}; - -// The base class for all CMS marking closures. -// It's used to proxy through the metadata to the oops defined in them. -class CMSOopClosure: public ExtendedOopClosure { - CMKlassClosure _klass_closure; - public: - CMSOopClosure() : ExtendedOopClosure() { - _klass_closure.initialize(this); - } - CMSOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { + MetadataAwareOopsInGenClosure() { _klass_closure.initialize(this); } @@ -87,26 +68,7 @@ virtual void do_class_loader_data(ClassLoaderData* cld); }; -// TODO: This duplication of the CMSOopClosure class is only needed because -// some CMS OopClosures derive from OopsInGenClosure. It would be good -// to get rid of them completely. -class CMSOopsInGenClosure: public OopsInGenClosure { - CMKlassClosure _klass_closure; - public: - CMSOopsInGenClosure() { - _klass_closure.initialize(this); - } - - virtual bool do_metadata() { return do_metadata_nv(); } - inline bool do_metadata_nv() { return true; } - - virtual void do_klass(Klass* k); - void do_klass_nv(Klass* k); - - virtual void do_class_loader_data(ClassLoaderData* cld); -}; - -class MarkRefsIntoClosure: public CMSOopsInGenClosure { +class MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure { private: const MemRegion _span; CMSBitMap* _bitMap; @@ -118,7 +80,7 @@ virtual void do_oop(narrowOop* p); }; -class Par_MarkRefsIntoClosure: public CMSOopsInGenClosure { +class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure { private: const MemRegion _span; CMSBitMap* _bitMap; @@ -132,7 +94,7 @@ // A variant of the above used in certain kinds of CMS // marking verification. -class MarkRefsIntoVerifyClosure: public CMSOopsInGenClosure { +class MarkRefsIntoVerifyClosure: public MetadataAwareOopsInGenClosure { private: const MemRegion _span; CMSBitMap* _verification_bm; @@ -147,7 +109,7 @@ }; // The non-parallel version (the parallel version appears further below). -class PushAndMarkClosure: public CMSOopClosure { +class PushAndMarkClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; MemRegion _span; @@ -177,7 +139,7 @@ // synchronization (for instance, via CAS). 
The marking stack // used in the non-parallel case above is here replaced with // an OopTaskQueue structure to allow efficient work stealing. -class Par_PushAndMarkClosure: public CMSOopClosure { +class Par_PushAndMarkClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; MemRegion _span; @@ -198,7 +160,7 @@ }; // The non-parallel version (the parallel version appears further below). -class MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure { +class MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure { private: MemRegion _span; CMSBitMap* _bit_map; @@ -239,7 +201,7 @@ // stack and the bitMap are shared, so access needs to be suitably // synchronized. An OopTaskQueue structure, supporting efficient // work stealing, replaces a CMSMarkStack for storing grey objects. -class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure { +class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure { private: MemRegion _span; CMSBitMap* _bit_map; @@ -265,7 +227,7 @@ // This closure is used during the concurrent marking phase // following the first checkpoint. Its use is buried in // the closure MarkFromRootsClosure. -class PushOrMarkClosure: public CMSOopClosure { +class PushOrMarkClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; MemRegion _span; @@ -298,7 +260,7 @@ // This closure is used during the concurrent marking phase // following the first checkpoint. Its use is buried in // the closure Par_MarkFromRootsClosure. -class Par_PushOrMarkClosure: public CMSOopClosure { +class Par_PushOrMarkClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; MemRegion _whole_span; @@ -338,7 +300,7 @@ // processing phase of the CMS final checkpoint step, as // well as during the concurrent precleaning of the discovered // reference lists. -class CMSKeepAliveClosure: public CMSOopClosure { +class CMSKeepAliveClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; const MemRegion _span; @@ -358,7 +320,7 @@ inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } }; -class CMSInnerParMarkAndPushClosure: public CMSOopClosure { +class CMSInnerParMarkAndPushClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; MemRegion _span; @@ -379,7 +341,7 @@ // A parallel (MT) version of the above, used when // reference processing is parallel; the only difference // is in the do_oop method. -class CMSParKeepAliveClosure: public CMSOopClosure { +class CMSParKeepAliveClosure: public MetadataAwareOopClosure { private: MemRegion _span; OopTaskQueue* _work_queue;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -44,33 +44,20 @@ } } -// CMSOopClosure and CMSoopsInGenClosure are duplicated, +// MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated, // until we get rid of OopsInGenClosure. -inline void CMSOopClosure::do_klass(Klass* k) { do_klass_nv(k); } -inline void CMSOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); } - -inline void CMSOopClosure::do_klass_nv(Klass* k) { +inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) { ClassLoaderData* cld = k->class_loader_data(); do_class_loader_data(cld); } -inline void CMSOopsInGenClosure::do_klass_nv(Klass* k) { - ClassLoaderData* cld = k->class_loader_data(); - do_class_loader_data(cld); -} +inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); } -inline void CMSOopClosure::do_class_loader_data(ClassLoaderData* cld) { - assert(_klass_closure._oop_closure == this, "Must be"); - - bool claim = true; // Must claim the class loader data before processing. - cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); -} -inline void CMSOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) { +inline void MetadataAwareOopsInGenClosure::do_class_loader_data(ClassLoaderData* cld) { assert(_klass_closure._oop_closure == this, "Must be"); bool claim = true; // Must claim the class loader data before processing. cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); } - #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSOOPCLOSURES_INLINE_HPP
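After the rename, both metadata-aware closures funnel through the same two steps: claim the ClassLoaderData, then hand every oop the metadata holds back to the originating oop closure via a klass-to-oop adapter. A simplified, self-contained sketch of that adapter shape (the visitor types below are stand-ins, not the HotSpot classes):

#include <iostream>
#include <vector>

struct Oop { int value; };

struct OopVisitor {                          // stand-in for OopClosure
  virtual void do_oop(Oop* o) = 0;
  virtual ~OopVisitor() {}
};

struct FakeKlass {                           // stand-in for Klass: holds oops to visit
  std::vector<Oop> embedded_oops;
  void oops_do(OopVisitor* v) {
    for (Oop& o : embedded_oops) v->do_oop(&o);
  }
};

// The adapter: like KlassToOopClosure, forward a klass's oops to the oop
// visitor that was bound with initialize().
struct KlassToOopAdapter {
  OopVisitor* _oop_visitor;
  KlassToOopAdapter() : _oop_visitor(nullptr) {}
  void initialize(OopVisitor* v) { _oop_visitor = v; }
  void do_klass(FakeKlass* k) { k->oops_do(_oop_visitor); }
};

struct PrintingVisitor : OopVisitor {
  void do_oop(Oop* o) { std::cout << "visited oop " << o->value << "\n"; }
};

int main() {
  FakeKlass k;
  k.embedded_oops.push_back(Oop{1});
  k.embedded_oops.push_back(Oop{2});

  PrintingVisitor printer;
  KlassToOopAdapter adapter;
  adapter.initialize(&printer);              // same late-binding initialize() used above
  adapter.do_klass(&k);                      // every oop in the klass reaches the visitor
  return 0;
}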
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -49,13 +49,14 @@ #include "memory/genCollectedHeap.hpp" #include "memory/genMarkSweep.hpp" #include "memory/genOopClosures.inline.hpp" -#include "memory/iterator.hpp" +#include "memory/iterator.inline.hpp" #include "memory/padded.hpp" #include "memory/referencePolicy.hpp" #include "memory/resourceArea.hpp" #include "memory/tenuredGeneration.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" @@ -2024,7 +2025,7 @@ SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); - GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL); + GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id()); if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) { gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d " "collections passed to foreground collector", _full_gcs_since_conc_gc); @@ -2534,8 +2535,10 @@ assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), "VM thread should have CMS token"); + // The gc id is created in register_foreground_gc_start if this collection is synchronous + const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id(); NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose, - true, NULL);) + true, NULL, gc_id);) if (UseAdaptiveSizePolicy) { size_policy()->ms_collection_begin(); } @@ -3120,7 +3123,7 @@ // Mark from roots one level into CMS MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(), markBitMap()); - CMKlassClosure klass_closure(¬Older); + KlassToOopClosure klass_closure(¬Older); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 
gch->gen_process_strong_roots(_cmsGen->level(), @@ -3538,6 +3541,7 @@ public: CMSPhaseAccounting(CMSCollector *collector, const char *phase, + const GCId gc_id, bool print_cr = true); ~CMSPhaseAccounting(); @@ -3546,6 +3550,7 @@ const char *_phase; elapsedTimer _wallclock; bool _print_cr; + const GCId _gc_id; public: // Not MT-safe; so do not pass around these StackObj's @@ -3561,15 +3566,15 @@ CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector, const char *phase, + const GCId gc_id, bool print_cr) : - _collector(collector), _phase(phase), _print_cr(print_cr) { + _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) { if (PrintCMSStatistics != 0) { _collector->resetYields(); } if (PrintGCDetails) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(_gc_id); gclog_or_tty->print_cr("[%s-concurrent-%s-start]", _collector->cmsGen()->short_name(), _phase); } @@ -3583,8 +3588,7 @@ _collector->stopTimer(); _wallclock.stop(); if (PrintGCDetails) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(_gc_id); gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]", _collector->cmsGen()->short_name(), _phase, _collector->timerValue(), _wallclock.seconds()); @@ -3682,7 +3686,7 @@ setup_cms_unloading_and_verification_state(); NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork", - PrintGCDetails && Verbose, true, _gc_timer_cm);) + PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());) if (UseAdaptiveSizePolicy) { size_policy()->checkpoint_roots_initial_begin(); } @@ -3740,7 +3744,7 @@ gch->set_par_threads(0); } else { // The serial version. - CMKlassClosure klass_closure(¬Older); + KlassToOopClosure klass_closure(¬Older); gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. gch->gen_process_strong_roots(_cmsGen->level(), true, // younger gens are roots @@ -3799,7 +3803,7 @@ CMSTokenSyncWithLocks ts(true, bitMapLock()); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "mark", !PrintGCDetails); + CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails); res = markFromRootsWork(asynch); if (res) { _collectorState = Precleaning; @@ -4202,7 +4206,7 @@ pst->all_tasks_completed(); } -class Par_ConcMarkingClosure: public CMSOopClosure { +class Par_ConcMarkingClosure: public MetadataAwareOopClosure { private: CMSCollector* _collector; CMSConcMarkingTask* _task; @@ -4215,7 +4219,7 @@ public: Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue, CMSBitMap* bit_map, CMSMarkStack* overflow_stack): - CMSOopClosure(collector->ref_processor()), + MetadataAwareOopClosure(collector->ref_processor()), _collector(collector), _task(task), _span(collector->_span), @@ -4522,7 +4526,7 @@ _start_sampling = false; } TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails); + CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails); preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1); } CMSTokenSync x(true); // is cms thread @@ -4551,7 +4555,7 @@ // we will never do an actual abortable preclean cycle. 
if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails); + CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails); // We need more smarts in the abortable preclean // loop below to deal with cases where allocation // in young gen is very very slow, and our precleaning @@ -4696,7 +4700,7 @@ GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases rp->preclean_discovered_references( rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl, - gc_timer); + gc_timer, _gc_tracer_cm->gc_id()); } if (clean_survivor) { // preclean the active survivor space(s) @@ -4986,7 +4990,7 @@ } class PrecleanKlassClosure : public KlassClosure { - CMKlassClosure _cm_klass_closure; + KlassToOopClosure _cm_klass_closure; public: PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {} void do_klass(Klass* k) { @@ -5039,7 +5043,7 @@ // expect it to be false and set to true FlagSetting fl(gch->_is_gc_active, false); NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark", - PrintGCDetails && Verbose, true, _gc_timer_cm);) + PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());) int level = _cmsGen->level() - 1; if (level >= 0) { gch->do_collection(true, // full (i.e. force, see below) @@ -5068,7 +5072,7 @@ void CMSCollector::checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs, bool init_mark_was_synchronous) { - NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);) + NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());) assert(haveFreelistLocks(), "must have free list locks"); assert_lock_strong(bitMapLock()); @@ -5123,11 +5127,11 @@ // the most recent young generation GC, minus those cleaned up by the // concurrent precleaning. 
if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) { - GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); do_remark_parallel(); } else { GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, - _gc_timer_cm); + _gc_timer_cm, _gc_tracer_cm->gc_id()); do_remark_non_parallel(); } } @@ -5140,7 +5144,7 @@ verify_overflow_empty(); { - NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);) + NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());) refProcessingWork(asynch, clear_all_soft_refs); } verify_work_stacks_empty(); @@ -5224,7 +5228,7 @@ _timer.start(); GenCollectedHeap* gch = GenCollectedHeap::heap(); Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap)); - CMKlassClosure klass_closure(&par_mri_cl); + KlassToOopClosure klass_closure(&par_mri_cl); // ---------- young gen roots -------------- { @@ -5298,7 +5302,7 @@ }; class RemarkKlassClosure : public KlassClosure { - CMKlassClosure _cm_klass_closure; + KlassToOopClosure _cm_klass_closure; public: RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {} void do_klass(Klass* k) { @@ -5921,7 +5925,7 @@ NULL, // space is set further below &_markBitMap, &_markStack, &mrias_cl); { - GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); // Iterate over the dirty cards, setting the corresponding bits in the // mod union table. { @@ -5958,7 +5962,7 @@ Universe::verify(); } { - GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); verify_work_stacks_empty(); @@ -5978,7 +5982,7 @@ } { - GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); verify_work_stacks_empty(); @@ -5997,7 +6001,7 @@ } { - GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); verify_work_stacks_empty(); @@ -6199,7 +6203,7 @@ _span, &_markBitMap, &_markStack, &cmsKeepAliveClosure, false /* !preclean */); { - GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); ReferenceProcessorStats stats; if (rp->processing_is_mt()) { @@ -6224,13 +6228,15 @@ &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, &task_executor, - _gc_timer_cm); + _gc_timer_cm, + _gc_tracer_cm->gc_id()); } else { stats = rp->process_discovered_references(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure, NULL, - _gc_timer_cm); + _gc_timer_cm, + _gc_tracer_cm->gc_id()); } _gc_tracer_cm->report_gc_reference_stats(stats); @@ -6241,7 +6247,7 @@ if (should_unload_classes()) { { - GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); // Unload classes and purge the SystemDictionary. 
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); @@ -6254,7 +6260,7 @@ } { - GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); } @@ -6263,7 +6269,7 @@ // CMS doesn't use the StringTable as hard roots when class unloading is turned off. // Need to check if we really scanned the StringTable. if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) { - GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); // Delete entries for dead interned strings. StringTable::unlink(&_is_alive_closure); } @@ -6330,7 +6336,7 @@ _intra_sweep_timer.start(); if (asynch) { TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails); + CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails); // First sweep the old gen { CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), @@ -6551,7 +6557,7 @@ // Clear the mark bitmap (no grey objects to start with) // for the next cycle. TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails); + CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails); HeapWord* curAddr = _markBitMap.startWord(); while (curAddr < _markBitMap.endWord()) { @@ -6617,7 +6623,7 @@ void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) { gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); + GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id()); TraceCollectorStats tcs(counters()); switch (op) { @@ -7735,7 +7741,7 @@ CMSCollector* collector, MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm, CMSMarkStack* mark_stack): - CMSOopClosure(collector->ref_processor()), + MetadataAwareOopClosure(collector->ref_processor()), _collector(collector), _span(span), _verification_bm(verification_bm), @@ -7788,7 +7794,7 @@ MemRegion span, CMSBitMap* bitMap, CMSMarkStack* markStack, HeapWord* finger, MarkFromRootsClosure* parent) : - CMSOopClosure(collector->ref_processor()), + MetadataAwareOopClosure(collector->ref_processor()), _collector(collector), _span(span), _bitMap(bitMap), @@ -7805,7 +7811,7 @@ HeapWord* finger, HeapWord** global_finger_addr, Par_MarkFromRootsClosure* parent) : - CMSOopClosure(collector->ref_processor()), + MetadataAwareOopClosure(collector->ref_processor()), _collector(collector), _whole_span(collector->_span), _span(span), @@ -7854,11 +7860,6 @@ _overflow_stack->expand(); // expand the stack if possible } -void CMKlassClosure::do_klass(Klass* k) { - assert(_oop_closure != NULL, "Not initialized?"); - k->oops_do(_oop_closure); -} - void PushOrMarkClosure::do_oop(oop obj) { // Ignore mark word because we are running concurrent with mutators. 
assert(obj->is_oop_or_null(true), "expected an oop or NULL"); @@ -7956,7 +7957,7 @@ CMSBitMap* mod_union_table, CMSMarkStack* mark_stack, bool concurrent_precleaning): - CMSOopClosure(rp), + MetadataAwareOopClosure(rp), _collector(collector), _span(span), _bit_map(bit_map), @@ -8029,7 +8030,7 @@ ReferenceProcessor* rp, CMSBitMap* bit_map, OopTaskQueue* work_queue): - CMSOopClosure(rp), + MetadataAwareOopClosure(rp), _collector(collector), _span(span), _bit_map(bit_map),
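Note on the recurring change in the CMS hunks above: every GCTraceTime scope now receives the collector's current GC id, so each timed log line can be correlated with a specific collection. A minimal stand-alone C++ sketch of that RAII pattern follows; the names (ScopedGCTrace, GcId) are hypothetical and this is not the actual GCTraceTime implementation.

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for HotSpot's GCId: a plain value with an "undefined" sentinel.
struct GcId {
  unsigned value;
  bool is_undefined() const { return value == ~0u; }
};

// Scoped timer that prints its title, elapsed time and, when defined, the GC id.
class ScopedGCTrace {
  const char* _title;
  GcId _id;
  std::chrono::steady_clock::time_point _start;
 public:
  ScopedGCTrace(const char* title, GcId id)
      : _title(title), _id(id), _start(std::chrono::steady_clock::now()) {}
  ~ScopedGCTrace() {
    using namespace std::chrono;
    double secs = duration_cast<duration<double> >(steady_clock::now() - _start).count();
    if (_id.is_undefined()) {
      std::printf("[%s, %.7f secs]\n", _title, secs);
    } else {
      std::printf("#%u: [%s, %.7f secs]\n", _id.value, _title, secs);
    }
  }
};

int main() {
  ScopedGCTrace t("weak refs processing", GcId{42});  // destructor prints "#42: [weak refs processing, ...]"
  return 0;
}

Passing the id explicitly, as the patch does at every call site, keeps the timer free of global state and lets concurrent and stop-the-world phases stay distinguishable in the log.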
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -1444,7 +1444,7 @@ // The following closures are used to do certain kinds of verification of // CMS marking. -class PushAndMarkVerifyClosure: public CMSOopClosure { +class PushAndMarkVerifyClosure: public MetadataAwareOopClosure { CMSCollector* _collector; MemRegion _span; CMSBitMap* _verification_bm;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -56,7 +56,7 @@ void VM_CMS_Operation::verify_before_gc() { if (VerifyBeforeGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm); + GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id()); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); @@ -68,7 +68,7 @@ void VM_CMS_Operation::verify_after_gc() { if (VerifyAfterGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm); + GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id()); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -28,6 +28,7 @@
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
 #include "memory/space.inline.hpp"
+#include "runtime/atomic.inline.hpp"
 
 // Even though we don't use the GC efficiency in our heuristics as
 // much as we used to, we still order according to GC efficiency. This
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -45,6 +45,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/prefetch.inline.hpp" #include "services/memTracker.hpp" @@ -511,6 +512,7 @@ _has_overflown(false), _concurrent(false), _has_aborted(false), + _aborted_gc_id(GCId::undefined()), _restart_for_overflow(false), _concurrent_marking_in_progress(false), @@ -1020,8 +1022,7 @@ force_overflow()->update(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]"); } } @@ -2469,7 +2470,7 @@ if (G1Log::finer()) { gclog_or_tty->put(' '); } - GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm()); + GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm(), concurrent_gc_id()); ReferenceProcessor* rp = g1h->ref_processor_cm(); @@ -2526,7 +2527,8 @@ &g1_keep_alive, &g1_drain_mark_stack, executor, - g1h->gc_timer_cm()); + g1h->gc_timer_cm(), + concurrent_gc_id()); g1h->gc_tracer_cm()->report_gc_reference_stats(stats); // The do_oop work routines of the keep_alive and drain_marking_stack @@ -3261,6 +3263,12 @@ } _first_overflow_barrier_sync.abort(); _second_overflow_barrier_sync.abort(); + const GCId& gc_id = _g1h->gc_tracer_cm()->gc_id(); + if (!gc_id.is_undefined()) { + // We can do multiple full GCs before ConcurrentMarkThread::run() gets a chance + // to detect that it was aborted. Only keep track of the first GC id that we aborted. + _aborted_gc_id = gc_id; + } _has_aborted = true; SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set(); @@ -3275,6 +3283,13 @@ _g1h->register_concurrent_cycle_end(); } +const GCId& ConcurrentMark::concurrent_gc_id() { + if (has_aborted()) { + return _aborted_gc_id; + } + return _g1h->gc_tracer_cm()->gc_id(); +} + static void print_ms_time_info(const char* prefix, const char* name, NumberSeq& ns) { gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
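The new _aborted_gc_id field and concurrent_gc_id() accessor above handle a subtle case: several full GCs can abort a concurrent cycle before ConcurrentMarkThread::run() notices, and by then the tracer's id may already have moved on. A compact sketch of that fallback logic, using invented names (MarkerState, GcId) rather than the real ConcurrentMark class:

// Illustrative only: remember the id of the first collection that aborted the
// cycle, and report it once the tracer's own id is no longer meaningful.
struct GcId {
  unsigned value;
  bool is_undefined() const { return value == ~0u; }
};

class MarkerState {
  GcId _current;      // id published by the tracer for the cycle in progress
  GcId _aborted;      // id captured when the cycle was first aborted
  bool _has_aborted;
 public:
  MarkerState() : _current{~0u}, _aborted{~0u}, _has_aborted(false) {}
  void start_cycle(unsigned id) {
    _current = GcId{id};
    _aborted = GcId{~0u};
    _has_aborted = false;
  }
  void abort() {
    // Multiple aborts may arrive back to back; keep only the first id.
    if (!_current.is_undefined() && _aborted.is_undefined()) {
      _aborted = _current;
    }
    _has_aborted = true;
  }
  GcId concurrent_gc_id() const { return _has_aborted ? _aborted : _current; }
};

In the actual hunk the id captured at abort time comes from the G1 tracer (_g1h->gc_tracer_cm()->gc_id()); the sketch only mirrors the "keep the first, fall back to it" shape.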
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP #include "gc_implementation/g1/heapRegionSet.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "utilities/taskqueue.hpp" class G1CollectedHeap; @@ -444,6 +445,7 @@ volatile bool _concurrent; // Set at the end of a Full GC so that marking aborts volatile bool _has_aborted; + GCId _aborted_gc_id; // Used when remark aborts due to an overflow to indicate that // another concurrent marking phase should start @@ -824,6 +826,8 @@ bool has_aborted() { return _has_aborted; } + const GCId& concurrent_gc_id(); + // This prints the global/local fingers. It is used for debugging. NOT_PRODUCT(void print_finger();)
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,4 +1,4 @@ -/* + /* * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -29,6 +29,7 @@ #include "gc_implementation/g1/g1Log.hpp" #include "gc_implementation/g1/g1MMUTracker.hpp" #include "gc_implementation/g1/vm_operations_g1.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "memory/resourceArea.hpp" #include "runtime/vmThread.hpp" @@ -111,8 +112,7 @@ double scan_start = os::elapsedTime(); if (!cm()->has_aborted()) { if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]"); } @@ -120,8 +120,7 @@ double scan_end = os::elapsedTime(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", scan_end - scan_start); } @@ -129,8 +128,7 @@ double mark_start_sec = os::elapsedTime(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-start]"); } @@ -153,8 +151,7 @@ } if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec); } @@ -169,8 +166,7 @@ "in remark (restart #%d).", iter); } if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]"); } } @@ -213,8 +209,7 @@ double cleanup_start_sec = os::elapsedTime(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-cleanup-start]"); } @@ -234,8 +229,7 @@ double cleanup_end_sec = os::elapsedTime(); if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec); } @@ -276,8 +270,7 @@ if (cm()->has_aborted()) { if (G1Log::fine()) { - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id()); gclog_or_tty->print_cr("[GC concurrent-mark-abort]"); } }
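Each logging site above collapses a date_stamp()/stamp() pair into a single gclog_stamp(gc_id) call that also emits the GC id. A rough stand-alone sketch of what such a helper does, with std::printf standing in for gclog_or_tty and all names assumed rather than taken from HotSpot:

#include <cstdio>
#include <ctime>

struct GcId {
  unsigned value;
  bool is_undefined() const { return value == ~0u; }
};

// Emits the timestamp prefix and, when a valid GC id is available, the "#<id>: " marker.
void gclog_stamp(const GcId& gc_id) {
  std::time_t now = std::time(nullptr);
  char buf[32];
  std::strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", std::localtime(&now));
  std::printf("%s: ", buf);
  if (!gc_id.is_undefined()) {
    std::printf("#%u: ", gc_id.value);
  }
}

int main() {
  gclog_stamp(GcId{7});
  std::printf("[GC concurrent-mark-start]\n");
  return 0;
}

Folding the stamp into one helper keeps the many call sites in this thread loop consistent instead of repeating the same two statements before every print.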
--- a/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -26,7 +26,7 @@
 #include "gc_implementation/g1/dirtyCardQueue.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" +#include "gc_implementation/g1/heapRegion.hpp" #include "memory/space.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" @@ -98,6 +99,20 @@ return (delta & right_n_bits(LogN_words)) == (size_t)NoBits; } +void G1BlockOffsetSharedArray::set_offset_array(HeapWord* left, HeapWord* right, u_char offset) { + check_index(index_for(right - 1), "right address out of range"); + assert(left < right, "Heap addresses out of order"); + size_t num_cards = pointer_delta(right, left) >> LogN_words; + if (UseMemSetInBOT) { + memset(&_offset_array[index_for(left)], offset, num_cards); + } else { + size_t i = index_for(left); + const size_t end = i + num_cards; + for (; i < end; i++) { + _offset_array[i] = offset; + } + } +} ////////////////////////////////////////////////////////////////////// // G1BlockOffsetArray @@ -107,7 +122,7 @@ MemRegion mr, bool init_to_zero) : G1BlockOffsetTable(mr.start(), mr.end()), _unallocated_block(_bottom), - _array(array), _csp(NULL), + _array(array), _gsp(NULL), _init_to_zero(init_to_zero) { assert(_bottom <= _end, "arguments out of order"); if (!_init_to_zero) { @@ -117,9 +132,8 @@ } } -void G1BlockOffsetArray::set_space(Space* sp) { - _sp = sp; - _csp = sp->toContiguousSpace(); +void G1BlockOffsetArray::set_space(G1OffsetTableContigSpace* sp) { + _gsp = sp; } // The arguments follow the normal convention of denoting @@ -378,7 +392,7 @@ } // Otherwise, find the block start using the table. HeapWord* q = block_at_or_preceding(addr, false, 0); - HeapWord* n = q + _sp->block_size(q); + HeapWord* n = q + block_size(q); return forward_to_block_containing_addr_const(q, n, addr); } @@ -406,31 +420,17 @@ err_msg("next_boundary is beyond the end of the covered region " " next_boundary " PTR_FORMAT " _array->_end " PTR_FORMAT, next_boundary, _array->_end)); - if (csp() != NULL) { - if (addr >= csp()->top()) return csp()->top(); - while (next_boundary < addr) { - while (n <= next_boundary) { - q = n; - oop obj = oop(q); - if (obj->klass_or_null() == NULL) return q; - n += obj->size(); - } - assert(q <= next_boundary && n > next_boundary, "Consequence of loop"); - // [q, n) is the block that crosses the boundary. - alloc_block_work2(&next_boundary, &next_index, q, n); + if (addr >= gsp()->top()) return gsp()->top(); + while (next_boundary < addr) { + while (n <= next_boundary) { + q = n; + oop obj = oop(q); + if (obj->klass_or_null() == NULL) return q; + n += obj->size(); } - } else { - while (next_boundary < addr) { - while (n <= next_boundary) { - q = n; - oop obj = oop(q); - if (obj->klass_or_null() == NULL) return q; - n += _sp->block_size(q); - } - assert(q <= next_boundary && n > next_boundary, "Consequence of loop"); - // [q, n) is the block that crosses the boundary. - alloc_block_work2(&next_boundary, &next_index, q, n); - } + assert(q <= next_boundary && n > next_boundary, "Consequence of loop"); + // [q, n) is the block that crosses the boundary. 
+ alloc_block_work2(&next_boundary, &next_index, q, n); } return forward_to_block_containing_addr_const(q, n, addr); } @@ -637,7 +637,7 @@ assert(_bottom <= addr && addr < _end, "addr must be covered by this Array"); HeapWord* q = block_at_or_preceding(addr, true, _next_offset_index-1); - HeapWord* n = q + _sp->block_size(q); + HeapWord* n = q + block_size(q); return forward_to_block_containing_addr_const(q, n, addr); }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -52,8 +52,8 @@ // consolidation. // Forward declarations -class ContiguousSpace; class G1BlockOffsetSharedArray; +class G1OffsetTableContigSpace; class G1BlockOffsetTable VALUE_OBJ_CLASS_SPEC { friend class VMStructs; @@ -157,6 +157,8 @@ return _offset_array[index]; } + void set_offset_array(HeapWord* left, HeapWord* right, u_char offset); + void set_offset_array(size_t index, u_char offset) { check_index(index, "index out of range"); check_offset(offset, "offset too large"); @@ -170,21 +172,6 @@ _offset_array[index] = (u_char) pointer_delta(high, low); } - void set_offset_array(HeapWord* left, HeapWord* right, u_char offset) { - check_index(index_for(right - 1), "right address out of range"); - assert(left < right, "Heap addresses out of order"); - size_t num_cards = pointer_delta(right, left) >> LogN_words; - if (UseMemSetInBOT) { - memset(&_offset_array[index_for(left)], offset, num_cards); - } else { - size_t i = index_for(left); - const size_t end = i + num_cards; - for (; i < end; i++) { - _offset_array[i] = offset; - } - } - } - void set_offset_array(size_t left, size_t right, u_char offset) { check_index(right, "right index out of range"); assert(left <= right, "indexes out of order"); @@ -281,11 +268,7 @@ G1BlockOffsetSharedArray* _array; // The space that owns this subregion. - Space* _sp; - - // If "_sp" is a contiguous space, the field below is the view of "_sp" - // as a contiguous space, else NULL. - ContiguousSpace* _csp; + G1OffsetTableContigSpace* _gsp; // If true, array entries are initialized to 0; otherwise, they are // initialized to point backwards to the beginning of the covered region. @@ -310,7 +293,9 @@ protected: - ContiguousSpace* csp() const { return _csp; } + G1OffsetTableContigSpace* gsp() const { return _gsp; } + + inline size_t block_size(const HeapWord* p) const; // Returns the address of a block whose start is at most "addr". // If "has_max_index" is true, "assumes "max_index" is the last valid one @@ -363,7 +348,7 @@ // "this" to be passed as a parameter to a member constructor for // the containing concrete subtype of Space. // This would be legal C++, but MS VC++ doesn't allow it. - void set_space(Space* sp); + void set_space(G1OffsetTableContigSpace* sp); // Resets the covered region to the given "mr". void set_region(MemRegion mr);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP #include "gc_implementation/g1/g1BlockOffsetTable.hpp" +#include "gc_implementation/g1/heapRegion.hpp" #include "memory/space.hpp" inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) { @@ -69,6 +70,11 @@ return result; } +inline size_t +G1BlockOffsetArray::block_size(const HeapWord* p) const { + return gsp()->block_size(p); +} + inline HeapWord* G1BlockOffsetArray::block_at_or_preceding(const void* addr, bool has_max_index, @@ -88,7 +94,7 @@ // to go back by. size_t n_cards_back = BlockOffsetArray::entry_to_cards_back(offset); q -= (N_words * n_cards_back); - assert(q >= _sp->bottom(), "Went below bottom!"); + assert(q >= gsp()->bottom(), "Went below bottom!"); index -= n_cards_back; offset = _array->offset_array(index); } @@ -101,21 +107,12 @@ G1BlockOffsetArray:: forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n, const void* addr) const { - if (csp() != NULL) { - if (addr >= csp()->top()) return csp()->top(); - while (n <= addr) { - q = n; - oop obj = oop(q); - if (obj->klass_or_null() == NULL) return q; - n += obj->size(); - } - } else { - while (n <= addr) { - q = n; - oop obj = oop(q); - if (obj->klass_or_null() == NULL) return q; - n += _sp->block_size(q); - } + if (addr >= gsp()->top()) return gsp()->top(); + while (n <= addr) { + q = n; + oop obj = oop(q); + if (obj->klass_or_null() == NULL) return q; + n += obj->size(); } assert(q <= n, "wrong order for q and addr"); assert(addr < n, "wrong order for addr and n"); @@ -126,7 +123,7 @@ G1BlockOffsetArray::forward_to_block_containing_addr(HeapWord* q, const void* addr) { if (oop(q)->klass_or_null() == NULL) return q; - HeapWord* n = q + _sp->block_size(q); + HeapWord* n = q + block_size(q); // In the normal case, where the query "addr" is a card boundary, and the // offset table chunks are the same size as cards, the block starting at // "q" will contain addr, so the test below will fail, and we'll fall
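The block-offset-table hunks above drop the dual Space*/ContiguousSpace* bookkeeping and store the single concrete space type the table is actually used with, forwarding block_size() to it. A toy sketch of that simplification, with invented types (ConcreteSpace, OffsetTable) rather than the G1 classes:

#include <cstddef>
#include <cstdio>

// Invented stand-in for G1OffsetTableContigSpace: the one space type the table serves.
struct ConcreteSpace {
  size_t block_size(const void* p) const { (void)p; return 8; /* placeholder size */ }
};

class OffsetTable {
  ConcreteSpace* _gsp;  // always the concrete space; no generic Space* fallback
 public:
  OffsetTable() : _gsp(nullptr) {}
  void set_space(ConcreteSpace* sp) { _gsp = sp; }
  ConcreteSpace* gsp() const { return _gsp; }
  // One forwarding call replaces the old "if (csp() != NULL) ... else ..." split.
  size_t block_size(const void* p) const { return gsp()->block_size(p); }
};

int main() {
  ConcreteSpace space;
  OffsetTable table;
  table.set_space(&space);
  std::printf("block size: %zu words\n", table.block_size(nullptr));
  return 0;
}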
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -62,6 +62,7 @@ #include "memory/referenceProcessor.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.pcgc.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/prefetch.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/vmThread.hpp" @@ -1298,7 +1299,7 @@ TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); { - GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL); + GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL, gc_tracer->gc_id()); TraceCollectorStats tcs(g1mm()->full_collection_counters()); TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); @@ -3858,8 +3859,7 @@ return; } - gclog_or_tty->date_stamp(PrintGCDateStamps); - gclog_or_tty->stamp(PrintGCTimeStamps); + gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id()); GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause()) .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)") @@ -5340,17 +5340,14 @@ class G1CopyingKeepAliveClosure: public OopClosure { G1CollectedHeap* _g1h; OopClosure* _copy_non_heap_obj_cl; - OopsInHeapRegionClosure* _copy_metadata_obj_cl; G1ParScanThreadState* _par_scan_state; public: G1CopyingKeepAliveClosure(G1CollectedHeap* g1h, OopClosure* non_heap_obj_cl, - OopsInHeapRegionClosure* metadata_obj_cl, G1ParScanThreadState* pss): _g1h(g1h), _copy_non_heap_obj_cl(non_heap_obj_cl), - _copy_metadata_obj_cl(metadata_obj_cl), _par_scan_state(pss) {} @@ -5383,7 +5380,7 @@ _par_scan_state->push_on_queue(p); } else { assert(!Metaspace::contains((const void*)p), - err_msg("Otherwise need to call _copy_metadata_obj_cl->do_oop(p) " + err_msg("Unexpectedly found a pointer from metadata: " PTR_FORMAT, p)); _copy_non_heap_obj_cl->do_oop(p); } @@ -5478,22 +5475,18 @@ pss.set_evac_failure_closure(&evac_failure_cl); G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); - G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL); G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); - G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL); OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; - OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl; if (_g1h->g1_policy()->during_initial_mark_pause()) { // We also need to mark copied objects. copy_non_heap_cl = ©_mark_non_heap_cl; - copy_metadata_cl = ©_mark_metadata_cl; } // Keep alive closure. - G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss); + G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss); // Complete GC closure G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator); @@ -5588,18 +5581,14 @@ G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL); - G1ParScanMetadataClosure only_copy_metadata_cl(_g1h, &pss, NULL); G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL); - G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(_g1h, &pss, NULL); OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; - OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl; if (_g1h->g1_policy()->during_initial_mark_pause()) { // We also need to mark copied objects. 
copy_non_heap_cl = ©_mark_non_heap_cl; - copy_metadata_cl = ©_mark_metadata_cl; } // Is alive closure @@ -5607,7 +5596,7 @@ // Copying keep alive closure. Applied to referent objects that need // to be copied. - G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_metadata_cl, &pss); + G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss); ReferenceProcessor* rp = _g1h->ref_processor_cm(); @@ -5713,22 +5702,18 @@ assert(pss.refs()->is_empty(), "pre-condition"); G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL); - G1ParScanMetadataClosure only_copy_metadata_cl(this, &pss, NULL); G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL); - G1ParScanAndMarkMetadataClosure copy_mark_metadata_cl(this, &pss, NULL); OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl; - OopsInHeapRegionClosure* copy_metadata_cl = &only_copy_metadata_cl; if (_g1h->g1_policy()->during_initial_mark_pause()) { // We also need to mark copied objects. copy_non_heap_cl = ©_mark_non_heap_cl; - copy_metadata_cl = ©_mark_metadata_cl; } // Keep alive closure. - G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_metadata_cl, &pss); + G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss); // Serial Complete GC closure G1STWDrainQueueClosure drain_queue(this, &pss); @@ -5743,7 +5728,8 @@ &keep_alive, &drain_queue, NULL, - _gc_timer_stw); + _gc_timer_stw, + _gc_tracer_stw->gc_id()); } else { // Parallel reference processing assert(rp->num_q() == no_of_gc_workers, "sanity"); @@ -5754,7 +5740,8 @@ &keep_alive, &drain_queue, &par_task_executor, - _gc_timer_stw); + _gc_timer_stw, + _gc_tracer_stw->gc_id()); } _gc_tracer_stw->report_gc_reference_stats(stats); @@ -6993,7 +6980,7 @@ return; } - if (ScavengeRootsInCode && nm->detect_scavenge_root_oops()) { + if (ScavengeRootsInCode) { _g1h->register_nmethod(nm); } }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -28,6 +28,7 @@
 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
+#include "runtime/atomic.inline.hpp"
 
 // Helper class for avoiding interleaved logging
 class LineBuffer: public StackObj {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -28,7 +28,7 @@
 #include "gc_implementation/g1/g1HotCardCache.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
-#include "runtime/atomic.hpp"
+#include "runtime/atomic.inline.hpp"
 
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
   _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -44,6 +44,7 @@ #include "oops/instanceRefKlass.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/fprofiler.hpp" #include "runtime/synchronizer.hpp" @@ -123,7 +124,7 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading, bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer()); + GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id()); GenMarkSweep::trace(" 1"); SharedHeap* sh = SharedHeap::heap(); @@ -146,7 +147,8 @@ &GenMarkSweep::keep_alive, &GenMarkSweep::follow_stack_closure, NULL, - gc_timer()); + gc_timer(), + gc_tracer()->gc_id()); gc_tracer()->report_gc_reference_stats(stats); @@ -260,7 +262,7 @@ G1CollectedHeap* g1h = G1CollectedHeap::heap(); - GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer()); + GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id()); GenMarkSweep::trace("2"); // find the first region @@ -297,7 +299,7 @@ G1CollectedHeap* g1h = G1CollectedHeap::heap(); // Adjust the pointers to reflect the new locations - GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer()); + GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id()); GenMarkSweep::trace("3"); SharedHeap* sh = SharedHeap::heap(); @@ -358,7 +360,7 @@ // to use a higher index (saved from phase2) when verifying perm_gen. G1CollectedHeap* g1h = G1CollectedHeap::heap(); - GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer()); + GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id()); GenMarkSweep::trace("4"); G1SpaceCompactClosure blk;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -26,6 +26,7 @@
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedup.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedup.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -31,6 +31,7 @@ #include "gc_implementation/g1/g1StringDedupStat.hpp" #include "gc_implementation/g1/g1StringDedupTable.hpp" #include "gc_implementation/g1/g1StringDedupThread.hpp" +#include "runtime/atomic.inline.hpp" bool G1StringDedup::_enabled = false; @@ -211,3 +212,16 @@ G1StringDedupTable::finish_rehash(_rehashed_table); } } + +// Atomically claims the next available queue for exclusive access by +// the current thread. Returns the queue number of the claimed queue. +size_t G1StringDedupUnlinkOrOopsDoClosure::claim_queue() { + return (size_t)Atomic::add_ptr(1, &_next_queue) - 1; +} + +// Atomically claims the next available table partition for exclusive +// access by the current thread. Returns the table bucket number where +// the claimed partition starts. +size_t G1StringDedupUnlinkOrOopsDoClosure::claim_table_partition(size_t partition_size) { + return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size; +}
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedup.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedup.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -84,6 +84,7 @@ #include "memory/allocation.hpp" #include "oops/oop.hpp" +#include "runtime/atomic.hpp" class OopClosure; class BoolObjectClosure; @@ -174,16 +175,12 @@ // Atomically claims the next available queue for exclusive access by // the current thread. Returns the queue number of the claimed queue. - size_t claim_queue() { - return (size_t)Atomic::add_ptr(1, &_next_queue) - 1; - } + size_t claim_queue(); // Atomically claims the next available table partition for exclusive // access by the current thread. Returns the table bucket number where // the claimed partition starts. - size_t claim_table_partition(size_t partition_size) { - return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size; - } + size_t claim_table_partition(size_t partition_size); // Applies and returns the result from the is_alive closure, or // returns true if no such closure was provided.
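The claim_queue()/claim_table_partition() bodies moved out of line above both rely on one idiom: worker threads claim disjoint chunks of shared work with an atomic add. A self-contained sketch of that pattern, using std::atomic in place of HotSpot's Atomic::add_ptr (the class and function names here are illustrative only):

#include <atomic>
#include <cstdio>

class WorkClaimer {
  std::atomic<size_t> _next_queue{0};
  std::atomic<size_t> _next_bucket{0};
 public:
  // Returns the index of the queue now owned exclusively by the caller.
  // fetch_add returns the previous value; HotSpot's Atomic::add_ptr returns
  // the new value, which is why the original subtracts 1.
  size_t claim_queue() { return _next_queue.fetch_add(1); }

  // Returns the first bucket of the table partition now owned by the caller;
  // the original subtracts partition_size for the same reason as above.
  size_t claim_table_partition(size_t partition_size) {
    return _next_bucket.fetch_add(partition_size);
  }
};

int main() {
  WorkClaimer claimer;
  std::printf("queue %zu, partition starting at bucket %zu\n",
              claimer.claim_queue(), claimer.claim_table_partition(64));
  return 0;
}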
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -26,6 +26,7 @@
 #include "classfile/javaClasses.hpp"
 #include "gc_implementation/g1/g1StringDedupQueue.hpp"
 #include "memory/gcLocker.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/stack.inline.hpp"
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -28,6 +28,7 @@
 #include "gc_implementation/g1/g1StringDedupTable.hpp"
 #include "gc_implementation/g1/g1StringDedupThread.hpp"
 #include "gc_implementation/g1/g1StringDedupQueue.hpp"
+#include "runtime/atomic.inline.hpp"
 
 G1StringDedupThread* G1StringDedupThread::_thread = NULL;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -34,6 +34,7 @@ #include "memory/iterator.hpp" #include "memory/space.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/orderAccess.inline.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -48,7 +49,7 @@ HeapRegion* hr, ExtendedOopClosure* cl, CardTableModRefBS::PrecisionStyle precision, FilterKind fk) : - ContiguousSpaceDCTOC(hr, cl, precision, NULL), + DirtyCardToOopClosure(hr, cl, precision, NULL), _hr(hr), _fk(fk), _g1(g1) { } FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r, @@ -77,19 +78,18 @@ return cur; } -void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr, - HeapWord* bottom, - HeapWord* top, - ExtendedOopClosure* cl) { +void HeapRegionDCTOC::walk_mem_region(MemRegion mr, + HeapWord* bottom, + HeapWord* top) { G1CollectedHeap* g1h = _g1; int oop_size; ExtendedOopClosure* cl2 = NULL; - FilterIntoCSClosure intoCSFilt(this, g1h, cl); - FilterOutOfRegionClosure outOfRegionFilt(_hr, cl); + FilterIntoCSClosure intoCSFilt(this, g1h, _cl); + FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl); switch (_fk) { - case NoFilterKind: cl2 = cl; break; + case NoFilterKind: cl2 = _cl; break; case IntoCSFilterKind: cl2 = &intoCSFilt; break; case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break; default: ShouldNotReachHere(); @@ -111,17 +111,17 @@ // We replicate the loop below for several kinds of possible filters. switch (_fk) { case NoFilterKind: - bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top); + bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top); break; case IntoCSFilterKind: { - FilterIntoCSClosure filt(this, g1h, cl); + FilterIntoCSClosure filt(this, g1h, _cl); bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); break; } case OutOfRegionFilterKind: { - FilterOutOfRegionClosure filt(_hr, cl); + FilterOutOfRegionClosure filt(_hr, _cl); bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); break; }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,7 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP -#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" +#include "gc_implementation/g1/g1BlockOffsetTable.hpp" #include "gc_implementation/g1/g1_specialized_oop_closures.hpp" #include "gc_implementation/g1/survRateGroup.hpp" #include "gc_implementation/shared/ageTable.hpp" @@ -71,7 +71,7 @@ // in the concurrent marker used by G1 to filter remembered // sets. -class HeapRegionDCTOC : public ContiguousSpaceDCTOC { +class HeapRegionDCTOC : public DirtyCardToOopClosure { public: // Specification of possible DirtyCardToOopClosure filtering. enum FilterKind { @@ -85,39 +85,13 @@ FilterKind _fk; G1CollectedHeap* _g1; - void walk_mem_region_with_cl(MemRegion mr, - HeapWord* bottom, HeapWord* top, - ExtendedOopClosure* cl); - - // We don't specialize this for FilteringClosure; filtering is handled by - // the "FilterKind" mechanism. But we provide this to avoid a compiler - // warning. - void walk_mem_region_with_cl(MemRegion mr, - HeapWord* bottom, HeapWord* top, - FilteringClosure* cl) { - HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top, - (ExtendedOopClosure*)cl); - } - - // Get the actual top of the area on which the closure will - // operate, given where the top is assumed to be (the end of the - // memory region passed to do_MemRegion) and where the object - // at the top is assumed to start. For example, an object may - // start at the top but actually extend past the assumed top, - // in which case the top becomes the end of the object. - HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) { - return ContiguousSpaceDCTOC::get_actual_top(top, top_obj); - } - // Walk the given memory region from bottom to (actual) top // looking for objects and applying the oop closure (_cl) to // them. The base implementation of this treats the area as // blocks, where a block may or may not be an object. Sub- // classes should override this to provide more accurate // or possibly more efficient walking. - void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) { - Filtering_DCTOC::walk_mem_region(mr, bottom, top); - } + void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top); public: HeapRegionDCTOC(G1CollectedHeap* g1,
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp Wed Jul 05 19:49:46 2017 +0200
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 
+#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+
 inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
   HeapWord* res = ContiguousSpace::allocate(size);
   if (res != NULL) {
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -32,6 +32,7 @@
 #include "memory/padded.inline.hpp"
 #include "memory/space.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/growableArray.hpp"
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -29,6 +29,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/space.inline.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 
 #define SPARSE_PRT_VERBOSE 0
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -226,7 +226,7 @@ void VM_CGC_Operation::doit() { gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps); TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); - GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm()); + GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()); SharedHeap* sh = SharedHeap::heap(); // This could go away if CollectedHeap gave access to _gc_is_active... if (sh != NULL) {
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -47,6 +47,7 @@ #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.pcgc.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" @@ -955,7 +956,7 @@ size_policy->minor_collection_begin(); } - GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); + GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id()); // Capture heap used before collection (for printing). size_t gch_prev_used = gch->used(); @@ -1013,14 +1014,14 @@ ParNewRefProcTaskExecutor task_executor(*this, thread_state_set); stats = rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, &task_executor, - _gc_timer); + _gc_timer, gc_tracer.gc_id()); } else { thread_state_set.flush(); gch->set_par_threads(0); // 0 ==> non-parallel. gch->save_marks(); stats = rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, NULL, - _gc_timer); + _gc_timer, gc_tracer.gc_id()); } gc_tracer.report_gc_reference_stats(stats); if (!promotion_failed()) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -26,6 +26,7 @@
 #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/os.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "services/memTracker.hpp"
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -53,7 +53,7 @@ ResourceMark rm; NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -82,7 +82,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); @@ -153,7 +153,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("RefProcTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm); @@ -209,7 +209,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("StealMarkingTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -242,7 +242,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -309,7 +309,7 @@ void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) { NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which); @@ -324,7 +324,7 @@ assert(Universe::heap()->is_gc_active(), "called outside gc"); NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask", - PrintGCDetails && TraceParallelOldGCTasks, true, NULL)); + PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id())); ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(which);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -34,6 +34,7 @@
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "utilities/stack.inline.hpp"
 
 PSOldGen* ParCompactionManager::_old_gen = NULL;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -170,7 +170,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); + GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id()); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); @@ -521,7 +521,7 @@ void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer); + GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace(" 1"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); @@ -556,7 +556,7 @@ ref_processor()->setup_policy(clear_all_softrefs); const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references( - is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer); + is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer, _gc_tracer->gc_id()); gc_tracer()->report_gc_reference_stats(stats); } @@ -582,7 +582,7 @@ void PSMarkSweep::mark_sweep_phase2() { - GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer); + GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("2"); // Now all live objects are marked, compute the new object addresses. @@ -612,7 +612,7 @@ void PSMarkSweep::mark_sweep_phase3() { // Adjust the pointers to reflect the new locations - GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer); + GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("3"); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); @@ -655,7 +655,7 @@ void PSMarkSweep::mark_sweep_phase4() { EventMark m("4 compact heap"); - GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer); + GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("4"); // All pointers are now adjusted, move objects accordingly
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -51,6 +51,7 @@ #include "oops/methodData.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.pcgc.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/fprofiler.hpp" #include "runtime/safepoint.hpp" #include "runtime/vmThread.hpp" @@ -979,7 +980,7 @@ // at each young gen gc. Do the update unconditionally (even though a // promotion failure does not swap spaces) because an unknown number of minor // collections will have swapped the spaces an unknown number of times. - GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer); + GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap* heap = gc_heap(); _space_info[from_space_id].set_space(heap->young_gen()->from_space()); _space_info[to_space_id].set_space(heap->young_gen()->to_space()); @@ -1022,7 +1023,7 @@ void PSParallelCompact::post_compact() { - GCTraceTime tm("post compact", print_phases(), true, &_gc_timer); + GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); for (unsigned int id = old_space_id; id < last_space_id; ++id) { // Clear the marking bitmap, summary data and split info. @@ -1848,7 +1849,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm, bool maximum_compaction) { - GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer); + GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // trace("2"); #ifdef ASSERT @@ -2057,7 +2058,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL); + GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id()); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(true /* Full GC */,gc_cause); @@ -2359,7 +2360,7 @@ bool maximum_heap_compaction, ParallelOldTracer *gc_tracer) { // Recursively traverse all live objects and mark them - GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer); + GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap* heap = gc_heap(); uint parallel_gc_threads = heap->gc_task_manager()->workers(); @@ -2374,7 +2375,7 @@ ClassLoaderDataGraph::clear_claimed_marks(); { - GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer); + GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap::ParStrongRootsScope psrs; @@ -2403,24 +2404,24 @@ // Process reference objects found during marking { - GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer); + GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ReferenceProcessorStats stats; if (ref_processor()->processing_is_mt()) { RefProcTaskExecutor task_executor; stats = ref_processor()->process_discovered_references( is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, - &task_executor, &_gc_timer); + &task_executor, &_gc_timer, _gc_tracer.gc_id()); } else { stats = ref_processor()->process_discovered_references( is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL, - &_gc_timer); + &_gc_timer, _gc_tracer.gc_id()); } 
gc_tracer->report_gc_reference_stats(stats); } - GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer); + GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // This is the point where the entire marking should have completed. assert(cm->marking_stacks_empty(), "Marking should have completed"); @@ -2459,7 +2460,7 @@ void PSParallelCompact::adjust_roots() { // Adjust the pointers to reflect the new locations - GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer); + GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // Need new claim bits when tracing through and adjusting pointers. ClassLoaderDataGraph::clear_claimed_marks(); @@ -2495,7 +2496,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q, uint parallel_gc_threads) { - GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer); + GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // Find the threads that are active unsigned int which = 0; @@ -2569,7 +2570,7 @@ void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q, uint parallel_gc_threads) { - GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer); + GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelCompactData& sd = PSParallelCompact::summary_data(); @@ -2651,7 +2652,7 @@ GCTaskQueue* q, ParallelTaskTerminator* terminator_ptr, uint parallel_gc_threads) { - GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer); + GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); // Once a thread has drained it's stack, it should try to steal regions from // other threads. @@ -2699,7 +2700,7 @@ void PSParallelCompact::compact() { // trace("5"); - GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer); + GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap(); assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity"); @@ -2716,7 +2717,7 @@ enqueue_region_stealing_tasks(q, &terminator, active_gc_threads); { - GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer); + GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); gc_task_manager()->execute_and_wait(q); @@ -2730,7 +2731,7 @@ { // Update the deferred objects, if any. Any compaction manager can be used. - GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer); + GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id()); ParCompactionManager* cm = ParCompactionManager::manager_array(0); for (unsigned int id = old_space_id; id < last_space_id; ++id) { update_deferred_objects(cm, SpaceId(id));
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -1004,6 +1004,10 @@ static bool _dwl_initialized; #endif // #ifdef ASSERT + + public: + static ParallelOldTracer* gc_tracer() { return &_gc_tracer; } + private: static void initialize_space_info();
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -332,7 +332,7 @@ gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); + GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id()); TraceCollectorStats tcs(counters()); TraceMemoryManagerStats tms(false /* not full GC */,gc_cause); @@ -398,7 +398,7 @@ // We'll use the promotion manager again later. PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager(); { - GCTraceTime tm("Scavenge", false, false, &_gc_timer); + GCTraceTime tm("Scavenge", false, false, &_gc_timer, _gc_tracer.gc_id()); ParallelScavengeHeap::ParStrongRootsScope psrs; GCTaskQueue* q = GCTaskQueue::create(); @@ -440,7 +440,7 @@ // Process reference objects discovered during scavenge { - GCTraceTime tm("References", false, false, &_gc_timer); + GCTraceTime tm("References", false, false, &_gc_timer, _gc_tracer.gc_id()); reference_processor()->setup_policy(false); // not always_clear reference_processor()->set_active_mt_degree(active_workers); @@ -451,10 +451,10 @@ PSRefProcTaskExecutor task_executor; stats = reference_processor()->process_discovered_references( &_is_alive_closure, &keep_alive, &evac_followers, &task_executor, - &_gc_timer); + &_gc_timer, _gc_tracer.gc_id()); } else { stats = reference_processor()->process_discovered_references( - &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer); + &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer, _gc_tracer.gc_id()); } _gc_tracer.report_gc_reference_stats(stats); @@ -469,7 +469,7 @@ } { - GCTraceTime tm("StringTable", false, false, &_gc_timer); + GCTraceTime tm("StringTable", false, false, &_gc_timer, _gc_tracer.gc_id()); // Unlink any dead interned Strings and process the remaining live ones. PSScavengeRootsClosure root_closure(promotion_manager); StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure); @@ -641,7 +641,7 @@ NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); { - GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer); + GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer, _gc_tracer.gc_id()); CodeCache::prune_scavenge_root_nmethods(); }
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp Thu Jul 10 12:13:22 2014 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp Wed Jul 05 19:49:46 2017 +0200
@@ -28,6 +28,7 @@
 #include "memory/collectorPolicy.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/sharedHeap.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "utilities/copy.hpp"
 
 /* Copyright (c) 1992-2009 Oracle and/or its affiliates, and Stanford University.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcId.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc_implementation/shared/gcId.hpp" +#include "runtime/safepoint.hpp" + +uint GCId::_next_id = 0; + +const GCId GCId::create() { + return GCId(_next_id++); +} +const GCId GCId::peek() { + return GCId(_next_id); +} +const GCId GCId::undefined() { + return GCId(UNDEFINED); +} +bool GCId::is_undefined() const { + return _id == UNDEFINED; +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcId.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP + +#include "memory/allocation.hpp" + +class GCId VALUE_OBJ_CLASS_SPEC { + private: + uint _id; + GCId(uint id) : _id(id) {} + GCId() { } // Unused + + static uint _next_id; + static const uint UNDEFINED = (uint)-1; + + public: + uint id() const { + assert(_id != UNDEFINED, "Using undefined GC ID"); + return _id; + } + bool is_undefined() const; + + static const GCId create(); + static const GCId peek(); + static const GCId undefined(); +}; + +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCID_HPP
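For reference, the new gcId.hpp/gcId.cpp pair above boils down to a small value type with a monotonically increasing counter and an explicit undefined sentinel. Below is a stand-alone re-creation of the same idea (not the HotSpot sources, and without HotSpot's allocation macros) that can be compiled and run directly:

#include <cassert>
#include <cstdio>

class GcId {
  unsigned _id;
  explicit GcId(unsigned id) : _id(id) {}
  static unsigned _next_id;
  static constexpr unsigned UNDEFINED = static_cast<unsigned>(-1);
 public:
  unsigned id() const {
    assert(_id != UNDEFINED && "Using undefined GC ID");
    return _id;
  }
  bool is_undefined() const { return _id == UNDEFINED; }
  static GcId create()    { return GcId(_next_id++); }  // consume the next id
  static GcId peek()      { return GcId(_next_id); }    // look without consuming
  static GcId undefined() { return GcId(UNDEFINED); }
};

unsigned GcId::_next_id = 0;

int main() {
  GcId a = GcId::create();            // id 0
  GcId b = GcId::create();            // id 1
  GcId none = GcId::undefined();
  std::printf("a=%u b=%u undefined=%d\n", a.id(), b.id(), (int)none.is_undefined());
  return 0;
}

Like the original, the sketch leaves the counter unsynchronized, so concurrent callers would need external serialization.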
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc_implementation/shared/copyFailedInfo.hpp" #include "gc_implementation/shared/gcHeapSummary.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "gc_implementation/shared/gcTimer.hpp" #include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/objectCountEventSender.hpp" @@ -38,19 +39,14 @@ #include "gc_implementation/g1/evacuationInfo.hpp" #endif -#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?") -#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?") - -static GCId GCTracer_next_gc_id = 0; -static GCId create_new_gc_id() { - return GCTracer_next_gc_id++; -} +#define assert_unset_gc_id() assert(_shared_gc_info.gc_id().is_undefined(), "GC already started?") +#define assert_set_gc_id() assert(!_shared_gc_info.gc_id().is_undefined(), "GC not started?") void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) { assert_unset_gc_id(); - GCId gc_id = create_new_gc_id(); - _shared_gc_info.set_id(gc_id); + GCId gc_id = GCId::create(); + _shared_gc_info.set_gc_id(gc_id); _shared_gc_info.set_cause(cause); _shared_gc_info.set_start_timestamp(timestamp); } @@ -62,7 +58,7 @@ } bool GCTracer::has_reported_gc_start() const { - return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID; + return !_shared_gc_info.gc_id().is_undefined(); } void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) { @@ -81,7 +77,7 @@ report_gc_end_impl(timestamp, time_partitions); - _shared_gc_info.set_id(SharedGCInfo::UNSET_GCID); + _shared_gc_info.set_gc_id(GCId::undefined()); } void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const { @@ -132,7 +128,7 @@ if (!cit.allocation_failed()) { HeapInspection hi(false, false, false, NULL); hi.populate_table(&cit, is_alive_cl); - ObjectCountEventSenderClosure event_sender(_shared_gc_info.id(), cit.size_of_instances_in_words(), Ticks::now()); + ObjectCountEventSenderClosure event_sender(_shared_gc_info.gc_id(), cit.size_of_instances_in_words(), Ticks::now()); cit.iterate(&event_sender); } }
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -27,6 +27,7 @@ #include "gc_interface/gcCause.hpp" #include "gc_interface/gcName.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "gc_implementation/shared/gcWhen.hpp" #include "gc_implementation/shared/copyFailedInfo.hpp" #include "memory/allocation.hpp" @@ -38,7 +39,6 @@ #include "utilities/macros.hpp" #include "utilities/ticks.hpp" -typedef uint GCId; class EvacuationInfo; class GCHeapSummary; @@ -50,11 +50,8 @@ class BoolObjectClosure; class SharedGCInfo VALUE_OBJ_CLASS_SPEC { - public: - static const GCId UNSET_GCID = (GCId)-1; - private: - GCId _id; + GCId _gc_id; GCName _name; GCCause::Cause _cause; Ticks _start_timestamp; @@ -64,7 +61,7 @@ public: SharedGCInfo(GCName name) : - _id(UNSET_GCID), + _gc_id(GCId::undefined()), _name(name), _cause(GCCause::_last_gc_cause), _start_timestamp(), @@ -73,8 +70,8 @@ _longest_pause() { } - void set_id(GCId id) { _id = id; } - GCId id() const { return _id; } + void set_gc_id(GCId gc_id) { _gc_id = gc_id; } + const GCId& gc_id() const { return _gc_id; } void set_start_timestamp(const Ticks& timestamp) { _start_timestamp = timestamp; } const Ticks start_timestamp() const { return _start_timestamp; } @@ -131,10 +128,11 @@ void report_gc_reference_stats(const ReferenceProcessorStats& rp) const; void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN; bool has_reported_gc_start() const; + const GCId& gc_id() { return _shared_gc_info.gc_id(); } protected: GCTracer(GCName name) : _shared_gc_info(name) {} - virtual void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp); + void report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp); virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions); private:
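With SharedGCInfo now holding a GCId instead of a raw integer, the tracer's start/end bookkeeping reduces to checking the undefined sentinel, which is what the rewritten assert_unset_gc_id()/assert_set_gc_id() macros do. A compact, self-contained sketch of that lifecycle guard (illustrative names, UINT_MAX standing in for GCId's UNDEFINED):

#include <cassert>
#include <climits>

// Sketch (not HotSpot code): a tracer that uses UINT_MAX as "no GC in progress",
// mirroring how SharedGCInfo starts out with GCId::undefined() and how the
// assert macros in gcTrace.cpp test is_undefined().
class TracerModel {
  static const unsigned UNDEFINED = UINT_MAX;
  static unsigned _next_id;
  unsigned _gc_id;
 public:
  TracerModel() : _gc_id(UNDEFINED) {}

  void report_gc_start() {
    assert(_gc_id == UNDEFINED && "GC already started?");
    _gc_id = _next_id++;
  }
  void report_gc_end() {
    assert(_gc_id != UNDEFINED && "GC not started?");
    _gc_id = UNDEFINED;                 // ready for the next cycle
  }
  bool has_reported_gc_start() const { return _gc_id != UNDEFINED; }
};
unsigned TracerModel::_next_id = 0;

int main() {
  TracerModel tracer;
  tracer.report_gc_start();
  assert(tracer.has_reported_gc_start());
  tracer.report_gc_end();
  return 0;
}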
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -43,7 +43,7 @@ void GCTracer::send_garbage_collection_event() const { EventGCGarbageCollection event(UNTIMED); if (event.should_commit()) { - event.set_gcId(_shared_gc_info.id()); + event.set_gcId(_shared_gc_info.gc_id().id()); event.set_name(_shared_gc_info.name()); event.set_cause((u2) _shared_gc_info.cause()); event.set_sumOfPauses(_shared_gc_info.sum_of_pauses()); @@ -57,7 +57,7 @@ void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const { EventGCReferenceStatistics e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_type((u1)type); e.set_count(count); e.commit(); @@ -68,7 +68,7 @@ const MetaspaceChunkFreeListSummary& summary) const { EventMetaspaceChunkFreeListSummary e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_when(when); e.set_metadataType(mdtype); @@ -91,7 +91,7 @@ void ParallelOldTracer::send_parallel_old_event() const { EventGCParallelOld e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix()); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); @@ -102,7 +102,7 @@ void YoungGCTracer::send_young_gc_event() const { EventGCYoungGarbageCollection e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_tenuringThreshold(_tenuring_threshold); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); @@ -113,7 +113,7 @@ void OldGCTracer::send_old_gc_event() const { EventGCOldGarbageCollection e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); e.commit(); @@ -132,7 +132,7 @@ void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const { EventPromotionFailed e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_data(to_trace_struct(pf_info)); e.set_thread(pf_info.thread()->thread_id()); e.commit(); @@ -143,7 +143,7 @@ void OldGCTracer::send_concurrent_mode_failure_event() { EventConcurrentModeFailure e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.commit(); } } @@ -152,7 +152,7 @@ void G1NewTracer::send_g1_young_gc_event() { EventGCG1GarbageCollection e(UNTIMED); if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_type(_g1_young_gc_info.type()); e.set_starttime(_shared_gc_info.start_timestamp()); e.set_endtime(_shared_gc_info.end_timestamp()); @@ -163,7 +163,7 @@ void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { EventEvacuationInfo e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_cSetRegions(info->collectionset_regions()); e.set_cSetUsedBefore(info->collectionset_used_before()); e.set_cSetUsedAfter(info->collectionset_used_after()); @@ -179,7 +179,7 @@ void 
G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const { EventEvacuationFailed e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_data(to_trace_struct(ef_info)); e.commit(); } @@ -206,17 +206,17 @@ } class GCHeapSummaryEventSender : public GCHeapSummaryVisitor { - GCId _id; + GCId _gc_id; GCWhen::Type _when; public: - GCHeapSummaryEventSender(GCId id, GCWhen::Type when) : _id(id), _when(when) {} + GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {} void visit(const GCHeapSummary* heap_summary) const { const VirtualSpaceSummary& heap_space = heap_summary->heap(); EventGCHeapSummary e; if (e.should_commit()) { - e.set_gcId(_id); + e.set_gcId(_gc_id.id()); e.set_when((u1)_when); e.set_heapSpace(to_trace_struct(heap_space)); e.set_heapUsed(heap_summary->used()); @@ -236,7 +236,7 @@ EventPSHeapSummary e; if (e.should_commit()) { - e.set_gcId(_id); + e.set_gcId(_gc_id.id()); e.set_when((u1)_when); e.set_oldSpace(to_trace_struct(ps_heap_summary->old())); @@ -251,7 +251,7 @@ }; void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const { - GCHeapSummaryEventSender visitor(_shared_gc_info.id(), when); + GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when); heap_summary.accept(&visitor); } @@ -268,7 +268,7 @@ void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const { EventMetaspaceSummary e; if (e.should_commit()) { - e.set_gcId(_shared_gc_info.id()); + e.set_gcId(_shared_gc_info.gc_id().id()); e.set_when((u1) when); e.set_gcThreshold(meta_space_summary.capacity_until_GC()); e.set_metaspace(to_trace_struct(meta_space_summary.meta_space())); @@ -287,7 +287,7 @@ void send_phase(PausePhase* pause) { T event(UNTIMED); if (event.should_commit()) { - event.set_gcId(_gc_id); + event.set_gcId(_gc_id.id()); event.set_name(pause->name()); event.set_starttime(pause->start()); event.set_endtime(pause->end()); @@ -311,7 +311,7 @@ }; void GCTracer::send_phase_events(TimePartitions* time_partitions) const { - PhaseSender phase_reporter(_shared_gc_info.id()); + PhaseSender phase_reporter(_shared_gc_info.gc_id()); TimePartitionPhasesIterator iter(time_partitions); while (iter.has_next()) {
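Throughout the event senders the change is mechanical: the typed GCId travels with the sender object and is only unwrapped via id() at the point where the trace event's integer field is filled in. A toy version of the GCHeapSummaryEventSender shape, with printf standing in for the JFR event commit (all types below are illustrative stand-ins, not the real EventGCHeapSummary or GCId):

#include <cstdio>

// Toy stand-ins for the trace-event plumbing.
struct HeapSummary { unsigned long used; };

struct GcId {
  unsigned value;
  unsigned id() const { return value; }   // unwrap only at the event boundary
};

// Mirrors the GCHeapSummaryEventSender shape: the visitor carries the typed id
// and only converts it to a raw integer when filling in the event payload.
class HeapSummaryEventSender {
  GcId _gc_id;
  const char* _when;
 public:
  HeapSummaryEventSender(GcId gc_id, const char* when) : _gc_id(gc_id), _when(when) {}

  void visit(const HeapSummary& summary) const {
    // event.set_gcId(_gc_id.id()); event.set_when(...); event.commit();
    std::printf("GCHeapSummary gcId=%u when=%s used=%lu\n",
                _gc_id.id(), _when, summary.used);
  }
};

int main() {
  HeapSummary before = { 128ul * 1024 * 1024 };
  GcId gc_id = { 7 };
  HeapSummaryEventSender sender(gc_id, "before GC");
  sender.visit(before);
  return 0;
}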
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTraceTime.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcTraceTime.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "gc_implementation/shared/gcTimer.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcTraceTime.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" @@ -34,7 +35,7 @@ #include "utilities/ticks.inline.hpp" -GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) : +GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id) : _title(title), _doit(doit), _print_cr(print_cr), _timer(timer), _start_counter() { if (_doit || _timer != NULL) { _start_counter.stamp(); @@ -52,6 +53,9 @@ gclog_or_tty->stamp(); gclog_or_tty->print(": "); } + if (PrintGCID) { + gclog_or_tty->print("#%u: ", gc_id.id()); + } gclog_or_tty->print("[%s", title); gclog_or_tty->flush(); }
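GCTraceTime now receives the GCId explicitly and, when the new PrintGCID flag is set, prefixes its log line with "#<id>: " before the usual "[title ...". A minimal standalone sketch of that scoped-logging behaviour, assuming a plain bool for the flag and std::clock for timing instead of HotSpot's Ticks/GCTimer:

#include <cstdio>
#include <ctime>

// Standalone sketch of the GCTraceTime pattern: a scoped object that prints
// "#<gc id>: [<title>" on entry and the elapsed time on exit. Flag and member
// names are illustrative; the real class also feeds a GCTimer.
static bool PrintGCIDFlag = true;   // stands in for the new PrintGCID flag

class ScopedGCTrace {
  const char* _title;
  unsigned _gc_id;
  std::clock_t _start;
 public:
  ScopedGCTrace(const char* title, unsigned gc_id)
      : _title(title), _gc_id(gc_id), _start(std::clock()) {
    if (PrintGCIDFlag) {
      std::printf("#%u: ", _gc_id);    // the new id prefix
    }
    std::printf("[%s", _title);
    std::fflush(stdout);
  }
  ~ScopedGCTrace() {
    double secs = double(std::clock() - _start) / CLOCKS_PER_SEC;
    std::printf(", %.7f secs]\n", secs);
  }
};

int main() {
  ScopedGCTrace t("GC pause (young)", 42);
  // ... the work being timed would run here ...
  return 0;
}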
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTraceTime.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/shared/gcTraceTime.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP #define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP +#include "gc_implementation/shared/gcTrace.hpp" #include "prims/jni_md.h" #include "utilities/ticks.hpp" @@ -38,7 +39,7 @@ Ticks _start_counter; public: - GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer); + GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer, GCId gc_id); ~GCTraceTime(); };
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -28,6 +28,7 @@ #include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/sharedHeap.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/thread.inline.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "runtime/atomic.inline.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/shared/mutableSpace.hpp"
--- a/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "gc_implementation/shared/objectCountEventSender.hpp" #include "memory/heapInspection.hpp" #include "trace/tracing.hpp" @@ -38,7 +39,7 @@ "Only call this method if the event is enabled"); EventObjectCountAfterGC event(UNTIMED); - event.set_gcId(gc_id); + event.set_gcId(gc_id.id()); event.set_class(entry->klass()); event.set_count(entry->count()); event.set_totalSize(entry->words() * BytesPerWord);
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -558,13 +558,13 @@ void CollectedHeap::pre_full_gc_dump(GCTimer* timer) { if (HeapDumpBeforeFullGC) { - GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer); + GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create()); // We are doing a "major" collection and a heap dump before // major collection has been requested. HeapDumper::dump_heap(); } if (PrintClassHistogramBeforeFullGC) { - GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer); + GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create()); VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); inspector.doit(); } @@ -572,11 +572,11 @@ void CollectedHeap::post_full_gc_dump(GCTimer* timer) { if (HeapDumpAfterFullGC) { - GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer); + GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create()); HeapDumper::dump_heap(); } if (PrintClassHistogramAfterFullGC) { - GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer); + GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create()); VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */); inspector.doit(); }
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" @@ -2782,11 +2783,11 @@ if (TraceExceptions) { ttyLocker ttyl; ResourceMark rm; - tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop()); + tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop())); tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, - istate->bcp() - (intptr_t)METHOD->code_base(), - continuation_bci, THREAD); + (int)(istate->bcp() - METHOD->code_base()), + (int)continuation_bci, p2i(THREAD)); } // for AbortVMOnException flag NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); @@ -2798,11 +2799,11 @@ if (TraceExceptions) { ttyLocker ttyl; ResourceMark rm; - tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop()); + tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop())); tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT, - istate->bcp() - (intptr_t)METHOD->code_base(), - THREAD); + (int)(istate->bcp() - METHOD->code_base()), + p2i(THREAD)); } // for AbortVMOnException flag NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); @@ -3401,7 +3402,7 @@ tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf); tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry); tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link); - tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp); + tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp); tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base); tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit); tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
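The tracing fixes in the interpreter all follow one rule: a pointer is passed through p2i() so its type matches INTPTR_FORMAT on every platform, and pointer differences are narrowed explicitly before being printed with %d. A small self-contained illustration of the same convention; the macro and helper below are local stand-ins, not HotSpot's definitions (real p2i() returns intptr_t and INTPTR_FORMAT is defined per platform):

#include <cinttypes>
#include <cstdio>

// Local stand-ins for INTPTR_FORMAT and p2i(): pair a fixed-width pointer
// format with a value converted to the matching integer type.
#define MY_INTPTR_FORMAT "0x%016" PRIxPTR

static inline uintptr_t my_p2i(const void* p) {
  return (uintptr_t)p;
}

int main() {
  int x = 0;
  std::printf("address of x: " MY_INTPTR_FORMAT "\n", my_p2i(&x));

  const char* base = "bytecode stream";
  const char* bcp  = base + 3;
  std::printf("at bci %d\n", (int)(bcp - base));   // narrow ptrdiff_t explicitly for %d
  return 0;
}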
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,6 +1,6 @@ /* - * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. - * Copyright 2012, 2013 SAP AG. All rights reserved. + * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright 2012, 2014 SAP AG. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,11 +86,11 @@ " \t-> " PTR_FORMAT "(%d)", \ (int) THREAD->osthread()->thread_id(), \ BCI(), \ - MDX(), \ + p2i(MDX()), \ (MDX() == NULL \ ? 0 \ : istate->method()->method_data()->dp_to_di((address)MDX())), \ - mdx, \ + p2i(mdx), \ istate->method()->method_data()->dp_to_di((address)mdx) \ ); \ }; \ @@ -107,7 +107,7 @@ MethodData *md = istate->method()->method_data(); \ tty->cr(); \ tty->print("method data at mdx " PTR_FORMAT "(0) for", \ - md->data_layout_at(md->bci_to_di(0))); \ + p2i(md->data_layout_at(md->bci_to_di(0)))); \ istate->method()->print_short_name(tty); \ tty->cr(); \ if (md != NULL) { \ @@ -115,7 +115,7 @@ address mdx = (address) MDX(); \ if (mdx != NULL) { \ tty->print_cr("current mdx " PTR_FORMAT "(%d)", \ - mdx, \ + p2i(mdx), \ istate->method()->method_data()->dp_to_di(mdx)); \ } \ } else { \
--- a/hotspot/src/share/vm/interpreter/bytecodes.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/interpreter/bytecodes.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -541,9 +541,6 @@ def(_shouldnotreachhere , "_shouldnotreachhere" , "b" , NULL , T_VOID , 0, false); - // platform specific JVM bytecodes - pd_initialize(); - // compare can_trap information for each bytecode with the // can_trap information for the corresponding base bytecode // (if a rewritten bytecode can trap, so must the base bytecode)
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -287,23 +287,6 @@ _shouldnotreachhere, // For debugging - // Platform specific JVM bytecodes -#ifdef TARGET_ARCH_x86 -# include "bytecodes_x86.hpp" -#endif -#ifdef TARGET_ARCH_sparc -# include "bytecodes_sparc.hpp" -#endif -#ifdef TARGET_ARCH_zero -# include "bytecodes_zero.hpp" -#endif -#ifdef TARGET_ARCH_arm -# include "bytecodes_arm.hpp" -#endif -#ifdef TARGET_ARCH_ppc -# include "bytecodes_ppc.hpp" -#endif - number_of_codes }; @@ -348,8 +331,6 @@ static void def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap); static void def(Code code, const char* name, const char* format, const char* wide_format, BasicType result_type, int depth, bool can_trap, Code java_code); - static void pd_initialize(); // platform specific initialization - static Code pd_base_code_for(Code code); // platform specific base_code_for implementation // Verify that bcp points into method #ifdef ASSERT
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -42,6 +42,7 @@ #include "oops/symbol.hpp" #include "prims/jvmtiExport.hpp" #include "prims/nativeLookup.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp"
--- a/hotspot/src/share/vm/memory/allocation.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/allocation.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -29,7 +29,7 @@ #include "memory/metaspaceShared.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/os.hpp" #include "runtime/task.hpp" #include "runtime/threadCritical.hpp"
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/cardTableRS.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -29,6 +29,7 @@ #include "memory/generation.hpp" #include "memory/space.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/java.hpp" #include "runtime/os.hpp" #include "utilities/macros.hpp"
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -41,6 +41,7 @@ #include "memory/space.inline.hpp" #include "oops/instanceRefKlass.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/java.hpp" #include "runtime/prefetch.inline.hpp" #include "runtime/thread.inline.hpp" @@ -585,7 +586,7 @@ init_assuming_no_promotion_failure(); - GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); + GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id()); // Capture heap used before collection (for printing). size_t gch_prev_used = gch->used(); @@ -641,7 +642,7 @@ rp->setup_policy(clear_all_soft_refs); const ReferenceProcessorStats& stats = rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers, - NULL, _gc_timer); + NULL, _gc_timer, gc_tracer.gc_id()); gc_tracer.report_gc_reference_stats(stats); if (!_promotion_failed) {
--- a/hotspot/src/share/vm/memory/gcLocker.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/gcLocker.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ #include "memory/gcLocker.inline.hpp" #include "memory/resourceArea.hpp" #include "memory/sharedHeap.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/thread.inline.hpp" volatile jint GC_locker::_jni_lock_count = 0; @@ -59,6 +60,17 @@ assert(_jni_lock_count == count, "must be equal"); } } + +// In debug mode track the locking state at all times +void GC_locker::increment_debug_jni_lock_count() { + assert(_debug_jni_lock_count >= 0, "bad value"); + Atomic::inc(&_debug_jni_lock_count); +} + +void GC_locker::decrement_debug_jni_lock_count() { + assert(_debug_jni_lock_count > 0, "bad value"); + Atomic::dec(&_debug_jni_lock_count); +} #endif bool GC_locker::check_active_before_gc() {
--- a/hotspot/src/share/vm/memory/gcLocker.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/gcLocker.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -94,18 +94,8 @@ } // In debug mode track the locking state at all times - static void increment_debug_jni_lock_count() { -#ifdef ASSERT - assert(_debug_jni_lock_count >= 0, "bad value"); - Atomic::inc(&_debug_jni_lock_count); -#endif - } - static void decrement_debug_jni_lock_count() { -#ifdef ASSERT - assert(_debug_jni_lock_count > 0, "bad value"); - Atomic::dec(&_debug_jni_lock_count); -#endif - } + static void increment_debug_jni_lock_count() NOT_DEBUG_RETURN; + static void decrement_debug_jni_lock_count() NOT_DEBUG_RETURN; // Set the current lock count static void set_jni_lock_count(int count) {
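The debug-only JNI lock counters move their bodies out of the header; the declarations use NOT_DEBUG_RETURN so product builds get an empty inline body while debug builds pick up the out-of-line definitions in gcLocker.cpp. A simplified sketch of how such a macro pair behaves, keyed off NDEBUG rather than HotSpot's ASSERT, with plain ++/-- standing in for Atomic::inc/dec:

#include <cassert>
#include <cstdio>

// Simplified stand-in for the NOT_DEBUG_RETURN idiom: in release builds the
// declaration becomes an empty inline body, in debug builds it stays a plain
// declaration and the out-of-line definition below provides the real body.
#ifdef NDEBUG
#define MY_NOT_DEBUG_RETURN {}
#else
#define MY_NOT_DEBUG_RETURN /* body provided out of line */
#endif

struct LockCounter {
  static int _debug_count;
  static void increment_debug_count() MY_NOT_DEBUG_RETURN;
  static void decrement_debug_count() MY_NOT_DEBUG_RETURN;
};

int LockCounter::_debug_count = 0;

#ifndef NDEBUG
// Debug-only definitions, analogous to the new code in gcLocker.cpp.
void LockCounter::increment_debug_count() {
  assert(_debug_count >= 0 && "bad value");
  ++_debug_count;                      // the real code uses Atomic::inc()
}
void LockCounter::decrement_debug_count() {
  assert(_debug_count > 0 && "bad value");
  --_debug_count;
}
#endif

int main() {
  LockCounter::increment_debug_count();
  LockCounter::decrement_debug_count();
  std::printf("debug count: %d\n", LockCounter::_debug_count);
  return 0;
}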
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -28,6 +28,7 @@ #include "classfile/vmSymbols.hpp" #include "code/icBuffer.hpp" #include "gc_implementation/shared/collectorCounters.hpp" +#include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/vmGCOperations.hpp" #include "gc_interface/collectedHeap.inline.hpp" @@ -384,7 +385,9 @@ const char* gc_cause_prefix = complete ? "Full GC" : "GC"; gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps); TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL); + // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later + // so we can assume here that the next GC id is what we want. + GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL, GCId::peek()); gc_prologue(complete); increment_total_collections(complete); @@ -417,7 +420,9 @@ } // Timer for individual generations. Last argument is false: no CR // FIXME: We should try to start the timing earlier to cover more of the GC pause - GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL); + // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later + // so we can assume here that the next GC id is what we want. + GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL, GCId::peek()); TraceCollectorStats tcs(_gens[i]->counters()); TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -69,7 +69,7 @@ _ref_processor = rp; rp->setup_policy(clear_all_softrefs); - GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); + GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id()); gch->trace_heap_before_gc(_gc_tracer); @@ -193,7 +193,7 @@ void GenMarkSweep::mark_sweep_phase1(int level, bool clear_all_softrefs) { // Recursively traverse all live objects and mark them - GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer); + GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace(" 1"); GenCollectedHeap* gch = GenCollectedHeap::heap(); @@ -220,7 +220,7 @@ ref_processor()->setup_policy(clear_all_softrefs); const ReferenceProcessorStats& stats = ref_processor()->process_discovered_references( - &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer); + &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id()); gc_tracer()->report_gc_reference_stats(stats); } @@ -262,7 +262,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); - GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer); + GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("2"); gch->prepare_for_compaction(); @@ -279,7 +279,7 @@ GenCollectedHeap* gch = GenCollectedHeap::heap(); // Adjust the pointers to reflect the new locations - GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer); + GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("3"); // Need new claim bits for the pointer adjustment tracing. @@ -327,7 +327,7 @@ // to use a higher index (saved from phase2) when verifying perm_gen. GenCollectedHeap* gch = GenCollectedHeap::heap(); - GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer); + GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id()); trace("4"); GenCompactClosure blk;
--- a/hotspot/src/share/vm/memory/iterator.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/iterator.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -27,6 +27,7 @@ #include "oops/oop.inline.hpp" void KlassToOopClosure::do_klass(Klass* k) { + assert(_oop_closure != NULL, "Not initialized?"); k->oops_do(_oop_closure); }
--- a/hotspot/src/share/vm/memory/iterator.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/iterator.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -115,9 +115,19 @@ }; class KlassToOopClosure : public KlassClosure { + friend class MetadataAwareOopClosure; + friend class MetadataAwareOopsInGenClosure; + OopClosure* _oop_closure; - public: - KlassToOopClosure(OopClosure* oop_closure) : _oop_closure(oop_closure) {} + + // Used when _oop_closure couldn't be set in an initialization list. + void initialize(OopClosure* oop_closure) { + assert(_oop_closure == NULL, "Should only be called once"); + _oop_closure = oop_closure; + } + +public: + KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {} virtual void do_klass(Klass* k); }; @@ -135,6 +145,29 @@ void do_cld(ClassLoaderData* cld); }; +// The base class for all concurrent marking closures, +// that participates in class unloading. +// It's used to proxy through the metadata to the oops defined in them. +class MetadataAwareOopClosure: public ExtendedOopClosure { + KlassToOopClosure _klass_closure; + + public: + MetadataAwareOopClosure() : ExtendedOopClosure() { + _klass_closure.initialize(this); + } + MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { + _klass_closure.initialize(this); + } + + virtual bool do_metadata() { return do_metadata_nv(); } + inline bool do_metadata_nv() { return true; } + + virtual void do_klass(Klass* k); + void do_klass_nv(Klass* k); + + virtual void do_class_loader_data(ClassLoaderData* cld); +}; + // ObjectClosure is used for iterating through an object space class ObjectClosure : public Closure { @@ -318,4 +351,16 @@ } }; + +// Helper defines for ExtendOopClosure + +#define if_do_metadata_checked(closure, nv_suffix) \ + /* Make sure the non-virtual and the virtual versions match. */ \ + assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \ + "Inconsistency in do_metadata"); \ + if (closure->do_metadata##nv_suffix()) + +#define assert_should_ignore_metadata(closure, nv_suffix) \ + assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented") + #endif // SHARE_VM_MEMORY_ITERATOR_HPP
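The new MetadataAwareOopClosure keeps the existing two-level dispatch convention: every closure has a virtual do_metadata() and a non-virtual do_metadata_nv(), the specialized (macro-expanded) iterators call the _nv flavour, and if_do_metadata_checked() asserts the two agree. A self-contained sketch of that virtual/non-virtual pattern with toy closure types (not the HotSpot classes):

#include <cassert>
#include <cstdio>

// Sketch of the virtual/non-virtual "do_metadata" pattern: statically
// dispatched iteration calls the _nv flavour, and an assert checks that it
// agrees with the virtual flavour a subclass may have overridden.
class OopClosureModel {
 public:
  virtual ~OopClosureModel() {}
  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_nv() { return true; }      // the metadata-aware default
  virtual void do_oop(void** p) = 0;
};

class CountingClosure : public OopClosureModel {
 public:
  int count;
  CountingClosure() : count(0) {}
  virtual void do_oop(void** p) { (void)p; ++count; }
};

// Stands in for a macro-generated oop_oop_iterate##nv_suffix body.
template <typename ClosureType>
void iterate_statically(void** fields, int n, ClosureType* cl) {
  assert(cl->do_metadata_nv() == cl->do_metadata() && "Inconsistency in do_metadata");
  if (cl->do_metadata_nv()) {
    // here the real code would visit the klass / class loader data
  }
  for (int i = 0; i < n; i++) {
    cl->do_oop(&fields[i]);             // no virtual call once inlined
  }
}

int main() {
  void* fields[3] = { 0, 0, 0 };
  CountingClosure cl;
  iterate_statically(fields, 3, &cl);
  std::printf("visited %d fields\n", cl.count);
  return 0;
}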
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/memory/iterator.inline.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_MEMORY_ITERATOR_INLINE_HPP +#define SHARE_VM_MEMORY_ITERATOR_INLINE_HPP + +#include "classfile/classLoaderData.hpp" +#include "memory/iterator.hpp" +#include "oops/klass.hpp" +#include "utilities/debug.hpp" + +inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) { + assert(_klass_closure._oop_closure == this, "Must be"); + + bool claim = true; // Must claim the class loader data before processing. + cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim); +} + +inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) { + ClassLoaderData* cld = k->class_loader_data(); + do_class_loader_data(cld); +} + +inline void MetadataAwareOopClosure::do_klass(Klass* k) { do_klass_nv(k); } + +#endif // SHARE_VM_MEMORY_ITERATOR_INLINE_HPP
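The claim argument that do_class_loader_data() passes to cld->oops_do() exists so that when several GC workers reach the same class loader data, only one of them actually walks its oops. A sketch of that "claim before processing" idea; the types and the atomic flag below are illustrative, not the real ClassLoaderData claiming machinery:

#include <atomic>
#include <cstdio>

// Toy class loader data with a one-shot claim: the worker that wins the CAS
// processes the oops, later workers see it is already claimed and skip it.
struct ClassLoaderDataModel {
  std::atomic<bool> _claimed;
  int _oop_count;
  ClassLoaderDataModel() : _claimed(false), _oop_count(0) {}

  bool try_claim() {
    bool expected = false;
    return _claimed.compare_exchange_strong(expected, true);
  }

  void oops_do(bool must_claim) {
    if (must_claim && !try_claim()) {
      return;                      // another worker already processed this CLD
    }
    ++_oop_count;                  // stand-in for visiting the CLD's oops
  }
};

int main() {
  ClassLoaderDataModel cld;
  cld.oops_do(true);               // first visitor does the work
  cld.oops_do(true);               // second visitor sees it is claimed and skips
  std::printf("processed %d time(s)\n", cld._oop_count);
  return 0;
}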
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -190,7 +190,8 @@ OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, - GCTimer* gc_timer) { + GCTimer* gc_timer, + GCId gc_id) { NOT_PRODUCT(verify_ok_to_handle_reflists()); assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); @@ -212,7 +213,7 @@ // Soft references size_t soft_count = 0; { - GCTraceTime tt("SoftReference", trace_time, false, gc_timer); + GCTraceTime tt("SoftReference", trace_time, false, gc_timer, gc_id); soft_count = process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, is_alive, keep_alive, complete_gc, task_executor); @@ -223,7 +224,7 @@ // Weak references size_t weak_count = 0; { - GCTraceTime tt("WeakReference", trace_time, false, gc_timer); + GCTraceTime tt("WeakReference", trace_time, false, gc_timer, gc_id); weak_count = process_discovered_reflist(_discoveredWeakRefs, NULL, true, is_alive, keep_alive, complete_gc, task_executor); @@ -232,7 +233,7 @@ // Final references size_t final_count = 0; { - GCTraceTime tt("FinalReference", trace_time, false, gc_timer); + GCTraceTime tt("FinalReference", trace_time, false, gc_timer, gc_id); final_count = process_discovered_reflist(_discoveredFinalRefs, NULL, false, is_alive, keep_alive, complete_gc, task_executor); @@ -241,7 +242,7 @@ // Phantom references size_t phantom_count = 0; { - GCTraceTime tt("PhantomReference", trace_time, false, gc_timer); + GCTraceTime tt("PhantomReference", trace_time, false, gc_timer, gc_id); phantom_count = process_discovered_reflist(_discoveredPhantomRefs, NULL, false, is_alive, keep_alive, complete_gc, task_executor); @@ -253,7 +254,7 @@ // thus use JNI weak references to circumvent the phantom references and // resurrect a "post-mortem" object. { - GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer); + GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id); if (task_executor != NULL) { task_executor->set_single_threaded_mode(); } @@ -1251,14 +1252,15 @@ OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield, - GCTimer* gc_timer) { + GCTimer* gc_timer, + GCId gc_id) { NOT_PRODUCT(verify_ok_to_handle_reflists()); // Soft references { GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer); + false, gc_timer, gc_id); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1271,7 +1273,7 @@ // Weak references { GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer); + false, gc_timer, gc_id); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1284,7 +1286,7 @@ // Final references { GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer); + false, gc_timer, gc_id); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return; @@ -1297,7 +1299,7 @@ // Phantom references { GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, - false, gc_timer); + false, gc_timer, gc_id); for (uint i = 0; i < _max_num_q; i++) { if (yield->should_return()) { return;
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP +#include "gc_implementation/shared/gcTrace.hpp" #include "memory/referencePolicy.hpp" #include "memory/referenceProcessorStats.hpp" #include "memory/referenceType.hpp" @@ -349,7 +350,8 @@ OopClosure* keep_alive, VoidClosure* complete_gc, YieldClosure* yield, - GCTimer* gc_timer); + GCTimer* gc_timer, + GCId gc_id); // Delete entries in the discovered lists that have // either a null referent or are not active. Such @@ -480,7 +482,8 @@ OopClosure* keep_alive, VoidClosure* complete_gc, AbstractRefProcTaskExecutor* task_executor, - GCTimer *gc_timer); + GCTimer *gc_timer, + GCId gc_id); // Enqueue references at end of GC (called by the garbage collector) bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
--- a/hotspot/src/share/vm/memory/space.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/space.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -37,6 +37,7 @@ #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" #include "runtime/java.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/prefetch.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/safepoint.hpp"
--- a/hotspot/src/share/vm/memory/specialized_oop_closures.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/specialized_oop_closures.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,7 +25,6 @@ #ifndef SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_HPP #define SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_HPP -#include "runtime/atomic.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.inline.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -27,7 +27,6 @@ #include "gc_interface/collectedHeap.hpp" #include "memory/threadLocalAllocBuffer.hpp" -#include "runtime/atomic.hpp" #include "runtime/thread.hpp" #include "utilities/copy.hpp"
--- a/hotspot/src/share/vm/memory/universe.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/memory/universe.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -53,6 +53,7 @@ #include "oops/typeArrayKlass.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fprofiler.hpp" #include "runtime/handles.inline.hpp"
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/arrayKlass.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -93,7 +93,7 @@ ResourceMark rm(THREAD); k->initialize_supers(super_klass(), CHECK); k->vtable()->initialize_vtable(false, CHECK); - java_lang_Class::create_mirror(k, Handle(NULL), CHECK); + java_lang_Class::create_mirror(k, Handle(NULL), Handle(NULL), CHECK); } GrowableArray<Klass*>* ArrayKlass::compute_secondary_supers(int num_extra_slots) {
--- a/hotspot/src/share/vm/oops/compiledICHolder.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/compiledICHolder.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -27,11 +27,27 @@ #include "oops/klass.hpp" #include "oops/method.hpp" #include "oops/oop.inline2.hpp" +#include "runtime/atomic.inline.hpp" volatile int CompiledICHolder::_live_count; volatile int CompiledICHolder::_live_not_claimed_count; +CompiledICHolder::CompiledICHolder(Method* method, Klass* klass) + : _holder_method(method), _holder_klass(klass) { +#ifdef ASSERT + Atomic::inc(&_live_count); + Atomic::inc(&_live_not_claimed_count); +#endif // ASSERT +} + +#ifdef ASSERT +CompiledICHolder::~CompiledICHolder() { + assert(_live_count > 0, "underflow"); + Atomic::dec(&_live_count); +} +#endif // ASSERT + // Printing void CompiledICHolder::print_on(outputStream* st) const { @@ -51,3 +67,11 @@ guarantee(holder_method()->is_method(), "should be method"); guarantee(holder_klass()->is_klass(), "should be klass"); } + +#ifdef ASSERT + +void CompiledICHolder::claim() { + Atomic::dec(&_live_not_claimed_count); +} + +#endif // ASSERT
--- a/hotspot/src/share/vm/oops/compiledICHolder.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/compiledICHolder.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_OOPS_COMPILEDICHOLDEROOP_HPP #include "oops/oop.hpp" +#include "utilities/macros.hpp" // A CompiledICHolder* is a helper object for the inline cache implementation. // It holds an intermediate value (method+klass pair) used when converting from @@ -50,20 +51,8 @@ public: // Constructor - CompiledICHolder(Method* method, Klass* klass) - : _holder_method(method), _holder_klass(klass) { -#ifdef ASSERT - Atomic::inc(&_live_count); - Atomic::inc(&_live_not_claimed_count); -#endif - } - - ~CompiledICHolder() { -#ifdef ASSERT - assert(_live_count > 0, "underflow"); - Atomic::dec(&_live_count); -#endif - } + CompiledICHolder(Method* method, Klass* klass); + ~CompiledICHolder() NOT_DEBUG_RETURN; static int live_count() { return _live_count; } static int live_not_claimed_count() { return _live_not_claimed_count; } @@ -91,11 +80,7 @@ const char* internal_name() const { return "{compiledICHolder}"; } - void claim() { -#ifdef ASSERT - Atomic::dec(&_live_not_claimed_count); -#endif - } + void claim() NOT_DEBUG_RETURN; }; #endif // SHARE_VM_OOPS_COMPILEDICHOLDEROOP_HPP
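CompiledICHolder's constructor, destructor and claim() move out of line so the debug-only instance accounting (Atomic::inc/dec of _live_count and _live_not_claimed_count) no longer forces atomics into the header. A standalone sketch of that live/unclaimed bookkeeping, using std::atomic in place of HotSpot's Atomic class and counting unconditionally instead of only under ASSERT:

#include <atomic>
#include <cassert>
#include <cstdio>

// Sketch of the live-count bookkeeping: every construction bumps both
// counters, destruction drops _live_count, and claim() drops
// _live_not_claimed_count, so leaked or unclaimed holders show up as a
// non-zero difference between the two.
class HolderModel {
  static std::atomic<int> _live_count;
  static std::atomic<int> _live_not_claimed_count;
 public:
  HolderModel()  { ++_live_count; ++_live_not_claimed_count; }
  ~HolderModel() { assert(_live_count.load() > 0); --_live_count; }

  void claim() { --_live_not_claimed_count; }

  static int live_count()             { return _live_count.load(); }
  static int live_not_claimed_count() { return _live_not_claimed_count.load(); }
};

std::atomic<int> HolderModel::_live_count(0);
std::atomic<int> HolderModel::_live_not_claimed_count(0);

int main() {
  {
    HolderModel h;
    h.claim();
    std::printf("live=%d unclaimed=%d\n",
                HolderModel::live_count(), HolderModel::live_not_claimed_count());
  }
  std::printf("live=%d unclaimed=%d\n",
              HolderModel::live_count(), HolderModel::live_not_claimed_count());
  return 0;
}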
--- a/hotspot/src/share/vm/oops/constantPool.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/constantPool.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -71,7 +71,6 @@ // only set to non-zero if constant pool is merged by RedefineClasses set_version(0); - set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock")); // initialize tag array int length = tags->length(); @@ -100,9 +99,6 @@ void ConstantPool::release_C_heap_structures() { // walk constant pool and decrement symbol reference counts unreference_symbols(); - - delete _lock; - set_lock(NULL); } objArrayOop ConstantPool::resolved_references() const { @@ -146,8 +142,7 @@ // CDS support. Create a new resolved_references array. void ConstantPool::restore_unshareable_info(TRAPS) { - // Only create the new resolved references array and lock if it hasn't been - // attempted before + // Only create the new resolved references array if it hasn't been attempted before if (resolved_references() != NULL) return; // restore the C++ vtable from the shared archive @@ -163,9 +158,6 @@ ClassLoaderData* loader_data = pool_holder()->class_loader_data(); set_resolved_references(loader_data->add_handle(refs_handle)); } - - // Also need to recreate the mutex. Make sure this matches the constructor - set_lock(new Monitor(Monitor::nonleaf + 2, "A constant pool lock")); } } @@ -176,7 +168,6 @@ set_resolved_reference_length( resolved_references() != NULL ? resolved_references()->length() : 0); set_resolved_references(NULL); - set_lock(NULL); } int ConstantPool::cp_to_object_index(int cp_index) { @@ -186,11 +177,41 @@ return (i < 0) ? _no_index_sentinel : i; } +void ConstantPool::trace_class_resolution(constantPoolHandle this_cp, KlassHandle k) { + ResourceMark rm; + int line_number = -1; + const char * source_file = NULL; + if (JavaThread::current()->has_last_Java_frame()) { + // try to identify the method which called this function. + vframeStream vfst(JavaThread::current()); + if (!vfst.at_end()) { + line_number = vfst.method()->line_number_from_bci(vfst.bci()); + Symbol* s = vfst.method()->method_holder()->source_file_name(); + if (s != NULL) { + source_file = s->as_C_string(); + } + } + } + if (k() != this_cp->pool_holder()) { + // only print something if the classes are different + if (source_file != NULL) { + tty->print("RESOLVE %s %s %s:%d\n", + this_cp->pool_holder()->external_name(), + InstanceKlass::cast(k())->external_name(), source_file, line_number); + } else { + tty->print("RESOLVE %s %s\n", + this_cp->pool_holder()->external_name(), + InstanceKlass::cast(k())->external_name()); + } + } +} + Klass* ConstantPool::klass_at_impl(constantPoolHandle this_cp, int which, TRAPS) { + assert(THREAD->is_Java_thread(), "must be a Java thread"); + // A resolved constantPool entry will contain a Klass*, otherwise a Symbol*. - // It is not safe to rely on the tag bit's here, since we don't have a lock, and the entry and - // tag is not updated atomicly. - + // It is not safe to rely on the tag bit's here, since we don't have a lock, and + // the entry and tag is not updated atomicly. CPSlot entry = this_cp->slot_at(which); if (entry.is_resolved()) { assert(entry.get_klass()->is_klass(), "must be"); @@ -198,115 +219,51 @@ return entry.get_klass(); } - // Acquire lock on constant oop while doing update. After we get the lock, we check if another object - // already has updated the object - assert(THREAD->is_Java_thread(), "must be a Java thread"); - bool do_resolve = false; - bool in_error = false; - - // Create a handle for the mirror. 
This will preserve the resolved class - // until the loader_data is registered. - Handle mirror_handle; - - Symbol* name = NULL; - Handle loader; - { MonitorLockerEx ml(this_cp->lock()); - - if (this_cp->tag_at(which).is_unresolved_klass()) { - if (this_cp->tag_at(which).is_unresolved_klass_in_error()) { - in_error = true; - } else { - do_resolve = true; - name = this_cp->unresolved_klass_at(which); - loader = Handle(THREAD, this_cp->pool_holder()->class_loader()); - } - } - } // unlocking constantPool - - - // The original attempt to resolve this constant pool entry failed so find the - // class of the original error and throw another error of the same class (JVMS 5.4.3). - // If there is a detail message, pass that detail message to the error constructor. - // The JVMS does not strictly require us to duplicate the same detail message, - // or any internal exception fields such as cause or stacktrace. But since the - // detail message is often a class name or other literal string, we will repeat it if - // we can find it in the symbol table. - if (in_error) { + // This tag doesn't change back to unresolved class unless at a safepoint. + if (this_cp->tag_at(which).is_unresolved_klass_in_error()) { + // The original attempt to resolve this constant pool entry failed so find the + // class of the original error and throw another error of the same class + // (JVMS 5.4.3). + // If there is a detail message, pass that detail message to the error. + // The JVMS does not strictly require us to duplicate the same detail message, + // or any internal exception fields such as cause or stacktrace. But since the + // detail message is often a class name or other literal string, we will repeat it + // if we can find it in the symbol table. throw_resolution_error(this_cp, which, CHECK_0); + ShouldNotReachHere(); } - if (do_resolve) { - // this_cp must be unlocked during resolve_or_fail - oop protection_domain = this_cp->pool_holder()->protection_domain(); - Handle h_prot (THREAD, protection_domain); - Klass* kk = SystemDictionary::resolve_or_fail(name, loader, h_prot, true, THREAD); - KlassHandle k; - if (!HAS_PENDING_EXCEPTION) { - k = KlassHandle(THREAD, kk); - // preserve the resolved klass. - mirror_handle = Handle(THREAD, kk->java_mirror()); - // Do access check for klasses - verify_constant_pool_resolve(this_cp, k, THREAD); - } - - // Failed to resolve class. We must record the errors so that subsequent attempts - // to resolve this constant pool entry fail with the same error (JVMS 5.4.3). - if (HAS_PENDING_EXCEPTION) { - MonitorLockerEx ml(this_cp->lock()); - - // some other thread has beaten us and has resolved the class. - if (this_cp->tag_at(which).is_klass()) { - CLEAR_PENDING_EXCEPTION; - entry = this_cp->resolved_klass_at(which); - return entry.get_klass(); - } + Handle mirror_handle; + Symbol* name = entry.get_symbol(); + Handle loader (THREAD, this_cp->pool_holder()->class_loader()); + Handle protection_domain (THREAD, this_cp->pool_holder()->protection_domain()); + Klass* kk = SystemDictionary::resolve_or_fail(name, loader, protection_domain, true, THREAD); + KlassHandle k (THREAD, kk); + if (!HAS_PENDING_EXCEPTION) { + // preserve the resolved klass from unloading + mirror_handle = Handle(THREAD, kk->java_mirror()); + // Do access check for klasses + verify_constant_pool_resolve(this_cp, k, THREAD); + } - // The tag could have changed to in-error before the lock but we have to - // handle that here for the class case. 
- save_and_throw_exception(this_cp, which, constantTag(JVM_CONSTANT_UnresolvedClass), CHECK_0); - } + // Failed to resolve class. We must record the errors so that subsequent attempts + // to resolve this constant pool entry fail with the same error (JVMS 5.4.3). + if (HAS_PENDING_EXCEPTION) { + save_and_throw_exception(this_cp, which, constantTag(JVM_CONSTANT_UnresolvedClass), CHECK_0); + } - if (TraceClassResolution && !k()->oop_is_array()) { - // skip resolving the constant pool so that this code get's - // called the next time some bytecodes refer to this class. - ResourceMark rm; - int line_number = -1; - const char * source_file = NULL; - if (JavaThread::current()->has_last_Java_frame()) { - // try to identify the method which called this function. - vframeStream vfst(JavaThread::current()); - if (!vfst.at_end()) { - line_number = vfst.method()->line_number_from_bci(vfst.bci()); - Symbol* s = vfst.method()->method_holder()->source_file_name(); - if (s != NULL) { - source_file = s->as_C_string(); - } - } - } - if (k() != this_cp->pool_holder()) { - // only print something if the classes are different - if (source_file != NULL) { - tty->print("RESOLVE %s %s %s:%d\n", - this_cp->pool_holder()->external_name(), - InstanceKlass::cast(k())->external_name(), source_file, line_number); - } else { - tty->print("RESOLVE %s %s\n", - this_cp->pool_holder()->external_name(), - InstanceKlass::cast(k())->external_name()); - } - } + // Make this class loader depend upon the class loader owning the class reference + ClassLoaderData* this_key = this_cp->pool_holder()->class_loader_data(); + this_key->record_dependency(k(), CHECK_NULL); // Can throw OOM + + if (TraceClassResolution && !k->oop_is_array()) { + // skip resolving the constant pool so that this code gets + // called the next time some bytecodes refer to this class. + trace_class_resolution(this_cp, k); return k(); } else { - MonitorLockerEx ml(this_cp->lock()); - // Only updated constant pool - if it is resolved. - do_resolve = this_cp->tag_at(which).is_unresolved_klass(); - if (do_resolve) { - ClassLoaderData* this_key = this_cp->pool_holder()->class_loader_data(); - this_key->record_dependency(k(), CHECK_NULL); // Can throw OOM this_cp->klass_at_put(which, k()); } - } - } entry = this_cp->resolved_klass_at(which); assert(entry.is_resolved() && entry.get_klass()->is_klass(), "must be resolved at this point"); @@ -576,7 +533,7 @@ switch (tag.value()) { case JVM_CONSTANT_UnresolvedClass: // return the class name in the error message - message = this_cp->unresolved_klass_at(which); + message = this_cp->klass_name_at(which); break; case JVM_CONSTANT_MethodHandle: // return the method handle name in the error message @@ -606,7 +563,6 @@ // in the resolution error table, so that the same exception is thrown again. void ConstantPool::save_and_throw_exception(constantPoolHandle this_cp, int which, constantTag tag, TRAPS) { - assert(this_cp->lock()->is_locked(), "constant pool lock should be held"); Symbol* error = PENDING_EXCEPTION->klass()->name(); int error_tag = tag.error_value(); @@ -620,7 +576,14 @@ } else if (this_cp->tag_at(which).value() != error_tag) { Symbol* message = exception_message(this_cp, which, tag, PENDING_EXCEPTION); SystemDictionary::add_resolution_error(this_cp, which, error, message); - this_cp->tag_at_put(which, error_tag); + // CAS in the tag. If a thread beat us to registering this error that's fine. + // If another thread resolved the reference, this is an error. The resolution + // must deterministically get an error. 
So why do we save this? + // We save this because jvmti can add classes to the bootclass path after this + // error, so it needs to get the same error if the error is first. + jbyte old_tag = Atomic::cmpxchg((jbyte)error_tag, + (jbyte*)this_cp->tag_addr_at(which), (jbyte)tag.value()); + assert(old_tag == error_tag || old_tag == tag.value(), "should not be resolved otherwise"); } else { // some other thread put this in error state throw_resolution_error(this_cp, which, CHECK); @@ -710,7 +673,6 @@ THREAD); result_oop = value(); if (HAS_PENDING_EXCEPTION) { - MonitorLockerEx ml(this_cp->lock()); // lock cpool to change tag. save_and_throw_exception(this_cp, index, tag, CHECK_NULL); } break; @@ -727,7 +689,6 @@ Handle value = SystemDictionary::find_method_handle_type(signature, klass, THREAD); result_oop = value(); if (HAS_PENDING_EXCEPTION) { - MonitorLockerEx ml(this_cp->lock()); // lock cpool to change tag. save_and_throw_exception(this_cp, index, tag, CHECK_NULL); } break; @@ -765,22 +726,17 @@ } if (cache_index >= 0) { - // Cache the oop here also. - Handle result_handle(THREAD, result_oop); - MonitorLockerEx ml(this_cp->lock()); // don't know if we really need this - oop result = this_cp->resolved_references()->obj_at(cache_index); - // Benign race condition: resolved_references may already be filled in while we were trying to lock. + // Benign race condition: resolved_references may already be filled in. // The important thing here is that all threads pick up the same result. // It doesn't matter which racing thread wins, as long as only one // result is used by all threads, and all future queries. - // That result may be either a resolved constant or a failure exception. - if (result == NULL) { - this_cp->resolved_references()->obj_at_put(cache_index, result_handle()); - return result_handle(); + oop old_result = this_cp->resolved_references()->atomic_compare_exchange_oop(cache_index, result_oop, NULL); + if (old_result == NULL) { + return result_oop; // was installed } else { // Return the winning thread's result. This can be different than - // result_handle() for MethodHandles. - return result; + // the result here for MethodHandles. + return old_result; } } else { return result_oop; @@ -853,9 +809,8 @@ } -// Iterate over symbols and decrement ones which are Symbol*s. -// This is done during GC so do not need to lock constantPool unless we -// have per-thread safepoints. +// Iterate over symbols and decrement ones which are Symbol*s +// This is done during GC. // Only decrement the UTF8 symbols. Unresolved classes and strings point to // these symbols but didn't increment the reference count. void ConstantPool::unreference_symbols() { @@ -987,8 +942,8 @@ case JVM_CONSTANT_UnresolvedClass: { - Symbol* k1 = unresolved_klass_at(index1); - Symbol* k2 = cp2->unresolved_klass_at(index2); + Symbol* k1 = klass_name_at(index1); + Symbol* k2 = cp2->klass_name_at(index2); if (k1 == k2) { return true; } @@ -1970,7 +1925,6 @@ break; case JVM_CONSTANT_UnresolvedClass : // fall-through case JVM_CONSTANT_UnresolvedClassInError: { - // unresolved_klass_at requires lock or safe world. CPSlot entry = slot_at(index); if (entry.is_resolved()) { entry.get_klass()->print_value_on(st);
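The constant pool no longer owns a Monitor; resolution results are published with compare-and-swap instead, so the first thread to install a value wins and every later thread adopts that same value (for the error tag, losing the race to another thread's error is equally fine). A sketch of the first-writer-wins idea using std::atomic, standing in for atomic_compare_exchange_oop on the resolved_references array:

#include <atomic>
#include <cstdio>

// "First writer wins" publication: every resolving thread computes a
// candidate, but only the thread that CASes it over the empty slot installs
// it; the losers discard their candidate and adopt the published value.
struct ResolvedSlot {
  std::atomic<const char*> value;
  ResolvedSlot() : value(nullptr) {}

  // Returns the value every caller should use from now on.
  const char* publish(const char* candidate) {
    const char* expected = nullptr;
    if (value.compare_exchange_strong(expected, candidate)) {
      return candidate;          // we won the race and installed our result
    }
    return expected;             // somebody else won; use their result
  }
};

int main() {
  ResolvedSlot slot;
  const char* a = slot.publish("resolved by thread A");
  const char* b = slot.publish("resolved by thread B");   // loses: slot already set
  std::printf("A sees: %s\nB sees: %s\n", a, b);
  return 0;
}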
--- a/hotspot/src/share/vm/oops/constantPool.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/constantPool.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -112,12 +112,12 @@ int _version; } _saved; - Monitor* _lock; - void set_tags(Array<u1>* tags) { _tags = tags; } void tag_at_put(int which, jbyte t) { tags()->at_put(which, t); } void release_tag_at_put(int which, jbyte t) { tags()->release_at_put(which, t); } + u1* tag_addr_at(int which) const { return tags()->adr_at(which); } + void set_operands(Array<u2>* operands) { _operands = operands; } int flags() const { return _flags; } @@ -362,14 +362,6 @@ return CPSlot((Klass*)OrderAccess::load_ptr_acquire(obj_at_addr_raw(which))).get_klass(); } - // This method should only be used with a cpool lock or during parsing or gc - Symbol* unresolved_klass_at(int which) { // Temporary until actual use - Symbol* s = CPSlot((Symbol*)OrderAccess::load_ptr_acquire(obj_at_addr_raw(which))).get_symbol(); - // check that the klass is still unresolved. - assert(tag_at(which).is_unresolved_klass(), "Corrupted constant pool"); - return s; - } - // RedefineClasses() API support: Symbol* klass_at_noresolve(int which) { return klass_name_at(which); } @@ -818,6 +810,8 @@ static Klass* klass_at_impl(constantPoolHandle this_cp, int which, TRAPS); static oop string_at_impl(constantPoolHandle this_cp, int which, int obj_index, TRAPS); + static void trace_class_resolution(constantPoolHandle this_cp, KlassHandle k); + // Resolve string constants (to prevent allocation during compilation) static void resolve_string_constants_impl(constantPoolHandle this_cp, TRAPS); @@ -848,8 +842,6 @@ void set_resolved_reference_length(int length) { _saved._resolved_reference_length = length; } int resolved_reference_length() const { return _saved._resolved_reference_length; } - void set_lock(Monitor* lock) { _lock = lock; } - Monitor* lock() { return _lock; } // Decrease ref counts of symbols that are in the constant pool // when the holder class is unloaded
--- a/hotspot/src/share/vm/oops/cpCache.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/cpCache.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -32,6 +32,7 @@ #include "oops/oop.inline.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" #include "prims/methodHandles.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "utilities/macros.hpp" @@ -286,7 +287,9 @@ // the lock, so that when the losing writer returns, he can use the linked // cache entry. - MonitorLockerEx ml(cpool->lock()); + // Use the lock from the metaspace for this, which cannot stop for safepoint. + Mutex* metaspace_lock = cpool->pool_holder()->class_loader_data()->metaspace_lock(); + MutexLockerEx ml(metaspace_lock, Mutex::_no_safepoint_check_flag); if (!is_f1_null()) { return; }
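The cpCache.cpp change serializes entry initialization on the holder's metaspace lock, taken without a safepoint check, and the losing writer simply returns and reuses the entry it finds under the lock. A rough equivalent of that double-checked initialization, sketched with std::mutex standing in for the metaspace lock (the real code additionally forbids stopping for a safepoint while the lock is held):

    #include <atomic>
    #include <mutex>

    struct CacheEntry {
      std::atomic<void*> f1{nullptr};
      std::mutex init_lock;                 // stands in for the metaspace lock

      void initialize(void* value) {
        std::lock_guard<std::mutex> guard(init_lock);
        if (f1.load(std::memory_order_acquire) != nullptr) {
          return;                           // losing writer: reuse the already-linked entry
        }
        f1.store(value, std::memory_order_release);
      }
    };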
--- a/hotspot/src/share/vm/oops/instanceClassLoaderKlass.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/instanceClassLoaderKlass.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -28,6 +28,7 @@ #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/genOopClosures.inline.hpp" +#include "memory/iterator.inline.hpp" #include "memory/oopFactory.hpp" #include "oops/instanceKlass.hpp" #include "oops/instanceClassLoaderKlass.hpp" @@ -44,12 +45,6 @@ #include "oops/oop.pcgc.inline.hpp" #endif // INCLUDE_ALL_GCS -#define if_do_metadata_checked(closure, nv_suffix) \ - /* Make sure the non-virtual and the virtual versions match. */ \ - assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \ - "Inconsistency in do_metadata"); \ - if (closure->do_metadata##nv_suffix()) - // Macro to define InstanceClassLoaderKlass::oop_oop_iterate for virtual/nonvirtual for // all closures. Macros calling macros above for each oop size. // Since ClassLoader objects have only a pointer to the loader_data, they are not
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -35,6 +35,7 @@ #include "jvmtifiles/jvmti.h" #include "memory/genOopClosures.inline.hpp" #include "memory/heapInspection.hpp" +#include "memory/iterator.inline.hpp" #include "memory/metadataFactory.hpp" #include "memory/oopFactory.hpp" #include "oops/fieldStreams.hpp" @@ -51,6 +52,7 @@ #include "prims/jvmtiRedefineClasses.hpp" #include "prims/jvmtiThreadState.hpp" #include "prims/methodComparator.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/fieldDescriptor.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" @@ -2113,12 +2115,6 @@ // closure's do_metadata() method dictates whether the given closure should be // applied to the klass ptr in the object header. -#define if_do_metadata_checked(closure, nv_suffix) \ - /* Make sure the non-virtual and the virtual versions match. */ \ - assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \ - "Inconsistency in do_metadata"); \ - if (closure->do_metadata##nv_suffix()) - #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ \ int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ @@ -2142,10 +2138,9 @@ int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \ OopClosureType* closure) { \ SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \ - /* header */ \ - if_do_metadata_checked(closure, nv_suffix) { \ - closure->do_klass##nv_suffix(obj->klass()); \ - } \ + \ + assert_should_ignore_metadata(closure, nv_suffix); \ + \ /* instance variables */ \ InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ obj, \
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -32,7 +32,6 @@ #include "oops/fieldInfo.hpp" #include "oops/instanceOop.hpp" #include "oops/klassVtable.hpp" -#include "runtime/atomic.hpp" #include "runtime/handles.hpp" #include "runtime/os.hpp" #include "utilities/accessFlags.hpp"
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -28,6 +28,7 @@ #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/genOopClosures.inline.hpp" +#include "memory/iterator.inline.hpp" #include "memory/oopFactory.hpp" #include "oops/instanceKlass.hpp" #include "oops/instanceMirrorKlass.hpp" @@ -241,12 +242,6 @@ return oop_size(obj); \ -#define if_do_metadata_checked(closure, nv_suffix) \ - /* Make sure the non-virtual and the virtual versions match. */ \ - assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \ - "Inconsistency in do_metadata"); \ - if (closure->do_metadata##nv_suffix()) - // Macro to define InstanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for // all closures. Macros calling macros above for each oop size.
--- a/hotspot/src/share/vm/oops/klass.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/klass.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -508,7 +508,7 @@ // Only recreate it if not present. A previous attempt to restore may have // gotten an OOM later but keep the mirror if it was created. if (java_mirror() == NULL) { - java_lang_Class::create_mirror(this, Handle(NULL), CHECK); + java_lang_Class::create_mirror(this, Handle(NULL), Handle(NULL), CHECK); } }
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -29,6 +29,7 @@ #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp" #include "memory/genOopClosures.inline.hpp" +#include "memory/iterator.inline.hpp" #include "memory/metadataFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" @@ -476,12 +477,6 @@ } #endif // INCLUDE_ALL_GCS -#define if_do_metadata_checked(closure, nv_suffix) \ - /* Make sure the non-virtual and the virtual versions match. */ \ - assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \ - "Inconsistency in do_metadata"); \ - if (closure->do_metadata##nv_suffix()) - #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ \ int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \
--- a/hotspot/src/share/vm/oops/objArrayOop.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/objArrayOop.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,6 +27,22 @@ #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" +oop objArrayOopDesc::atomic_compare_exchange_oop(int index, oop exchange_value, + oop compare_value) { + volatile HeapWord* dest; + if (UseCompressedOops) { + dest = (HeapWord*)obj_at_addr<narrowOop>(index); + } else { + dest = (HeapWord*)obj_at_addr<oop>(index); + } + oop res = oopDesc::atomic_compare_exchange_oop(exchange_value, dest, compare_value, true); + // update card mark if success + if (res == compare_value) { + update_barrier_set((void*)dest, exchange_value); + } + return res; +} + #define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ \ int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) { \
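The new objArrayOopDesc::atomic_compare_exchange_oop above performs the CAS first and dirties the card only when this thread's store actually landed, so a failed exchange leaves the card table untouched. A generic sketch of that success-only post-barrier ordering (write_barrier is a stand-in for update_barrier_set, and the template slot stands in for the array element):

    #include <atomic>

    template <typename T>
    T* cas_with_post_barrier(std::atomic<T*>& slot, T* compare_value, T* exchange_value,
                             void (*write_barrier)(void* dest)) {
      T* witnessed = compare_value;
      if (slot.compare_exchange_strong(witnessed, exchange_value)) {
        write_barrier(&slot);   // only the winning store needs the card mark
        return compare_value;   // mirrors 'res == compare_value' in the hunk above
      }
      return witnessed;         // CAS failed: report the value currently in the slot
    }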
--- a/hotspot/src/share/vm/oops/objArrayOop.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/objArrayOop.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -95,6 +95,9 @@ oop_store(obj_at_addr<oop>(index), value); } } + + oop atomic_compare_exchange_oop(int index, oop exchange_value, oop compare_value); + // Sizing static int header_size() { return arrayOopDesc::header_size(T_OBJECT); } int object_size() { return object_size(length()); }
--- a/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ #define SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP #include "utilities/macros.hpp" +#include "runtime/atomic.inline.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/parNew/parNewGeneration.hpp" #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
--- a/hotspot/src/share/vm/oops/symbol.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/oops/symbol.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,11 +26,11 @@ #include "precompiled.hpp" #include "classfile/altHashing.hpp" #include "classfile/classLoaderData.hpp" +#include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "oops/symbol.hpp" #include "runtime/atomic.inline.hpp" #include "runtime/os.hpp" -#include "memory/allocation.inline.hpp" -#include "memory/resourceArea.hpp" Symbol::Symbol(const u1* name, int length, int refcount) { _refcount = refcount;
--- a/hotspot/src/share/vm/opto/compile.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/opto/compile.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -3411,7 +3411,7 @@ _root->verify_edges(visited); if (no_dead_code) { // Now make sure that no visited node is used by an unvisited node. - bool dead_nodes = 0; + bool dead_nodes = false; Unique_Node_List checked(area); while (visited.size() > 0) { Node* n = visited.pop(); @@ -3422,14 +3422,16 @@ if (visited.member(use)) continue; // already in the graph if (use->is_Con()) continue; // a dead ConNode is OK // At this point, we have found a dead node which is DU-reachable. - if (dead_nodes++ == 0) + if (!dead_nodes) { tty->print_cr("*** Dead nodes reachable via DU edges:"); + dead_nodes = true; + } use->dump(2); tty->print_cr("---"); checked.push(use); // No repeats; pretend it is now checked. } } - assert(dead_nodes == 0, "using nodes must be reachable from root"); + assert(!dead_nodes, "using nodes must be reachable from root"); } } }
--- a/hotspot/src/share/vm/opto/matcher.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/opto/matcher.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -36,7 +36,6 @@ #include "opto/runtime.hpp" #include "opto/type.hpp" #include "opto/vectornode.hpp" -#include "runtime/atomic.hpp" #include "runtime/os.hpp" #ifdef TARGET_ARCH_MODEL_x86_32 # include "adfiles/ad_x86_32.hpp"
--- a/hotspot/src/share/vm/opto/runtime.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/opto/runtime.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -55,6 +55,7 @@ #include "opto/mulnode.hpp" #include "opto/runtime.hpp" #include "opto/subnode.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/fprofiler.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp"
--- a/hotspot/src/share/vm/prims/jni.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/prims/jni.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -59,6 +59,7 @@ #include "prims/jvm_misc.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/fieldDescriptor.hpp" #include "runtime/fprofiler.hpp" @@ -3336,13 +3337,7 @@ directBufferSupportInitializeEnded = 1; } else { while (!directBufferSupportInitializeEnded && !directBufferSupportInitializeFailed) { - // Set state as yield_all can call os:sleep. On Solaris, yield_all calls - // os::sleep which requires the VM state transition. On other platforms, it - // is not necessary. The following call to change the VM state is purposely - // put inside the loop to avoid potential deadlock when multiple threads - // try to call this method. See 6791815 for more details. - ThreadInVMfromNative tivn(thread); - os::yield_all(); + os::yield(); } }
--- a/hotspot/src/share/vm/prims/jvm.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/prims/jvm.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -44,6 +44,7 @@ #include "prims/nativeLookup.hpp" #include "prims/privilegedStack.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/dtraceJSDT.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" @@ -55,6 +56,7 @@ #include "runtime/os.hpp" #include "runtime/perfData.hpp" #include "runtime/reflection.hpp" +#include "runtime/thread.inline.hpp" #include "runtime/vframe.hpp" #include "runtime/vm_operations.hpp" #include "services/attachListener.hpp"
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -258,9 +258,6 @@ // VM representation. We don't attach the reconstituted class // bytes to the InstanceKlass here because they have not been // validated and we're not at a safepoint. - constantPoolHandle constants(current_thread, ikh->constants()); - MonitorLockerEx ml(constants->lock()); // lock constant pool while we query it - JvmtiClassFileReconstituter reconstituter(ikh); if (reconstituter.get_error() != JVMTI_ERROR_NONE) { return reconstituter.get_error(); @@ -2445,9 +2442,6 @@ } instanceKlassHandle ikh(thread, k_oop); - constantPoolHandle constants(thread, ikh->constants()); - MonitorLockerEx ml(constants->lock()); // lock constant pool while we query it - JvmtiConstantPoolReconstituter reconstituter(ikh); if (reconstituter.get_error() != JVMTI_ERROR_NONE) { return reconstituter.get_error(); @@ -2467,6 +2461,7 @@ return reconstituter.get_error(); } + constantPoolHandle constants(thread, ikh->constants()); *constant_pool_count_ptr = constants->length(); *constant_pool_byte_count_ptr = cpool_size; *constant_pool_bytes_ptr = cpool_bytes;
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -32,7 +32,7 @@ #include "prims/jvmtiEventController.inline.hpp" #include "prims/jvmtiImpl.hpp" #include "prims/jvmtiRedefineClasses.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp"
--- a/hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/prims/jvmtiRawMonitor.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "prims/jvmtiRawMonitor.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/thread.inline.hpp"
--- a/hotspot/src/share/vm/prims/unsafe.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/prims/unsafe.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -31,6 +31,7 @@ #include "memory/allocation.inline.hpp" #include "prims/jni.h" #include "prims/jvm.h" +#include "runtime/atomic.inline.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/prefetch.inline.hpp" @@ -891,6 +892,14 @@ } UNSAFE_END +static jobject get_class_loader(JNIEnv* env, jclass cls) { + if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(cls))) { + return NULL; + } + Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(cls)); + oop loader = k->class_loader(); + return JNIHandles::make_local(env, loader); +} UNSAFE_ENTRY(jclass, Unsafe_DefineClass0(JNIEnv *env, jobject unsafe, jstring name, jbyteArray data, int offset, int length)) UnsafeWrapper("Unsafe_DefineClass"); @@ -899,7 +908,7 @@ int depthFromDefineClass0 = 1; jclass caller = JVM_GetCallerClass(env, depthFromDefineClass0); - jobject loader = (caller == NULL) ? NULL : JVM_GetClassLoader(env, caller); + jobject loader = (caller == NULL) ? NULL : get_class_loader(env, caller); jobject pd = (caller == NULL) ? NULL : JVM_GetProtectionDomain(env, caller); return Unsafe_DefineClass_impl(env, name, data, offset, length, loader, pd);
--- a/hotspot/src/share/vm/runtime/biasedLocking.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/biasedLocking.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "oops/klass.inline.hpp" #include "oops/markOop.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/basicLock.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/task.hpp"
--- a/hotspot/src/share/vm/runtime/extendedPC.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/extendedPC.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,6 +25,9 @@ #ifndef SHARE_VM_RUNTIME_EXTENDEDPC_HPP #define SHARE_VM_RUNTIME_EXTENDEDPC_HPP +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + // An ExtendedPC contains the _pc from a signal handler in a platform // independent way.
--- a/hotspot/src/share/vm/runtime/frame.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/frame.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -44,6 +44,7 @@ #include "runtime/signature.hpp" #include "runtime/stubCodeGenerator.hpp" #include "runtime/stubRoutines.hpp" +#include "runtime/thread.inline.hpp" #include "utilities/decoder.hpp" #ifdef TARGET_ARCH_x86
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/globals.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -2299,6 +2299,9 @@ manageable(bool, PrintGCTimeStamps, false, \ "Print timestamps at garbage collection") \ \ + manageable(bool, PrintGCID, true, \ + "Print an identifier for each garbage collection") \ + \ product(bool, PrintGCTaskTimeStamps, false, \ "Print timestamps for individual gc worker thread tasks") \ \
--- a/hotspot/src/share/vm/runtime/handles.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/handles.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ #include "memory/allocation.inline.hpp" #include "oops/constantPool.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/thread.inline.hpp" #ifdef TARGET_OS_FAMILY_linux
--- a/hotspot/src/share/vm/runtime/interfaceSupport.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/interfaceSupport.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -28,6 +28,7 @@ #include "gc_interface/collectedHeap.inline.hpp" #include "memory/genCollectedHeap.hpp" #include "memory/resourceArea.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/init.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/orderAccess.inline.hpp"
--- a/hotspot/src/share/vm/runtime/mutex.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/mutex.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ */ #include "precompiled.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/mutex.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp"
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -27,6 +27,7 @@ #include "memory/resourceArea.hpp" #include "oops/markOop.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/mutexLocker.hpp"
--- a/hotspot/src/share/vm/runtime/os.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/os.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -40,6 +40,7 @@ #include "prims/jvm_misc.hpp" #include "prims/privilegedStack.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/java.hpp"
--- a/hotspot/src/share/vm/runtime/os.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/os.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,7 +26,6 @@ #define SHARE_VM_RUNTIME_OS_HPP #include "jvmtifiles/jvmti.h" -#include "runtime/atomic.hpp" #include "runtime/extendedPC.hpp" #include "runtime/handles.hpp" #include "utilities/top.hpp" @@ -453,8 +452,6 @@ // yield that can be used in lieu of blocking. } ; static YieldResult NakedYield () ; - static void yield_all(); // Yields to all other threads including lower priority - // (for the default scheduling policy) static OSReturn set_priority(Thread* thread, ThreadPriority priority); static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
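The os.hpp hunk above removes yield_all() from the os interface; its callers elsewhere in this changeset (the safepoint wait loop below and the NMT slowdown path) switch to a plain yield followed, after some number of iterations, by a short unconditional sleep, since a bare yield does not guarantee that other ready threads actually run. A self-contained sketch of that back-off, with an illustrative threshold and a stand-in still_running predicate:

    #include <chrono>
    #include <thread>

    void wait_for_mutators_to_block(bool (*still_running)(),
                                    int defer_yield_iterations /* e.g. DeferThrSuspendLoopCount */) {
      int steps = 0;
      while (still_running()) {
        if (steps < defer_yield_iterations) {
          std::this_thread::yield();                                  // os::NakedYield()
        } else {
          std::this_thread::sleep_for(std::chrono::milliseconds(1));  // os::naked_short_sleep(1)
        }
        ++steps;
      }
    }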
--- a/hotspot/src/share/vm/runtime/safepoint.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/safepoint.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -32,10 +32,12 @@ #include "code/scopeDesc.hpp" #include "gc_interface/collectedHeap.hpp" #include "interpreter/interpreter.hpp" +#include "memory/gcLocker.inline.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/frame.inline.hpp" @@ -264,8 +266,8 @@ // // Further complicating matters is that yield() does not work as naively expected // on many platforms -- yield() does not guarantee that any other ready threads - // will run. As such we revert yield_all() after some number of iterations. - // Yield_all() is implemented as a short unconditional sleep on some platforms. + // will run. As such we revert to naked_short_sleep() after some number of iterations. + // nakes_short_sleep() is implemented as a short unconditional sleep. // Typical operating systems round a "short" sleep period up to 10 msecs, so sleeping // can actually increase the time it takes the VM thread to detect that a system-wide // stop-the-world safepoint has been reached. In a pathological scenario such as that @@ -322,9 +324,7 @@ if (steps < DeferThrSuspendLoopCount) { os::NakedYield() ; } else { - os::yield_all() ; - // Alternately, the VM thread could transiently depress its scheduling priority or - // transiently increase the priority of the tardy mutator(s). + os::naked_short_sleep(1); } iterations ++ ; @@ -744,80 +744,12 @@ // ------------------------------------------------------------------------------------------------------ // Exception handlers -#ifndef PRODUCT - -#ifdef SPARC - -#ifdef _LP64 -#define PTR_PAD "" -#else -#define PTR_PAD " " -#endif - -static void print_ptrs(intptr_t oldptr, intptr_t newptr, bool wasoop) { - bool is_oop = newptr ? (cast_to_oop(newptr))->is_oop() : false; - tty->print_cr(PTR_FORMAT PTR_PAD " %s %c " PTR_FORMAT PTR_PAD " %s %s", - oldptr, wasoop?"oop":" ", oldptr == newptr ? ' ' : '!', - newptr, is_oop?"oop":" ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":" ")); -} - -static void print_longs(jlong oldptr, jlong newptr, bool wasoop) { - bool is_oop = newptr ? (cast_to_oop(newptr))->is_oop() : false; - tty->print_cr(PTR64_FORMAT " %s %c " PTR64_FORMAT " %s %s", - oldptr, wasoop?"oop":" ", oldptr == newptr ? ' ' : '!', - newptr, is_oop?"oop":" ", (wasoop && !is_oop) ? 
"STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":" ")); -} - -static void print_me(intptr_t *new_sp, intptr_t *old_sp, bool *was_oops) { -#ifdef _LP64 - tty->print_cr("--------+------address-----+------before-----------+-------after----------+"); - const int incr = 1; // Increment to skip a long, in units of intptr_t -#else - tty->print_cr("--------+--address-+------before-----------+-------after----------+"); - const int incr = 2; // Increment to skip a long, in units of intptr_t -#endif - tty->print_cr("---SP---|"); - for( int i=0; i<16; i++ ) { - tty->print("blob %c%d |"PTR_FORMAT" ","LO"[i>>3],i&7,new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); } - tty->print_cr("--------|"); - for( int i1=0; i1<frame::memory_parameter_word_sp_offset-16; i1++ ) { - tty->print("argv pad|"PTR_FORMAT" ",new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); } - tty->print(" pad|"PTR_FORMAT" ",new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); - tty->print_cr("--------|"); - tty->print(" G1 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr; - tty->print(" G3 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr; - tty->print(" G4 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr; - tty->print(" G5 |"PTR_FORMAT" ",new_sp); print_longs(*(jlong*)old_sp,*(jlong*)new_sp,was_oops[incr-1]); old_sp += incr; new_sp += incr; was_oops += incr; - tty->print_cr(" FSR |"PTR_FORMAT" "PTR64_FORMAT" "PTR64_FORMAT,new_sp,*(jlong*)old_sp,*(jlong*)new_sp); - old_sp += incr; new_sp += incr; was_oops += incr; - // Skip the floats - tty->print_cr("--Float-|"PTR_FORMAT,new_sp); - tty->print_cr("---FP---|"); - old_sp += incr*32; new_sp += incr*32; was_oops += incr*32; - for( int i2=0; i2<16; i2++ ) { - tty->print("call %c%d |"PTR_FORMAT" ","LI"[i2>>3],i2&7,new_sp); print_ptrs(*old_sp++,*new_sp++,*was_oops++); } - tty->cr(); -} -#endif // SPARC -#endif // PRODUCT - void SafepointSynchronize::handle_polling_page_exception(JavaThread *thread) { assert(thread->is_Java_thread(), "polling reference encountered by VM thread"); assert(thread->thread_state() == _thread_in_Java, "should come from Java code"); assert(SafepointSynchronize::is_synchronizing(), "polling encountered outside safepoint synchronization"); - // Uncomment this to get some serious before/after printing of the - // Sparc safepoint-blob frame structure. - /* - intptr_t* sp = thread->last_Java_sp(); - intptr_t stack_copy[150]; - for( int i=0; i<150; i++ ) stack_copy[i] = sp[i]; - bool was_oops[150]; - for( int i=0; i<150; i++ ) - was_oops[i] = stack_copy[i] ? ((oop)stack_copy[i])->is_oop() : false; - */ - if (ShowSafepointMsgs) { tty->print("handle_polling_page_exception: "); } @@ -829,7 +761,6 @@ ThreadSafepointState* state = thread->safepoint_state(); state->handle_polling_page_exception(); - // print_me(sp,stack_copy,was_oops); }
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -42,6 +42,7 @@ #include "prims/jvmtiRedefineClassesTrace.hpp" #include "prims/methodHandles.hpp" #include "prims/nativeLookup.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/arguments.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/handles.inline.hpp" @@ -1176,10 +1177,7 @@ (!is_virtual && invoke_code == Bytecodes::_invokedynamic) || ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode"); - // We do not patch the call site if the caller nmethod has been made non-entrant. - if (!caller_nm->is_in_use()) { - return callee_method; - } + assert(caller_nm->is_alive(), "It should be alive"); #ifndef PRODUCT // tracing/debugging/statistics @@ -1249,13 +1247,11 @@ // Now that we are ready to patch if the Method* was redefined then // don't update call site and let the caller retry. - // Don't update call site if caller nmethod has been made non-entrant - // as it is a waste of time. // Don't update call site if callee nmethod was unloaded or deoptimized. // Don't update call site if callee nmethod was replaced by an other nmethod // which may happen when multiply alive nmethod (tiered compilation) // will be supported. - if (!callee_method->is_old() && caller_nm->is_in_use() && + if (!callee_method->is_old() && (callee_nm == NULL || callee_nm->is_in_use() && (callee_method->code() == callee_nm))) { #ifdef ASSERT // We must not try to patch to jump to an already unloaded method. @@ -1454,14 +1450,12 @@ // out of scope. JvmtiDynamicCodeEventCollector event_collector; - // Update inline cache to megamorphic. Skip update if caller has been - // made non-entrant or we are called from interpreted. + // Update inline cache to megamorphic. Skip update if we are called from interpreted. { MutexLocker ml_patch (CompiledIC_lock); RegisterMap reg_map(thread, false); frame caller_frame = thread->last_frame().sender(®_map); CodeBlob* cb = caller_frame.cb(); - if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) { - // Not a non-entrant nmethod, so find inline_cache + if (cb->is_nmethod()) { CompiledIC* inline_cache = CompiledIC_before(((nmethod*)cb), caller_frame.pc()); bool should_be_mono = false; if (inline_cache->is_optimized()) { @@ -1604,19 +1598,13 @@ // resolve is only done once. MutexLocker ml(CompiledIC_lock); - // - // We do not patch the call site if the nmethod has been made non-entrant - // as it is a waste of time - // - if (caller_nm->is_in_use()) { - if (is_static_call) { - CompiledStaticCall* ssc= compiledStaticCall_at(call_addr); - ssc->set_to_clean(); - } else { - // compiled, dispatched call (which used to call an interpreted method) - CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr); - inline_cache->set_to_clean(); - } + if (is_static_call) { + CompiledStaticCall* ssc= compiledStaticCall_at(call_addr); + ssc->set_to_clean(); + } else { + // compiled, dispatched call (which used to call an interpreted method) + CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr); + inline_cache->set_to_clean(); } }
--- a/hotspot/src/share/vm/runtime/sweeper.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/sweeper.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -30,7 +30,7 @@ #include "compiler/compileBroker.hpp" #include "memory/resourceArea.hpp" #include "oops/method.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.inline.hpp"
--- a/hotspot/src/share/vm/runtime/synchronizer.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/synchronizer.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -27,6 +27,7 @@ #include "memory/resourceArea.hpp" #include "oops/markOop.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.hpp"
--- a/hotspot/src/share/vm/runtime/thread.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/thread.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -46,6 +46,7 @@ #include "prims/jvmtiThreadState.hpp" #include "prims/privilegedStack.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/deoptimization.hpp" #include "runtime/fprofiler.hpp" @@ -1357,14 +1358,24 @@ } void WatcherThread::stop() { - { - MutexLockerEx ml(PeriodicTask_lock, Mutex::_no_safepoint_check_flag); - _should_terminate = true; - OrderAccess::fence(); // ensure WatcherThread sees update in main loop - + // Get the PeriodicTask_lock if we can. If we cannot, then the + // WatcherThread is using it and we don't want to block on that lock + // here because that might cause a safepoint deadlock depending on + // what the current WatcherThread tasks are doing. + bool have_lock = PeriodicTask_lock->try_lock(); + + _should_terminate = true; + OrderAccess::fence(); // ensure WatcherThread sees update in main loop + + if (have_lock) { WatcherThread* watcher = watcher_thread(); - if (watcher != NULL) + if (watcher != NULL) { + // If we managed to get the lock, then we should unpark the + // WatcherThread so that it can see we want it to stop. watcher->unpark(); + } + + PeriodicTask_lock->unlock(); } // it is ok to take late safepoints here, if needed
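WatcherThread::stop() above sets _should_terminate unconditionally but only unparks the watcher if PeriodicTask_lock could be acquired with try_lock, because blocking on that lock here could deadlock with whatever periodic task is currently running. A condensed sketch of the same protocol with standard-library types (unpark_watcher stands in for watcher->unpark()):

    #include <atomic>
    #include <mutex>

    std::mutex periodic_task_lock;              // stands in for PeriodicTask_lock
    std::atomic<bool> should_terminate{false};

    void stop_watcher(void (*unpark_watcher)()) {
      bool have_lock = periodic_task_lock.try_lock();   // never block here
      should_terminate.store(true);                     // seq_cst store plays the role of the fence
      if (have_lock) {
        unpark_watcher();                               // watcher can now observe the flag and exit
        periodic_task_lock.unlock();
      }
    }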
--- a/hotspot/src/share/vm/runtime/thread.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/thread.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -343,42 +343,16 @@ bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; } - void set_suspend_flag(SuspendFlags f) { - assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); - uint32_t flags; - do { - flags = _suspend_flags; - } - while (Atomic::cmpxchg((jint)(flags | f), - (volatile jint*)&_suspend_flags, - (jint)flags) != (jint)flags); - } - void clear_suspend_flag(SuspendFlags f) { - assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); - uint32_t flags; - do { - flags = _suspend_flags; - } - while (Atomic::cmpxchg((jint)(flags & ~f), - (volatile jint*)&_suspend_flags, - (jint)flags) != (jint)flags); - } + inline void set_suspend_flag(SuspendFlags f); + inline void clear_suspend_flag(SuspendFlags f); - void set_has_async_exception() { - set_suspend_flag(_has_async_exception); - } - void clear_has_async_exception() { - clear_suspend_flag(_has_async_exception); - } + inline void set_has_async_exception(); + inline void clear_has_async_exception(); bool do_critical_native_unlock() const { return (_suspend_flags & _critical_native_unlock) != 0; } - void set_critical_native_unlock() { - set_suspend_flag(_critical_native_unlock); - } - void clear_critical_native_unlock() { - clear_suspend_flag(_critical_native_unlock); - } + inline void set_critical_native_unlock(); + inline void clear_critical_native_unlock(); // Support for Unhandled Oop detection #ifdef CHECK_UNHANDLED_OOPS @@ -1074,8 +1048,8 @@ // Suspend/resume support for JavaThread private: - void set_ext_suspended() { set_suspend_flag (_ext_suspended); } - void clear_ext_suspended() { clear_suspend_flag(_ext_suspended); } + inline void set_ext_suspended(); + inline void clear_ext_suspended(); public: void java_suspend(); @@ -1123,11 +1097,11 @@ // via the appropriate -XX options. bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits); - void set_external_suspend() { set_suspend_flag (_external_suspend); } - void clear_external_suspend() { clear_suspend_flag(_external_suspend); } + inline void set_external_suspend(); + inline void clear_external_suspend(); - void set_deopt_suspend() { set_suspend_flag (_deopt_suspend); } - void clear_deopt_suspend() { clear_suspend_flag(_deopt_suspend); } + inline void set_deopt_suspend(); + inline void clear_deopt_suspend(); bool is_deopt_suspend() { return (_suspend_flags & _deopt_suspend) != 0; } bool is_external_suspend() const { @@ -1215,11 +1189,7 @@ void set_pending_unsafe_access_error() { _special_runtime_exit_condition = _async_unsafe_access_error; } - void set_pending_async_exception(oop e) { - _pending_async_exception = e; - _special_runtime_exit_condition = _async_exception; - set_has_async_exception(); - } + inline void set_pending_async_exception(oop e); // Fast-locking support bool is_lock_owned(address adr) const;
--- a/hotspot/src/share/vm/runtime/thread.inline.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/thread.inline.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -27,6 +27,7 @@ #define SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE +#include "runtime/atomic.inline.hpp" #include "runtime/thread.hpp" #ifdef TARGET_OS_FAMILY_linux # include "thread_linux.inline.hpp" @@ -46,6 +47,40 @@ #undef SHARE_VM_RUNTIME_THREAD_INLINE_HPP_SCOPE +inline void Thread::set_suspend_flag(SuspendFlags f) { + assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); + uint32_t flags; + do { + flags = _suspend_flags; + } + while (Atomic::cmpxchg((jint)(flags | f), + (volatile jint*)&_suspend_flags, + (jint)flags) != (jint)flags); +} +inline void Thread::clear_suspend_flag(SuspendFlags f) { + assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); + uint32_t flags; + do { + flags = _suspend_flags; + } + while (Atomic::cmpxchg((jint)(flags & ~f), + (volatile jint*)&_suspend_flags, + (jint)flags) != (jint)flags); +} + +inline void Thread::set_has_async_exception() { + set_suspend_flag(_has_async_exception); +} +inline void Thread::clear_has_async_exception() { + clear_suspend_flag(_has_async_exception); +} +inline void Thread::set_critical_native_unlock() { + set_suspend_flag(_critical_native_unlock); +} +inline void Thread::clear_critical_native_unlock() { + clear_suspend_flag(_critical_native_unlock); +} + inline jlong Thread::cooked_allocated_bytes() { jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes); if (UseTLAB) { @@ -59,6 +94,33 @@ return allocated_bytes; } +inline void JavaThread::set_ext_suspended() { + set_suspend_flag (_ext_suspended); +} +inline void JavaThread::clear_ext_suspended() { + clear_suspend_flag(_ext_suspended); +} + +inline void JavaThread::set_external_suspend() { + set_suspend_flag(_external_suspend); +} +inline void JavaThread::clear_external_suspend() { + clear_suspend_flag(_external_suspend); +} + +inline void JavaThread::set_deopt_suspend() { + set_suspend_flag(_deopt_suspend); +} +inline void JavaThread::clear_deopt_suspend() { + clear_suspend_flag(_deopt_suspend); +} + +inline void JavaThread::set_pending_async_exception(oop e) { + _pending_async_exception = e; + _special_runtime_exit_condition = _async_exception; + set_has_async_exception(); +} + #ifdef PPC64 inline JavaThreadState JavaThread::thread_state() const { return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
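The suspend-flag setters moved into thread.inline.hpp above all follow the same lock-free read-modify-write: read the current flags, compute the new bit pattern, and retry the compare-and-swap until no other thread has changed the word in between. The same loop expressed with std::atomic (field and flag names are illustrative):

    #include <atomic>
    #include <cstdint>

    void set_flag(std::atomic<uint32_t>& flags, uint32_t f) {
      uint32_t old_flags = flags.load();
      // compare_exchange_weak refreshes old_flags on failure, so simply retry.
      while (!flags.compare_exchange_weak(old_flags, old_flags | f)) {
      }
    }

    void clear_flag(std::atomic<uint32_t>& flags, uint32_t f) {
      uint32_t old_flags = flags.load();
      while (!flags.compare_exchange_weak(old_flags, old_flags & ~f)) {
      }
    }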
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -104,6 +104,7 @@ #include "utilities/globalDefinitions.hpp" #include "utilities/hashtable.hpp" #include "utilities/macros.hpp" + #ifdef TARGET_ARCH_x86 # include "vmStructs_x86.hpp" #endif @@ -168,6 +169,11 @@ #include "gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp" #include "gc_implementation/g1/vmStructs_g1.hpp" #endif // INCLUDE_ALL_GCS + +#if INCLUDE_TRACE + #include "runtime/vmStructs_trace.hpp" +#endif + #ifdef COMPILER2 #include "opto/addnode.hpp" #include "opto/block.hpp" @@ -1390,6 +1396,8 @@ /* unsigned short on Win32 */ \ declare_unsigned_integer_type(u1) \ declare_unsigned_integer_type(u2) \ + declare_unsigned_integer_type(u4) \ + declare_unsigned_integer_type(u8) \ declare_unsigned_integer_type(unsigned) \ \ /*****************************/ \ @@ -2923,6 +2931,11 @@ GENERATE_STATIC_VM_STRUCT_ENTRY) #endif // INCLUDE_ALL_GCS +#if INCLUDE_TRACE + VM_STRUCTS_TRACE(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, + GENERATE_STATIC_VM_STRUCT_ENTRY) +#endif + VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, GENERATE_STATIC_VM_STRUCT_ENTRY, GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY, @@ -2968,6 +2981,11 @@ GENERATE_TOPLEVEL_VM_TYPE_ENTRY) #endif // INCLUDE_ALL_GCS +#if INCLUDE_TRACE + VM_TYPES_TRACE(GENERATE_VM_TYPE_ENTRY, + GENERATE_TOPLEVEL_VM_TYPE_ENTRY) +#endif + VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY, GENERATE_TOPLEVEL_VM_TYPE_ENTRY, GENERATE_OOP_VM_TYPE_ENTRY, @@ -3003,6 +3021,10 @@ VM_INT_CONSTANTS_PARNEW(GENERATE_VM_INT_CONSTANT_ENTRY) #endif // INCLUDE_ALL_GCS +#if INCLUDE_TRACE + VM_INT_CONSTANTS_TRACE(GENERATE_VM_INT_CONSTANT_ENTRY) +#endif + VM_INT_CONSTANTS_CPU(GENERATE_VM_INT_CONSTANT_ENTRY, GENERATE_PREPROCESSOR_VM_INT_CONSTANT_ENTRY, GENERATE_C1_VM_INT_CONSTANT_ENTRY, @@ -3065,8 +3087,14 @@ VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY, CHECK_STATIC_VM_STRUCT_ENTRY); + #endif // INCLUDE_ALL_GCS +#if INCLUDE_TRACE + VM_STRUCTS_TRACE(CHECK_NONSTATIC_VM_STRUCT_ENTRY, + CHECK_STATIC_VM_STRUCT_ENTRY); +#endif + VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY, CHECK_STATIC_VM_STRUCT_ENTRY, CHECK_NO_OP, @@ -3105,8 +3133,14 @@ VM_TYPES_G1(CHECK_VM_TYPE_ENTRY, CHECK_SINGLE_ARG_VM_TYPE_NO_OP); + #endif // INCLUDE_ALL_GCS +#if INCLUDE_TRACE + VM_TYPES_TRACE(CHECK_VM_TYPE_ENTRY, + CHECK_SINGLE_ARG_VM_TYPE_NO_OP); +#endif + VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY, CHECK_SINGLE_ARG_VM_TYPE_NO_OP, CHECK_SINGLE_ARG_VM_TYPE_NO_OP, @@ -3169,6 +3203,12 @@ debug_only(VM_STRUCTS_G1(ENSURE_FIELD_TYPE_PRESENT, ENSURE_FIELD_TYPE_PRESENT)); #endif // INCLUDE_ALL_GCS + +#if INCLUDE_TRACE + debug_only(VM_STRUCTS_TRACE(ENSURE_FIELD_TYPE_PRESENT, + ENSURE_FIELD_TYPE_PRESENT)); +#endif + debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, ENSURE_FIELD_TYPE_PRESENT, CHECK_NO_OP,
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/src/share/vm/runtime/vmStructs_trace.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_VMSTRUCTS_TRACE_HPP +#define SHARE_VM_RUNTIME_VMSTRUCTS_TRACE_HPP + +#define VM_INT_CONSTANTS_TRACE(a) + +#define VM_STRUCTS_TRACE(a, b) + +#define VM_TYPES_TRACE(a, b) + + +#endif // SHARE_VM_RUNTIME_VMSTRUCTS_TRACE_HPP
--- a/hotspot/src/share/vm/runtime/vm_version.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/runtime/vm_version.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -72,14 +72,16 @@ #ifndef JRE_RELEASE_VERSION #error JRE_RELEASE_VERSION must be defined #endif -#ifndef HOTSPOT_BUILD_TARGET - #error HOTSPOT_BUILD_TARGET must be defined -#endif -#ifdef PRODUCT +// NOTE: Builds within Visual Studio do not define the build target in +// HOTSPOT_RELEASE_VERSION, so it must be done here +#if defined(VISUAL_STUDIO_BUILD) && !defined(PRODUCT) + #ifndef HOTSPOT_BUILD_TARGET + #error HOTSPOT_BUILD_TARGET must be defined + #endif + #define VM_RELEASE HOTSPOT_RELEASE_VERSION "-" HOTSPOT_BUILD_TARGET +#else #define VM_RELEASE HOTSPOT_RELEASE_VERSION -#else - #define VM_RELEASE HOTSPOT_RELEASE_VERSION "-" HOTSPOT_BUILD_TARGET #endif // HOTSPOT_RELEASE_VERSION follows the JDK release version naming convention
--- a/hotspot/src/share/vm/services/memPtr.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/services/memPtr.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "runtime/atomic.inline.hpp" #include "services/memPtr.hpp" #include "services/memTracker.hpp"
--- a/hotspot/src/share/vm/services/memPtr.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/services/memPtr.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -26,7 +26,6 @@ #define SHARE_VM_SERVICES_MEM_PTR_HPP #include "memory/allocation.hpp" -#include "runtime/atomic.hpp" #include "runtime/os.hpp" #include "runtime/safepoint.hpp"
--- a/hotspot/src/share/vm/services/memRecorder.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/services/memRecorder.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,7 +24,7 @@ #include "precompiled.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" #include "services/memBaseline.hpp" #include "services/memRecorder.hpp" #include "services/memPtr.hpp"
--- a/hotspot/src/share/vm/services/memTracker.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/services/memTracker.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "oops/instanceKlass.hpp" -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/safepoint.hpp"
--- a/hotspot/src/share/vm/services/memTracker.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/services/memTracker.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -481,17 +481,9 @@ if (_slowdown_calling_thread && thr != _worker_thread) { #ifdef _WINDOWS // On Windows, os::NakedYield() does not work as well - // as os::yield_all() - os::yield_all(); + // as short sleep. + os::naked_short_sleep(1); #else - // On Solaris, os::yield_all() depends on os::sleep() - // which requires JavaTherad in _thread_in_vm state. - // Transits thread to _thread_in_vm state can be dangerous - // if caller holds lock, as it may deadlock with Threads_lock. - // So use NaKedYield instead. - // - // Linux and BSD, NakedYield() and yield_all() implementations - // are the same. os::NakedYield(); #endif }
--- a/hotspot/src/share/vm/services/threadService.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/services/threadService.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -29,6 +29,7 @@ #include "memory/oopFactory.hpp" #include "oops/instanceKlass.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/thread.hpp"
--- a/hotspot/src/share/vm/shark/sharkRuntime.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/shark/sharkRuntime.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ */ #include "precompiled.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/deoptimization.hpp" #include "runtime/thread.hpp"
--- a/hotspot/src/share/vm/trace/tracetypes.xml Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/trace/tracetypes.xml Wed Jul 05 19:49:46 2017 +0200 @@ -98,6 +98,7 @@ <value type="SYMBOL" field="name" label="Name"/> <value type="SYMBOL" field="signature" label="Signature"/> <value type="SHORT" field="modifiers" label="Access modifiers"/> + <value type="BOOLEAN" field="hidden" label="Hidden"/> </content_type> <content_type id="UTFConstant" hr_name="UTF constant"
--- a/hotspot/src/share/vm/utilities/accessFlags.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/accessFlags.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "utilities/accessFlags.hpp" #ifdef TARGET_OS_FAMILY_linux # include "os_linux.inline.hpp"
--- a/hotspot/src/share/vm/utilities/bitMap.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/bitMap.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" +#include "runtime/atomic.inline.hpp" #include "utilities/bitMap.inline.hpp" #include "utilities/copy.hpp" #ifdef TARGET_OS_FAMILY_linux
--- a/hotspot/src/share/vm/utilities/bitMap.inline.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/bitMap.inline.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,7 +25,7 @@ #ifndef SHARE_VM_UTILITIES_BITMAP_INLINE_HPP #define SHARE_VM_UTILITIES_BITMAP_INLINE_HPP -#include "runtime/atomic.hpp" +#include "runtime/atomic.inline.hpp" #include "utilities/bitMap.hpp" #ifdef ASSERT
--- a/hotspot/src/share/vm/utilities/debug.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/debug.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -39,6 +39,7 @@ #include "oops/oop.inline.hpp" #include "prims/privilegedStack.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/frame.hpp" #include "runtime/java.hpp" #include "runtime/sharedRuntime.hpp"
--- a/hotspot/src/share/vm/utilities/histogram.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/histogram.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "utilities/histogram.hpp" #ifdef ASSERT
--- a/hotspot/src/share/vm/utilities/ostream.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/ostream.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "compiler/compileLog.hpp" +#include "gc_implementation/shared/gcId.hpp" #include "oops/oop.inline.hpp" #include "runtime/arguments.hpp" #include "utilities/defaultStream.hpp" @@ -240,6 +241,14 @@ return; } +void outputStream::gclog_stamp(const GCId& gc_id) { + date_stamp(PrintGCDateStamps); + stamp(PrintGCTimeStamps); + if (PrintGCID) { + print("#%u: ", gc_id.id()); + } +} + outputStream& outputStream::indent() { while (_position < _indentation) sp(); return *this;
--- a/hotspot/src/share/vm/utilities/ostream.hpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/ostream.hpp Wed Jul 05 19:49:46 2017 +0200 @@ -28,6 +28,7 @@ #include "memory/allocation.hpp" #include "runtime/timer.hpp" +class GCId; DEBUG_ONLY(class ResourceMark;) // Output streams for printing @@ -107,6 +108,7 @@ void date_stamp(bool guard) { date_stamp(guard, "", ": "); } + void gclog_stamp(const GCId& gc_id); // portable printing of 64 bit integers void print_jlong(jlong value);
--- a/hotspot/src/share/vm/utilities/taskqueue.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/taskqueue.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "oops/oop.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/os.hpp" #include "runtime/thread.inline.hpp" #include "utilities/debug.hpp"
--- a/hotspot/src/share/vm/utilities/vmError.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/vmError.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -27,6 +27,7 @@ #include "gc_interface/collectedHeap.hpp" #include "prims/whitebox.hpp" #include "runtime/arguments.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/frame.inline.hpp" #include "runtime/init.hpp" #include "runtime/os.hpp"
--- a/hotspot/src/share/vm/utilities/workgroup.cpp Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/src/share/vm/utilities/workgroup.cpp Wed Jul 05 19:49:46 2017 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" +#include "runtime/atomic.inline.hpp" #include "runtime/os.hpp" #include "utilities/workgroup.hpp"
--- a/hotspot/test/compiler/8009761/Test8009761.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/8009761/Test8009761.java Wed Jul 05 19:49:46 2017 +0200 @@ -21,11 +21,7 @@ * questions. */ -import com.sun.management.HotSpotDiagnosticMXBean; -import com.sun.management.VMOption; import sun.hotspot.WhiteBox; -import sun.management.ManagementFactoryHelper; - import java.lang.reflect.Method; /* @@ -40,6 +36,7 @@ public class Test8009761 { private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); + private static int COMP_LEVEL_SIMPLE = 1; private static int COMP_LEVEL_FULL_OPTIMIZATION = 4; private static Method m3 = null; @@ -236,7 +233,7 @@ static public void main(String[] args) { // Make sure background compilation is disabled - if (backgroundCompilationEnabled()) { + if (WHITE_BOX.getBooleanVMFlag("BackgroundCompilation")) { throw new RuntimeException("Background compilation enabled"); } @@ -256,7 +253,11 @@ c1 = count; // Force the compilation of m3() that will inline m1() - WHITE_BOX.enqueueMethodForCompilation(m3, COMP_LEVEL_FULL_OPTIMIZATION); + if(!WHITE_BOX.enqueueMethodForCompilation(m3, COMP_LEVEL_FULL_OPTIMIZATION)) { + // C2 compiler not available, compile with C1 + WHITE_BOX.enqueueMethodForCompilation(m3, COMP_LEVEL_SIMPLE); + } + // Because background compilation is disabled, method should now be compiled if(!WHITE_BOX.isMethodCompiled(m3)) { throw new RuntimeException(m3 + " not compiled"); @@ -278,19 +279,4 @@ System.out.println("PASSED " + c1); } } - - /** - * Checks if background compilation (-XX:+BackgroundCompilation) is enabled. - * @return True if background compilation is enabled, false otherwise - */ - private static boolean backgroundCompilationEnabled() { - HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean(); - VMOption backgroundCompilation; - try { - backgroundCompilation = diagnostic.getVMOption("BackgroundCompilation"); - } catch (IllegalArgumentException e) { - return false; - } - return Boolean.valueOf(backgroundCompilation.getValue()); - } }
--- a/hotspot/test/compiler/ciReplay/TestSA.sh Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/ciReplay/TestSA.sh Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ ## ## @test ## @bug 8011675 +## @ignore 8032226, 8031978 ## @summary testing of ciReplay with using generated by SA replay.txt ## @author igor.ignatyev@oracle.com ## @run shell TestSA.sh
--- a/hotspot/test/compiler/ciReplay/TestVM.sh Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/ciReplay/TestVM.sh Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ ## ## @test ## @bug 8011675 +## @ignore 8031978 ## @summary testing of ciReplay with using generated by VM replay.txt ## @author igor.ignatyev@oracle.com ## @run shell TestVM.sh
--- a/hotspot/test/compiler/ciReplay/TestVM_no_comp_level.sh Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/ciReplay/TestVM_no_comp_level.sh Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ ## ## @test ## @bug 8011675 +## @ignore 8031978 ## @summary testing of ciReplay with using generated by VM replay.txt w/o comp_level ## @author igor.ignatyev@oracle.com ## @run shell TestVM_no_comp_level.sh
--- a/hotspot/test/compiler/tiered/NonTieredLevelsTest.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/tiered/NonTieredLevelsTest.java Wed Jul 05 19:49:46 2017 +0200 @@ -25,6 +25,7 @@ /** * @test NonTieredLevelsTest + * @ignore 8046268 * @library /testlibrary /testlibrary/whitebox /compiler/whitebox * @build NonTieredLevelsTest * @run main ClassFileInstaller sun.hotspot.WhiteBox
--- a/hotspot/test/compiler/tiered/TieredLevelsTest.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/tiered/TieredLevelsTest.java Wed Jul 05 19:49:46 2017 +0200 @@ -23,6 +23,7 @@ /** * @test TieredLevelsTest + * @ignore 8046268 * @library /testlibrary /testlibrary/whitebox /compiler/whitebox * @build TieredLevelsTest * @run main ClassFileInstaller sun.hotspot.WhiteBox
--- a/hotspot/test/compiler/whitebox/ClearMethodStateTest.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/whitebox/ClearMethodStateTest.java Wed Jul 05 19:49:46 2017 +0200 @@ -25,6 +25,7 @@ /* * @test ClearMethodStateTest + * @ignore 8046268 * @bug 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build ClearMethodStateTest
--- a/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/whitebox/DeoptimizeAllTest.java Wed Jul 05 19:49:46 2017 +0200 @@ -23,6 +23,7 @@ /* * @test DeoptimizeAllTest + * @ignore 8046268 * @bug 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build DeoptimizeAllTest
--- a/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/whitebox/DeoptimizeMethodTest.java Wed Jul 05 19:49:46 2017 +0200 @@ -23,6 +23,7 @@ /* * @test DeoptimizeMethodTest + * @ignore 8046268 * @bug 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build DeoptimizeMethodTest
--- a/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/whitebox/EnqueueMethodForCompilationTest.java Wed Jul 05 19:49:46 2017 +0200 @@ -23,6 +23,7 @@ /* * @test EnqueueMethodForCompilationTest + * @ignore 8046268 * @bug 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build EnqueueMethodForCompilationTest
--- a/hotspot/test/compiler/whitebox/GetNMethodTest.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/whitebox/GetNMethodTest.java Wed Jul 05 19:49:46 2017 +0200 @@ -26,6 +26,7 @@ /* * @test GetNMethodTest + * @ignore 8046268 * @bug 8038240 * @library /testlibrary /testlibrary/whitebox * @build GetNMethodTest
--- a/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/compiler/whitebox/MakeMethodNotCompilableTest.java Wed Jul 05 19:49:46 2017 +0200 @@ -23,6 +23,7 @@ /* * @test MakeMethodNotCompilableTest + * @ignore 8046268 * @bug 8012322 8006683 8007288 8022832 * @library /testlibrary /testlibrary/whitebox * @build MakeMethodNotCompilableTest
--- a/hotspot/test/gc/g1/TestSummarizeRSetStatsTools.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/gc/g1/TestSummarizeRSetStatsTools.java Wed Jul 05 19:49:46 2017 +0200 @@ -88,7 +88,6 @@ ArrayList<String> finalargs = new ArrayList<String>(); String[] defaultArgs = new String[] { "-XX:+UseG1GC", - "-XX:+UseCompressedOops", "-Xmn4m", "-Xmx20m", "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/gc/logging/TestGCId.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestGCId + * @bug 8043607 + * @summary Ensure that the GCId is logged + * @key gc + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.ProcessTools; +import com.oracle.java.testlibrary.OutputAnalyzer; + +public class TestGCId { + public static void main(String[] args) throws Exception { + testGCId("UseParallelGC", "PrintGC"); + testGCId("UseParallelGC", "PrintGCDetails"); + + testGCId("UseG1GC", "PrintGC"); + testGCId("UseG1GC", "PrintGCDetails"); + + testGCId("UseConcMarkSweepGC", "PrintGC"); + testGCId("UseConcMarkSweepGC", "PrintGCDetails"); + + testGCId("UseSerialGC", "PrintGC"); + testGCId("UseSerialGC", "PrintGCDetails"); + } + + private static void verifyContainsGCIDs(OutputAnalyzer output) { + output.shouldMatch("^#0: \\["); + output.shouldMatch("^#1: \\["); + output.shouldHaveExitValue(0); + } + + private static void verifyContainsNoGCIDs(OutputAnalyzer output) { + output.shouldNotMatch("^#[0-9]+: \\["); + output.shouldHaveExitValue(0); + } + + private static void testGCId(String gcFlag, String logFlag) throws Exception { + // GCID logging enabled + ProcessBuilder pb_enabled = + ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:+PrintGCID", GCTest.class.getName()); + verifyContainsGCIDs(new OutputAnalyzer(pb_enabled.start())); + + // GCID logging disabled + ProcessBuilder pb_disabled = + ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", "-XX:-PrintGCID", GCTest.class.getName()); + verifyContainsNoGCIDs(new OutputAnalyzer(pb_disabled.start())); + + // GCID logging default + ProcessBuilder pb_default = + ProcessTools.createJavaProcessBuilder("-XX:+" + gcFlag, "-XX:+" + logFlag, "-Xmx10M", GCTest.class.getName()); + verifyContainsGCIDs(new OutputAnalyzer(pb_default.start())); + } + + static class GCTest { + private static byte[] garbage; + public static void main(String [] args) { + System.out.println("Creating garbage"); + // create 128MB of garbage. This should result in at least one GC + for (int i = 0; i < 1024; i++) { + garbage = new byte[128 * 1024]; + } + // do a system gc to get one more gc + System.gc(); + System.out.println("Done"); + } + } +}
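The verifyContainsGCIDs / verifyContainsNoGCIDs checks in the new TestGCId.java reduce to a per-line prefix match. A minimal sketch of that same check, assuming the "#<n>: [" prefix that the test's regular expressions describe (the GCIdLineCheck class name and the sample log lines below are illustrative, not taken from the changeset):

import java.util.regex.Pattern;

// Minimal sketch: a GC log line counts as id-tagged when it starts with
// "#<id>: [", the prefix the test above expects -XX:+PrintGCID to produce.
class GCIdLineCheck {
    private static final Pattern GC_ID_PREFIX = Pattern.compile("^#\\d+: \\[.*");

    static boolean hasGCId(String logLine) {
        return GC_ID_PREFIX.matcher(logLine).matches();
    }

    public static void main(String[] args) {
        System.out.println(hasGCId("#0: [GC (Allocation Failure) 10M->2M(20M)]")); // true
        System.out.println(hasGCId("[GC (Allocation Failure) 10M->2M(20M)]"));     // false
    }
}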
--- a/hotspot/test/runtime/8001071/Test8001071.java Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -import sun.misc.Unsafe; -import java.lang.reflect.Field; - -@SuppressWarnings("sunapi") -public class Test8001071 { - public static Unsafe unsafe; - - static { - try { - Field f = Unsafe.class.getDeclaredField("theUnsafe"); - f.setAccessible(true); - unsafe = (Unsafe) f.get(null); - } catch ( Exception e ) { - e.printStackTrace(); - } - } - - public static void main(String args[]) { - unsafe.getObject(new Test8001071(), Short.MAX_VALUE); - } - -}
--- a/hotspot/test/runtime/8001071/Test8001071.sh Thu Jul 10 12:13:22 2014 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,63 +0,0 @@ -#!/bin/sh - -# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. -# -# This code is free software; you can redistribute it and/or modify it -# under the terms of the GNU General Public License version 2 only, as -# published by the Free Software Foundation. -# -# This code is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# version 2 for more details (a copy is included in the LICENSE file that -# accompanied this code). -# -# You should have received a copy of the GNU General Public License version -# 2 along with this work; if not, write to the Free Software Foundation, -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. -# -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA -# or visit www.oracle.com if you need additional information or have any -# questions. - -## @test -## @bug 8001071 -## @summary Add simple range check into VM implemenation of Unsafe access methods -## @compile Test8001071.java -## @run shell Test8001071.sh -## @author filipp.zhinkin@oracle.com - -VERSION=`${TESTJAVA}/bin/java ${TESTVMOPTS} -version 2>&1` - -if [ -n "`echo $VERSION | grep debug`" -o -n "`echo $VERSION | grep jvmg`" ]; then - echo "Build type check passed" - echo "Continue testing" -else - echo "Fastdebug build is required for this test" - exit 0 -fi - -${TESTJAVA}/bin/java -cp ${TESTCLASSES} ${TESTVMOPTS} Test8001071 2>&1 - -HS_ERR_FILE=hs_err_pid*.log - -if [ ! -f $HS_ERR_FILE ] -then - echo "hs_err_pid log file was not found" - echo "Test failed" - exit 1 -fi - -grep "assert(byte_offset < p_size) failed: Unsafe access: offset.*> object's size.*" $HS_ERR_FILE - -if [ "0" = "$?" ]; -then - echo "Range check assertion failed as expected" - echo "Test passed" - exit 0 -else - echo "Range check assertion was not failed" - echo "Test failed" - exit 1 -fi
--- a/hotspot/test/runtime/CDSCompressedKPtrs/XShareAuto.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/runtime/CDSCompressedKPtrs/XShareAuto.java Wed Jul 05 19:49:46 2017 +0200 @@ -44,8 +44,16 @@ "-server", "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-version"); output = new OutputAnalyzer(pb.start()); - output.shouldNotContain("sharing"); - output.shouldHaveExitValue(0); + // We asked for server but it could be aliased to something else + if (output.getOutput().contains("Server VM")) { + // In server case we don't expect to see sharing flag + output.shouldNotContain("sharing"); + output.shouldHaveExitValue(0); + } + else { + System.out.println("Skipping test - no Server VM available"); + return; + } pb = ProcessTools.createJavaProcessBuilder( "-server", "-Xshare:auto", "-XX:+UnlockDiagnosticVMOptions",
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/Unsafe/RangeCheck.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8001071 + * @summary Add simple range check into VM implemenation of Unsafe access methods + * @library /testlibrary + */ + +import com.oracle.java.testlibrary.*; +import sun.misc.Unsafe; + +public class RangeCheck { + + public static void main(String args[]) throws Exception { + if (!Platform.isDebugBuild()) { + System.out.println("Testing assert which requires a debug build. Passing silently."); + return; + } + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + true, + "-Xmx32m", + "-XX:-TransmitErrorReport", + "-XX:-InlineUnsafeOps", // The compiler intrinsics doesn't have the assert + DummyClassWithMainRangeCheck.class.getName()); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldMatch("assert\\(byte_offset < p_size\\) failed: Unsafe access: offset \\d+ > object's size \\d+"); + } + + public static class DummyClassWithMainRangeCheck { + public static void main(String args[]) throws Exception { + Unsafe unsafe = Utils.getUnsafe(); + unsafe.getObject(new DummyClassWithMainRangeCheck(), Short.MAX_VALUE); + } + } +}
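The new RangeCheck.java obtains Unsafe through the test library's Utils.getUnsafe() helper, whereas the removed Test8001071.java did the lookup by reflection. A minimal sketch of that reflective lookup, mirroring the deleted test's static initializer (the UnsafeAccess class name is illustrative):

import java.lang.reflect.Field;

import sun.misc.Unsafe;

// Minimal sketch: fetch the Unsafe singleton via its private static field,
// as the removed Test8001071.java did in its static initializer.
class UnsafeAccess {
    static Unsafe getUnsafe() throws ReflectiveOperationException {
        Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        return (Unsafe) f.get(null);
    }
}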
--- a/hotspot/test/runtime/memory/ReadFromNoaccessArea.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/runtime/memory/ReadFromNoaccessArea.java Wed Jul 05 19:49:46 2017 +0200 @@ -47,6 +47,7 @@ "-XX:+WhiteBoxAPI", "-XX:+UseCompressedOops", "-XX:HeapBaseMinAddress=33G", + "-Xmx32m", DummyClassWithMainTryingToReadFromNoaccessArea.class.getName()); OutputAnalyzer output = new OutputAnalyzer(pb.start());
--- a/hotspot/test/runtime/memory/ReserveMemory.java Thu Jul 10 12:13:22 2014 -0700 +++ b/hotspot/test/runtime/memory/ReserveMemory.java Wed Jul 05 19:49:46 2017 +0200 @@ -57,6 +57,7 @@ "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI", "-XX:-TransmitErrorReport", + "-Xmx32m", "ReserveMemory", "test");
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hotspot/test/runtime/verifier/TestANewArray.java Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.io.File; +import java.io.FileOutputStream; + +import jdk.internal.org.objectweb.asm.ClassWriter; +import jdk.internal.org.objectweb.asm.MethodVisitor; +import static jdk.internal.org.objectweb.asm.Opcodes.*; + +import com.oracle.java.testlibrary.*; + +/* + * @test + * @summary Test that anewarray bytecode is valid only if it specifies 255 or fewer dimensions. + * @library /testlibrary + * @compile -XDignore.symbol.file TestANewArray.java + * @run main/othervm TestANewArray 49 + * @run main/othervm TestANewArray 50 + * @run main/othervm TestANewArray 51 + * @run main/othervm TestANewArray 52 + */ + +/* + * Testing anewarray instruction with 254, 255 & 264 dimensions to verify JVMS 8, + * Section 4.9.1, Static Constraints that states the following: + * + * "No anewarray instruction may be used to create an array of more than 255 dimensions." + * + */ + +public class TestANewArray { + + static String classCName = null; // the generated class name + + static final int test_Dimension_254 = 254; // should always pass + static final int test_Dimension_255 = 255; // should always pass, except for cfv 49 + static final int test_Dimension_264 = 264; // should always fail + + static final String array_Dimension_254 = genArrayDim(test_Dimension_254); + static final String array_Dimension_255 = genArrayDim(test_Dimension_255); + static final String array_Dimension_264 = genArrayDim(test_Dimension_264); + + public static void main(String... 
args) throws Exception { + int cfv = Integer.parseInt(args[0]); + + // 254 array dimensions + byte[] classFile_254 = dumpClassFile(cfv, test_Dimension_254, array_Dimension_254); + writeClassFileFromByteArray(classFile_254); + System.err.println("Running with cfv: " + cfv + ", test_Dimension_254"); + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true, "-verify", "-cp", ".", classCName); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldNotContain("java.lang.VerifyError"); + output.shouldHaveExitValue(0); + + // 255 array dimensions + byte[] classFile_255 = dumpClassFile(cfv, test_Dimension_255, array_Dimension_255); + writeClassFileFromByteArray(classFile_255); + System.err.println("Running with cfv: " + cfv + ", test_Dimension_255"); + pb = ProcessTools.createJavaProcessBuilder(true, "-verify", "-cp", ".", classCName); + output = new OutputAnalyzer(pb.start()); + if (cfv == 49) { + // The type-inferencing verifier used for <=49.0 ClassFiles detects an anewarray instruction + // with exactly 255 dimensions and incorrectly issues the "Array with too many dimensions" VerifyError. + output.shouldContain("Array with too many dimensions"); + output.shouldHaveExitValue(1); + } else { + // 255 dimensions should always pass, except for cfv 49 + output.shouldNotContain("java.lang.VerifyError"); + output.shouldNotContain("java.lang.ClassFormatError"); + output.shouldHaveExitValue(0); + } + + // 264 array dimensions + byte[] classFile_264 = dumpClassFile(cfv, test_Dimension_264, array_Dimension_264); + writeClassFileFromByteArray(classFile_264); + System.err.println("Running with cfv: " + cfv + ", test_Dimension_264"); + pb = ProcessTools.createJavaProcessBuilder(true, "-verify", "-cp", ".", classCName); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("java.lang.ClassFormatError"); + output.shouldHaveExitValue(1); + } + + public static byte[] dumpClassFile(int cfv, int testDimension264, String arrayDim) throws Exception { + ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES); + MethodVisitor mv; + + classCName = "classCName_" + cfv + "_" + testDimension264; + + cw.visit(cfv, ACC_PUBLIC + ACC_SUPER, classCName, null, "java/lang/Object", null); + { + mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null); + mv.visitCode(); + mv.visitVarInsn(ALOAD, 0); + mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false); + mv.visitInsn(RETURN); + mv.visitMaxs(1, 1); + mv.visitEnd(); + } + { // classCName main method + mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "main", "([Ljava/lang/String;)V", null, null); + mv.visitCode(); + mv.visitIntInsn(BIPUSH, 1); + mv.visitTypeInsn(ANEWARRAY, arrayDim); // Test ANEWARRAY bytecode with various dimensions + mv.visitInsn(RETURN); + mv.visitMaxs(2, 2); + mv.visitEnd(); + } + cw.visitEnd(); + return cw.toByteArray(); + } + + public static FileOutputStream writeClassFileFromByteArray(byte[] classFileByteArray) throws Exception { + FileOutputStream fos = new FileOutputStream(new File(classCName + ".class")); + fos.write(classFileByteArray); + fos.close(); + return fos; + } + + private static String genArrayDim(int testDim) { + StringBuilder array_Dimension = new StringBuilder(); + for (int i = 0; i < testDim; i++) + { + array_Dimension.append("["); + } + return array_Dimension.append("Ljava/lang/Object;").toString(); + } +}
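genArrayDim in TestANewArray.java builds a field descriptor of the form "[[...[Ljava/lang/Object;", so the dimension count the verifier enforces is simply the number of leading '[' characters. A small illustrative helper under that assumption (DescriptorDims and dimensionsOf are names made up here, not part of the test):

// Counts the array dimensions encoded in a JVM field descriptor such as
// "[[Ljava/lang/Object;"; the verifier rejects anewarray operands with more
// than 255 of them.
class DescriptorDims {
    static int dimensionsOf(String descriptor) {
        int dims = 0;
        while (dims < descriptor.length() && descriptor.charAt(dims) == '[') {
            dims++;
        }
        return dims;
    }

    public static void main(String[] args) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < 254; i++) {   // same shape as genArrayDim(254) above
            sb.append('[');
        }
        sb.append("Ljava/lang/Object;");
        System.out.println(dimensionsOf(sb.toString())); // prints 254
    }
}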
--- a/jdk/.hgtags Thu Jul 10 12:13:22 2014 -0700 +++ b/jdk/.hgtags Wed Jul 05 19:49:46 2017 +0200 @@ -264,3 +264,4 @@ 27561aede285c25a3e9ed8bf8918234ad3bb81e3 jdk9-b19 f87c5be90e01a7ffb47947108eb3e0b0b1920880 jdk9-b20 2df45ac1bf491278f38c12e0dfbeebadb6c54c8c jdk9-b21 +85bcf0f99edc08873614afbe5a5563e13ce13c83 jdk9-b22
--- a/jdk/make/CopyFiles.gmk Thu Jul 10 12:13:22 2014 -0700 +++ b/jdk/make/CopyFiles.gmk Wed Jul 05 19:49:46 2017 +0200 @@ -407,16 +407,11 @@ ########################################################################################## -BLACKLISTED_CERTS_SRC := $(JDK_TOPDIR)/src/share/lib/security/blacklisted.certs -BLACKLISTED_CERTS_DST := $(JDK_OUTPUTDIR)/lib/security/blacklisted.certs - ifndef OPENJDK BLACKLIST_SRC := $(JDK_TOPDIR)/src/closed/share/lib/security/blacklist BLACKLIST_DST := $(JDK_OUTPUTDIR)/lib/security/blacklist - BLACKLISTED_CERTS_SRC += $(wildcard $(JDK_TOPDIR)/src/closed/share/lib/security/blacklisted.certs) - TRUSTEDLIBS_SRC := $(JDK_TOPDIR)/src/closed/share/lib/security/trusted.libraries TRUSTEDLIBS_DST := $(JDK_OUTPUTDIR)/lib/security/trusted.libraries @@ -432,20 +427,6 @@ endif -$(BLACKLISTED_CERTS_DST): $(BLACKLISTED_CERTS_SRC) - $(MKDIR) -p $(@D) - $(CAT) $^ | $(SED) '/^$$/d' | $(SORT) | $(UNIQ) > $@.tmp - $(GREP) -i Algorithm $@.tmp > $@ - if [ `$(SED) -n -e "$$=" $@` != 1 ]; then \ - $(ECHO) "Different algorithms defined in $^"; \ - $(RM) $@ $@.tmp; \ - false; \ - fi - $(GREP) -iv Algorithm $@.tmp >> $@ - $(RM) $@.tmp - -COPY_FILES += $(BLACKLISTED_CERTS_DST) - ########################################################################################## ifndef OPENJDK
--- a/jdk/make/GenerateData.gmk Thu Jul 10 12:13:22 2014 -0700 +++ b/jdk/make/GenerateData.gmk Wed Jul 05 19:49:46 2017 +0200 @@ -50,6 +50,9 @@ include gendata/GendataHtml32dtd.gmk GENDATA += $(GENDATA_HTML32DTD) +include gendata/GendataBlacklistedCerts.gmk +GENDATA += $(GENDATA_BLACKLISTED_CERTS) + ########################################################################################## GENDATA_UNINAME := $(JDK_OUTPUTDIR)/classes/java/lang/uniName.dat
--- a/jdk/make/Tools.gmk Thu Jul 10 12:13:22 2014 -0700 +++ b/jdk/make/Tools.gmk Wed Jul 05 19:49:46 2017 +0200 @@ -87,6 +87,9 @@ TOOL_TZDB = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ build.tools.tzdb.TzdbZoneRulesCompiler +TOOL_BLACKLISTED_CERTS = $(JAVA_SMALL) -cp $(JDK_OUTPUTDIR)/btclasses \ + build.tools.blacklistedcertsconverter.BlacklistedCertsConverter + # TODO: There are references to the jdwpgen.jar in jdk/make/netbeans/jdwpgen/build.xml # and nbproject/project.properties in the same dir. Needs to be looked at.
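With the CopyFiles.gmk rule removed, blacklisted.certs is now generated at build time by the BlacklistedCertsConverter tool wired up above, and the PEM data file that follows begins with a "#! java BlacklistedCertsConverter SHA-256" header. As a rough sketch of the kind of work such a converter performs, this computes a SHA-256 digest of one X.509 certificate's encoded bytes using the standard java.security APIs (FingerprintSketch, its command-line argument, and the exact output format are assumptions, not the actual tool):

import java.io.FileInputStream;
import java.security.MessageDigest;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;

// Rough sketch: read the first PEM certificate from the file named on the
// command line and print the SHA-256 digest of its DER encoding as hex.
public class FingerprintSketch {
    public static void main(String[] args) throws Exception {
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        try (FileInputStream in = new FileInputStream(args[0])) {
            X509Certificate cert = (X509Certificate) cf.generateCertificate(in);
            byte[] digest = MessageDigest.getInstance("SHA-256").digest(cert.getEncoded());
            StringBuilder hex = new StringBuilder();
            for (byte b : digest) {
                hex.append(String.format("%02X", b & 0xff));
            }
            System.out.println(hex);
        }
    }
}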
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/jdk/make/data/blacklistedcertsconverter/blacklisted.certs.pem Wed Jul 05 19:49:46 2017 +0200 @@ -0,0 +1,727 @@ +#! java BlacklistedCertsConverter SHA-256 + +# The line above must be the first line of the blacklisted.certs.pem +# file inside src/share/lib/security/. It will be ignored if added in +# src/closed/share/lib/security/blacklisted.certs.pem. + +// Subject: CN=Digisign Server ID (Enrich), +// OU=457608-K, +// O=Digicert Sdn. Bhd., +// C=MY +// Issuer: CN=GTE CyberTrust Global Root, +// OU=GTE CyberTrust Solutions, Inc., +// O=GTE Corporation, +// C=US +// Serial: 120001705 (07:27:14:a9) +-----BEGIN CERTIFICATE----- +MIIDyzCCAzSgAwIBAgIEBycUqTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV +UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU +cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds +b2JhbCBSb290MB4XDTA3MDcxNzE1MTc0OFoXDTEyMDcxNzE1MTY1NFowYzELMAkG +A1UEBhMCTVkxGzAZBgNVBAoTEkRpZ2ljZXJ0IFNkbi4gQmhkLjERMA8GA1UECxMI +NDU3NjA4LUsxJDAiBgNVBAMTG0RpZ2lzaWduIFNlcnZlciBJRCAoRW5yaWNoKTCB +nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEArahkS02Hx4RZufuQRqCmicDx/tXa +VII3DZkrRSYK6Fawf8qo9I5HhAGCKeOzarWR8/uVhbxyqGToCkCcxfRxrnt7agfq +kBRPjYmvlKuyBtQCanuYH1m5Os1U+iDfsioK6bjdaZDAKdNO0JftZszFGUkGf/pe +LHx7hRsyQt97lSUCAwEAAaOCAXgwggF0MBIGA1UdEwEB/wQIMAYBAf8CAQAwXAYD +VR0gBFUwUzBIBgkrBgEEAbE+AQAwOzA5BggrBgEFBQcCARYtaHR0cDovL2N5YmVy +dHJ1c3Qub21uaXJvb3QuY29tL3JlcG9zaXRvcnkuY2ZtMAcGBWCDSgEBMA4GA1Ud +DwEB/wQEAwIB5jCBiQYDVR0jBIGBMH+heaR3MHUxCzAJBgNVBAYTAlVTMRgwFgYD +VQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv +bHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv +b3SCAgGlMEUGA1UdHwQ+MDwwOqA4oDaGNGh0dHA6Ly93d3cucHVibGljLXRydXN0 +LmNvbS9jZ2ktYmluL0NSTC8yMDE4L2NkcC5jcmwwHQYDVR0OBBYEFMYWk04WF+wW +royUdvOGbcV0boR3MA0GCSqGSIb3DQEBBQUAA4GBAHYAe6Z4K2Ydjl42xqSOBfIj +knyTZ9P0wAp9iy3Z6tVvGvPhSilaIoRNUC9LDPL/hcJ7VdREgr5trGeOvLQfkpxR +gBoU9m6rYYgLrRx/90tQUdZlG6ZHcRVesHHzNRTyN71jyNXwk1o0X9g96F33xR7A +5c8fhiSpPAdmzcHSNmNZ +-----END CERTIFICATE----- + +// Subject: CN=Digisign Server ID - (Enrich), +// OU=457608-K, +// O=Digicert Sdn. Bhd., +// C=MY +// Issuer: CN=Entrust.net Certification Authority (2048) +// OU=(c) 1999 Entrust.net Limited, +// OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.), +// O=Entrust.net +// Serial: 1184644297 (4c:0e:63:6a) +-----BEGIN CERTIFICATE----- +MIIEzjCCA7agAwIBAgIETA5jajANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw0xMDA3MTYxNzIzMzdaFw0xNTA3 +MTYxNzUzMzdaMGUxCzAJBgNVBAYTAk1ZMRswGQYDVQQKExJEaWdpY2VydCBTZG4u +IEJoZC4xETAPBgNVBAsTCDQ1NzYwOC1LMSYwJAYDVQQDEx1EaWdpc2lnbiBTZXJ2 +ZXIgSUQgLSAoRW5yaWNoKTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AMWJ5PQNBkCSWccaszXRDkwqM/n4r8qef+65p21g9FTob9Wb8xtjMQRoctE0Foy0 +FyyX3nPF2JAVoBor9cuzSIZE8B2ITM5BQhrv9Qze/kDaOSD3BlU6ap1GwdJvpbLI +Vz4po5zg6YV3ZuiYpyR+vsBZIOVEb7ZX2L7OwmV3WMZhQdF0BMh/SULFcqlyFu6M +3RJdtErU0a9Qt9iqdXZorT5dqjBtYairEFs+E78z4K9EnTgiW+9ML6ZxJhUmyiiM +2fqOjqmiFDXimySItPR/hZ2DTwehthSQNsQ0HI0mYW0Tb3i+6I8nx0uElqOGaAwj +vgvsjJQAqQSKE5D334VsDLECAwEAAaOCATQwggEwMA4GA1UdDwEB/wQEAwIBBjAS +BgNVHRMBAf8ECDAGAQH/AgEAMCcGA1UdJQQgMB4GCCsGAQUFBwMBBggrBgEFBQcD +AgYIKwYBBQUHAwQwMwYIKwYBBQUHAQEEJzAlMCMGCCsGAQUFBzABhhdodHRwOi8v +b2NzcC5lbnRydXN0Lm5ldDBEBgNVHSAEPTA7MDkGBWCDSgEBMDAwLgYIKwYBBQUH +AgEWImh0dHA6Ly93d3cuZGlnaWNlcnQuY29tLm15L2Nwcy5odG0wMgYDVR0fBCsw +KTAnoCWgI4YhaHR0cDovL2NybC5lbnRydXN0Lm5ldC8yMDQ4Y2EuY3JsMBEGA1Ud +DgQKBAhMTswlKAMpgTAfBgNVHSMEGDAWgBRV5IHREYC+2Im5CKMx+aEkCRa5cDAN +BgkqhkiG9w0BAQUFAAOCAQEAl0zvSjpJrHL8MCBrtClbp8WVBJD5MtXChWreA6E3 ++YkAsFqsVX7bQzX/yQH4Ub7MJsrIaqTEVD4mHucMo82XZ5TdpkLrXM2POXlrM3kh +Bnn6gkQVmczBtznTRmJ8snDrb84gqj4Zt+l0gpy0pUtNYQA35IfS8hQ6ZHy4qXth +4JMi59WfPkfmNnagU9gAAzoPtTP+lsrT0oI6Lt3XSOHkp2nMHOmZSufKcEXXCwcO +mnUb0C+Sb/akB8O9HEumhLZ9qJqp0qcp8QtXaR6XVybsK0Os1EWDBQDp4/BGQAf6 +6rFRc5Mcpd1TETfIKqcVJx20qsx/qjEw/LhFn0gJ7RDixQ== +-----END CERTIFICATE----- + +// Subject: CN=Java Media APIs, +// OU=Java Signed Extensions, +// OU=Corporate Object Signing, +// O=Sun Microsystems Inc +// Issuer: CN=Object Signing CA, +// OU=Class 2 OnSite Subscriber CA, +// OU=VeriSign Trust Network, +// O=Sun Microsystems Inc +// Serial: 6a:8b:99:91:37:59:4f:89:53:e2:97:18:9f:19:1e:4e +-----BEGIN CERTIFICATE----- +MIIFdzCCBF+gAwIBAgIQaouZkTdZT4lT4pcYnxkeTjANBgkqhkiG9w0BAQUFADCB +gzEdMBsGA1UEChMUU3VuIE1pY3Jvc3lzdGVtcyBJbmMxHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxJTAjBgNVBAsTHENsYXNzIDIgT25TaXRlIFN1YnNj +cmliZXIgQ0ExGjAYBgNVBAMTEU9iamVjdCBTaWduaW5nIENBMB4XDTA5MDUxMjAw +MDAwMFoXDTEyMDUxMTIzNTk1OVowfTEdMBsGA1UEChQUU3VuIE1pY3Jvc3lzdGVt +cyBJbmMxITAfBgNVBAsUGENvcnBvcmF0ZSBPYmplY3QgU2lnbmluZzEfMB0GA1UE +CxQWSmF2YSBTaWduZWQgRXh0ZW5zaW9uczEYMBYGA1UEAxQPSmF2YSBNZWRpYSBB +UElzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAl5blzoKTVE8y4Hpz +q6E15RZz1bF5HnYEyYqgHkZXnAKedmYCoMzm1XK8s+gQWShLEvGEAvs5yqarx9gE +nnC21N28aEZgIJMa2/arKxCUkS4pxdGPYGexL9UzSRkUpoBShCZKEGdmX7gfJE2K +/sd9MFvGV5/yZtWXrADzvm0Kd/9mg1KRv1gfrZIq0TJbupoXPYYqb73AkI9eT2ZD +q9MdwD4E5+oojsDFXt8GU/D00fUhtXpYwuplU7D667WHYdJhIah0ST6JywyqcLXG +XSuFTXOgITT2idSHluZVmx3dqJ72u9kPkO4JdJTMDfaK8zgNLaRkiU8Qcj+qhLYH +ytaqcwIDAQABo4IB6jCCAeYwCQYDVR0TBAIwADAOBgNVHQ8BAf8EBAMCB4AwfwYD +VR0fBHgwdjB0oHKgcIZuaHR0cDovL29uc2l0ZWNybC52ZXJpc2lnbi5jb20vU3Vu +TWljcm9zeXN0ZW1zSW5jQ29ycG9yYXRlT2JqZWN0U2lnbmluZ0phdmFTaWduZWRF +eHRlbnNpb25zQ2xhc3NCL0xhdGVzdENSTC5jcmwwHwYDVR0jBBgwFoAUs0crgn5T +tHPKuLsZt76BTQeVx+0wHQYDVR0OBBYEFKS32mVx0gNWTeS4ProHEaeSpvvIMDsG +CCsGAQUFBwEBBC8wLTArBggrBgEFBQcwAYYfaHR0cDovL29uc2l0ZS1vY3NwLnZl +cmlzaWduLmNvbTCBtQYDVR0gBIGtMIGqMDkGC2CGSAGG+EUBBxcCMCowKAYIKwYB 
+BQUHAgEWHGh0dHBzOi8vd3d3LnZlcmlzaWduLmNvbS9ycGEwbQYLYIZIAYb3AIN9 +nD8wXjAnBggrBgEFBQcCARYbaHR0cHM6Ly93d3cuc3VuLmNvbS9wa2kvY3BzMDMG +CCsGAQUFBwICMCcaJVZhbGlkYXRlZCBGb3IgU3VuIEJ1c2luZXNzIE9wZXJhdGlv +bnMwEwYDVR0lBAwwCgYIKwYBBQUHAwMwDQYJKoZIhvcNAQEFBQADggEBAAe6BO4W +3TSNWfezyelJs6kE3HfulT6Bdyz4UUoh9ykXcV8nRwT+kh25I5MdyG2GfkJoADPR +VhC5DYo13UFpIsTNVjq+hGYe2hML93bN7ad9SxCCyjHUo3yMz2qgBbHZI3VA9ZHA +aWM4Tx0saMwbcnVvlbuGh+PXvStfypJqYT6lzcdFfjNVX4FI/QQNGhBswMY51tC8 +GTBCL2qhJon0gSCU4zaawDOf7+XxJWirLamYL1Aal1/h2z2sFrvA/1ftxtU3kZ6I +7De8DyoHeZg7pYGdrj7g+lPhCga/WvEhN152I+aP08YbFcJHYmK05ngl/Ye4c6Bd +cdrdfbw6QzEUIYY= +-----END CERTIFICATE----- + +// Subject: CN=JavaFX 1.0 Runtime, +// OU=Java Signed Extensions, +// OU=Corporate Object Signing, +// O=Sun Microsystems Inc +// Issuer: CN=Object Signing CA, +// OU=Class 2 OnSite Subscriber CA, +// OU=VeriSign Trust Network, +// O=Sun Microsystems Inc +// Serial: 55:c0:e6:44:59:59:79:9e:d9:26:f1:b0:4a:1e:f0:27 +-----BEGIN CERTIFICATE----- +MIIFezCCBGOgAwIBAgIQVcDmRFlZeZ7ZJvGwSh7wJzANBgkqhkiG9w0BAQUFADCB +gzEdMBsGA1UEChMUU3VuIE1pY3Jvc3lzdGVtcyBJbmMxHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxJTAjBgNVBAsTHENsYXNzIDIgT25TaXRlIFN1YnNj +cmliZXIgQ0ExGjAYBgNVBAMTEU9iamVjdCBTaWduaW5nIENBMB4XDTA4MTAwOTAw +MDAwMFoXDTExMTAwOTIzNTk1OVowgYAxHTAbBgNVBAoUFFN1biBNaWNyb3N5c3Rl +bXMgSW5jMSEwHwYDVQQLFBhDb3Jwb3JhdGUgT2JqZWN0IFNpZ25pbmcxHzAdBgNV +BAsUFkphdmEgU2lnbmVkIEV4dGVuc2lvbnMxGzAZBgNVBAMUEkphdmFGWCAxLjAg +UnVudGltZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM+WDc6+bu+4 +tmAcS/lBtUc02WOt9QZpVsXg9cG2pu/8bUtmDELa8iiYBVFpIs8DU58HLrGQtCUY +SIAGOVPsOJoN29UKCDWfY9j5JeVhfhMGqk9DwrWhzgsjy4cpZ1pIp+k/fJ8zT8Ul +aYLpow1vg3UNddsmwz02tN7cOrMw9WYIG4CRYnY1OrtJSfe2pYzheC4zyvR+aiVl +nang2OtqikSQsNFOFHsLOJFxngy9LrO8evDSu25VTKI6zlWU6/bMeqtztJPN0VOn +NyUrJZvkxZ207Jg0T693BGSxNC1n+ihztXogql8950M/pEuUbDjylv5FFvlp6DSB +dDT2MkutmyMCAwEAAaOCAeowggHmMAkGA1UdEwQCMAAwDgYDVR0PAQH/BAQDAgeA +MH8GA1UdHwR4MHYwdKByoHCGbmh0dHA6Ly9vbnNpdGVjcmwudmVyaXNpZ24uY29t +L1N1bk1pY3Jvc3lzdGVtc0luY0NvcnBvcmF0ZU9iamVjdFNpZ25pbmdKYXZhU2ln +bmVkRXh0ZW5zaW9uc0NsYXNzQi9MYXRlc3RDUkwuY3JsMB8GA1UdIwQYMBaAFLNH +K4J+U7Rzyri7Gbe+gU0HlcftMB0GA1UdDgQWBBTjgufVi3XJ3gx1ewsA6Rr7BR4Z +zjA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAGGH2h0dHA6Ly9vbnNpdGUtb2Nz +cC52ZXJpc2lnbi5jb20wgbUGA1UdIASBrTCBqjA5BgtghkgBhvhFAQcXAjAqMCgG +CCsGAQUFBwIBFhxodHRwczovL3d3dy52ZXJpc2lnbi5jb20vcnBhMG0GC2CGSAGG +9wCDfZw/MF4wJwYIKwYBBQUHAgEWG2h0dHBzOi8vd3d3LnN1bi5jb20vcGtpL2Nw +czAzBggrBgEFBQcCAjAnGiVWYWxpZGF0ZWQgRm9yIFN1biBCdXNpbmVzcyBPcGVy +YXRpb25zMBMGA1UdJQQMMAoGCCsGAQUFBwMDMA0GCSqGSIb3DQEBBQUAA4IBAQAB +YVJTTVe7rzyTO4jc3zajErOT/COkdQTfNo0eIX1QbNynFieJvwY/jRzUZwjktIFR +2p4JtbpHGAtKtjOAOTieQ8xdDOoC1djzpE7/AbMvuvlTavtUKT+F7tPdhfXgWXJV +6Wbt8jryKyk3zZGiEhauIwZUkfjRkEtffEmZWLUd8c8rURJjfC/XHH2oyurscoxc +CjX29c9ynxSiS/VvQp1an0HvErGh69N48wj7cj8mtZ1yHzd2XCzSSR1OfTPfk0Pt +yg51p7yJaFiH21PTZegEL6zyVNOYBTKwwIi2OzpwYalD3uvK6e3OKDrfFCOxu17u +4PveESbrdyrmvLe7IVez +-----END CERTIFICATE----- + +// Subject: CN=JavaFX Runtime, +// OU=Java Signed Extensions, +// OU=Corporate Object Signing, +// O=Sun Microsystems Inc +// Issuer: CN=Object Signing CA, +// OU=Class 2 OnSite Subscriber CA, +// OU=VeriSign Trust Network, +// O=Sun Microsystems Inc +// Serial: 47:f4:55:f1:da:4a:5e:f9:e3:f7:a8:03:62:17:c0:ff +-----BEGIN CERTIFICATE----- +MIIFdjCCBF6gAwIBAgIQR/RV8dpKXvnj96gDYhfA/zANBgkqhkiG9w0BAQUFADCB +gzEdMBsGA1UEChMUU3VuIE1pY3Jvc3lzdGVtcyBJbmMxHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxJTAjBgNVBAsTHENsYXNzIDIgT25TaXRlIFN1YnNj 
+cmliZXIgQ0ExGjAYBgNVBAMTEU9iamVjdCBTaWduaW5nIENBMB4XDTA5MDEyOTAw +MDAwMFoXDTEyMDEyOTIzNTk1OVowfDEdMBsGA1UEChQUU3VuIE1pY3Jvc3lzdGVt +cyBJbmMxITAfBgNVBAsUGENvcnBvcmF0ZSBPYmplY3QgU2lnbmluZzEfMB0GA1UE +CxQWSmF2YSBTaWduZWQgRXh0ZW5zaW9uczEXMBUGA1UEAxQOSmF2YUZYIFJ1bnRp +bWUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCIzd0fAk8mI9ONc6RJ +aGieioK2FLdXEwj8zL3vdGDVmBwyR1zwYkaOIFFgF9IW/8qc4iAYA5sGUY+0g8q3 +5DuYAxfTzBB5KdaYvbuq6GGnoHIWmTirXY+1friFp8lyXSvtuEaGB1VHaBoZchEg +k+UgeVDA43dHwcT1Ov3DePczJRUes8T/QHzLX+BxUDG43vjyncCEO/AjqLZxXEz2 +xrNbKLcH3lGMJK7hdbfssUfF5BjC38Hn71HauYlA43b2no+2y0Sjulwzez2YPbDC +0GLR3TnKtA8dqOrnl5t3DniDbfOBNtBE3VOydJO0XW57Ng1HRXD023nm9ECPY2xp +0N/pAgMBAAGjggHqMIIB5jAJBgNVHRMEAjAAMA4GA1UdDwEB/wQEAwIHgDB/BgNV +HR8EeDB2MHSgcqBwhm5odHRwOi8vb25zaXRlY3JsLnZlcmlzaWduLmNvbS9TdW5N +aWNyb3N5c3RlbXNJbmNDb3Jwb3JhdGVPYmplY3RTaWduaW5nSmF2YVNpZ25lZEV4 +dGVuc2lvbnNDbGFzc0IvTGF0ZXN0Q1JMLmNybDAfBgNVHSMEGDAWgBSzRyuCflO0 +c8q4uxm3voFNB5XH7TAdBgNVHQ4EFgQUvOdd0cKPj+Yik/iOBwTdphh5A+gwOwYI +KwYBBQUHAQEELzAtMCsGCCsGAQUFBzABhh9odHRwOi8vb25zaXRlLW9jc3AudmVy +aXNpZ24uY29tMIG1BgNVHSAEga0wgaowOQYLYIZIAYb4RQEHFwIwKjAoBggrBgEF +BQcCARYcaHR0cHM6Ly93d3cudmVyaXNpZ24uY29tL3JwYTBtBgtghkgBhvcAg32c +PzBeMCcGCCsGAQUFBwIBFhtodHRwczovL3d3dy5zdW4uY29tL3BraS9jcHMwMwYI +KwYBBQUHAgIwJxolVmFsaWRhdGVkIEZvciBTdW4gQnVzaW5lc3MgT3BlcmF0aW9u +czATBgNVHSUEDDAKBggrBgEFBQcDAzANBgkqhkiG9w0BAQUFAAOCAQEAbGcf2NjL +AI93HG6ny2BbepaZA1a8xa/R6uUc7xV+Qw6MgLwFD4Q4i6LWUztQDvg9l68MM2/i +Y9LEi1KM4lcNbK5+D+t9x98wXBiuojXhVdp5ZmC03EyEBbriopdBsmXVLDSu/Y3+ +zowOO5xwpMK3dbgsSDs2Vt0UosD3FTcRaD3GNfOhXMp+o1grHNiXF9YgkmdQbPPZ +DQ2KBhFPCRJXBGvyKOqno/DTg0sQ3crGH/C4/4t7mnQXWldZotmJUZ0ONc9oD+Q1 +JAaguUKqIwn9yZ093ie+JWHbYNid9IIIPXYgtRxmf9a376WBhqhu56uJftBJ7x9g +eQ7Lot6CSWCiFw== +-----END CERTIFICATE----- + +// Subject: CN=Solaris INTERNAL DEVELOPMENT USE ONLY, +// OU=Solaris Cryptographic Framework, +// OU=Corporate Object Signing, +// O=Sun Microsystems Inc +// Issuer: CN=Object Signing CA, +// OU=Class 2 OnSite Subscriber CA, +// OU=VeriSign Trust Network, +// O=Sun Microsystems Inc +// Serial: 77:29:77:52:6a:19:7b:9a:a6:a2:c7:99:a0:e1:cd:8c +-----BEGIN CERTIFICATE----- +MIIFHjCCBAagAwIBAgIQdyl3UmoZe5qmoseZoOHNjDANBgkqhkiG9w0BAQUFADCB +gzEdMBsGA1UEChMUU3VuIE1pY3Jvc3lzdGVtcyBJbmMxHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxJTAjBgNVBAsTHENsYXNzIDIgT25TaXRlIFN1YnNj +cmliZXIgQ0ExGjAYBgNVBAMTEU9iamVjdCBTaWduaW5nIENBMB4XDTA3MDEwNDAw +MDAwMFoXDTEwMDEwMzIzNTk1OVowgZwxHTAbBgNVBAoUFFN1biBNaWNyb3N5c3Rl +bXMgSW5jMSEwHwYDVQQLFBhDb3Jwb3JhdGUgT2JqZWN0IFNpZ25pbmcxKDAmBgNV +BAsUH1NvbGFyaXMgQ3J5cHRvZ3JhcGhpYyBGcmFtZXdvcmsxLjAsBgNVBAMUJVNv +bGFyaXMgSU5URVJOQUwgREVWRUxPUE1FTlQgVVNFIE9OTFkwgZ8wDQYJKoZIhvcN +AQEBBQADgY0AMIGJAoGBALbNU4hf3mD5ArDI9pjgioAyvV3bjMPRQdCZniIeGJBp +odFlSEH+Mh64W1DsY8coeZ7FvvGJkx9IpTMJW9k8w1oJK9UNqHyAQfaYjQyXi3xQ +LJp62EvYdGfDlwOZejEcR/MbzZG+GOPMMvQj5+xyFDvLXNGfQNTnxw2qnBgCJXjj +AgMBAAGjggH1MIIB8TAJBgNVHRMEAjAAMA4GA1UdDwEB/wQEAwIHgDCBiQYDVR0f +BIGBMH8wfaB7oHmGd2h0dHA6Ly9vbnNpdGVjcmwudmVyaXNpZ24uY29tL1N1bk1p +Y3Jvc3lzdGVtc0luY0NvcnBvcmF0ZU9iamVjdFNpZ25pbmdTb2xhcmlzQ3J5cHRv +Z3JhcGhpY0ZyYW1ld29ya0NsYXNzQi9MYXRlc3RDUkwuY3JsMB8GA1UdIwQYMBaA +FLNHK4J+U7Rzyri7Gbe+gU0HlcftMB0GA1UdDgQWBBRpfiGYkehTnsIzuN2H6AFb +VCZG8jA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAGGH2h0dHA6Ly9vbnNpdGUt +b2NzcC52ZXJpc2lnbi5jb20wgbUGA1UdIASBrTCBqjA5BgtghkgBhvhFAQcXAjAq +MCgGCCsGAQUFBwIBFhxodHRwczovL3d3dy52ZXJpc2lnbi5jb20vcnBhMG0GC2CG +SAGG9wCDfZw/MF4wJwYIKwYBBQUHAgEWG2h0dHBzOi8vd3d3LnN1bi5jb20vcGtp +L2NwczAzBggrBgEFBQcCAjAnFiVWYWxpZGF0ZWQgRm9yIFN1biBCdXNpbmVzcyBP 
+cGVyYXRpb25zMBMGA1UdJQQMMAoGCCsGAQUFBwMDMA0GCSqGSIb3DQEBBQUAA4IB +AQCG5soy3LFHTFbA8/5SzDRhQoJkHUnOP0t3b6nvX6vZYRp649fje7TQOPRm1pFd +CZ17J+tggdZwgzTqY4aYpJ00jZaK6pV37q/vgFC/ia6jDs8Q+ly9cEcadBZ5loYg +cmxp9p57W2MNWx8VA8oFdNtKfF0jUNXbLNtvwGHmgR6YcwLrGN1b6/9Lt9bO3ODl +FO+ZDwkfQz5ClUVrTx2dGBvKRYFqSG5S8JAfsgYhPvcacUQkA7ExyKvfRXLWVrce +ZiPpcElbx+819H2sAPvVvparVeAruZGMAtejHZp9NFoowKen5drJp9VxePS4eM49 +3DepB6lKRrNRw66LNQol4ZBz +-----END CERTIFICATE----- + +// Subject: EMAILADDRESS=info@diginotar.nl, CN=DigiNotar Cyber CA, +// O=DigiNotar, C=NL +// Issuer: CN=GTE CyberTrust Global Root, +// OU=GTE CyberTrust Solutions, Inc., +// O=GTE Corporation, +// C=US +// Serial: 120000525 (07:27:10:0D) +-----BEGIN CERTIFICATE----- +MIIFWjCCBMOgAwIBAgIEBycQDTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQGEwJV +UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU +cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds +b2JhbCBSb290MB4XDTA2MTAwNDEwNTQxMVoXDTExMTAwNDEwNTMxMVowYDELMAkG +A1UEBhMCTkwxEjAQBgNVBAoTCURpZ2lOb3RhcjEbMBkGA1UEAxMSRGlnaU5vdGFy +IEN5YmVyIENBMSAwHgYJKoZIhvcNAQkBFhFpbmZvQGRpZ2lub3Rhci5ubDCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANLOFQotqF6EZ639vu9Gx8i5z3P8 +9DS5+SxD52ATPXrjss87Z2yQrcC5P4RS8DVC3HTcKDu9UrSnrHJFF8bwieu0qiXy +XUte0dmHutZ9fPXOMp8QM8WxSrtekTHC0OlBwpFkfglBO9uLCDdqqspS3rU5HsCI +A6U/i5kTYUO1m4Kz7iBvz6FEouova0CfjytXraFTwoUiaZ2gP1HfC0GRDaXhqKpc +SQhdvd5wQbEPyWNr0380dAIvNFp4dRxoeoFnivPaQPBgY/SSINcDpj2jHmfEhBtB +pcmM5r3qSLYFFgizNxJa92E89zhvLpfgb1Y4VNMota0Ubi5LZLUnZbd1JQm2Bz2V +VgIKgmCyc0XgMyZRdJq51FAc9k1bW1JSE1qmf6cO4ehBVGeYjIfVydNsy9NUkgYJ +NEH3gW8/nsl8dVWw58Gzd+jDxAA1lUBwEEoF3iW7n1mlZLxHYL9g43aLE1Xd4XR6 +uc8kpmp/3mQiRFhogmoQ+T3lPhu5vfwi9GAEibtVbShV+t6OjRshFNc3izR7Tfay +shDPM7F9HGKZSMsrbHaWVb8ZDR0fu2WqG46ZtcYokOWCLXhQIJr9eS8kf/CJKWn0 +fc1zvrPtTsHR7VJej/e4142HrbLZG1ES/1az4a80fVykeIgQnp0DxqWqoiRR90kU +xbHuWUOV36toKDA/AgMBAAGjggGGMIIBgjASBgNVHRMBAf8ECDAGAQH/AgEBMFMG +A1UdIARMMEowSAYJKwYBBAGxPgEAMDswOQYIKwYBBQUHAgEWLWh0dHA6Ly93d3cu +cHVibGljLXRydXN0LmNvbS9DUFMvT21uaVJvb3QuaHRtbDAOBgNVHQ8BAf8EBAMC +AQYwgaAGA1UdIwSBmDCBlYAUpgwdn2H/Bxe1vzhG20Mw1Y6wUgaheaR3MHUxCzAJ +BgNVBAYTAlVTMRgwFgYDVQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdU +RSBDeWJlclRydXN0IFNvbHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVy +VHJ1c3QgR2xvYmFsIFJvb3SCAgGlMEUGA1UdHwQ+MDwwOqA4oDaGNGh0dHA6Ly93 +d3cucHVibGljLXRydXN0LmNvbS9jZ2ktYmluL0NSTC8yMDE4L2NkcC5jcmwwHQYD +VR0OBBYEFKv5aN/PSjfXe0WMX3LeQETDZbvCMA0GCSqGSIb3DQEBBQUAA4GBAI9o +a6VbB7pEZg4cqFwwezPkCiYE/O+eGjjWLqEf0JlHwnVkJP2eOyh2uSYoYZEMbSz4 +BJ98UAHV42mv7xXSRZskCSpmBU8lgcpdvqrBWSeuM46C9990sFWzjvjnN8huqlZE +9r1TgSOWPbT6MopTZkQloiXGpjwljPDgKAYityZB +-----END CERTIFICATE----- + +// Subject: CN=DigiNotar Cyber CA, O=DigiNotar, C=NL +// Issuer: CN=GTE CyberTrust Global Root,