changeset 40757:c1abf2140414

Merge
author duke
date Wed, 05 Jul 2017 22:10:32 +0200
parents ac2a115938ab 62b2a93931b6
children 012037dcbf35
files hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp hotspot/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp hotspot/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp hotspot/src/share/vm/runtime/atomic.inline.hpp hotspot/test/testlibrary/ClassFileInstaller.java hotspot/test/testlibrary/RedefineClassHelper.java hotspot/test/testlibrary/jdk/test/lib/AllocationHelper.java hotspot/test/testlibrary/jdk/test/lib/Asserts.java hotspot/test/testlibrary/jdk/test/lib/BuildHelper.java hotspot/test/testlibrary/jdk/test/lib/ByteCodeLoader.java hotspot/test/testlibrary/jdk/test/lib/DynamicVMOption.java hotspot/test/testlibrary/jdk/test/lib/ExitCode.java hotspot/test/testlibrary/jdk/test/lib/FileInstaller.java hotspot/test/testlibrary/jdk/test/lib/HeapRegionUsageTool.java hotspot/test/testlibrary/jdk/test/lib/InMemoryJavaCompiler.java hotspot/test/testlibrary/jdk/test/lib/InfiniteLoop.java hotspot/test/testlibrary/jdk/test/lib/InputArguments.java hotspot/test/testlibrary/jdk/test/lib/JDKToolFinder.java hotspot/test/testlibrary/jdk/test/lib/JDKToolLauncher.java hotspot/test/testlibrary/jdk/test/lib/OutputAnalyzer.java hotspot/test/testlibrary/jdk/test/lib/OutputBuffer.java hotspot/test/testlibrary/jdk/test/lib/Pair.java hotspot/test/testlibrary/jdk/test/lib/Platform.java hotspot/test/testlibrary/jdk/test/lib/ProcessTools.java hotspot/test/testlibrary/jdk/test/lib/StreamPumper.java hotspot/test/testlibrary/jdk/test/lib/TimeLimitedRunner.java hotspot/test/testlibrary/jdk/test/lib/Triple.java hotspot/test/testlibrary/jdk/test/lib/Utils.java hotspot/test/testlibrary/jdk/test/lib/cli/CPUSpecificCommandLineOptionTest.java hotspot/test/testlibrary/jdk/test/lib/cli/CommandLineOptionTest.java hotspot/test/testlibrary/jdk/test/lib/cli/predicate/AndPredicate.java hotspot/test/testlibrary/jdk/test/lib/cli/predicate/CPUSpecificPredicate.java hotspot/test/testlibrary/jdk/test/lib/cli/predicate/NotPredicate.java hotspot/test/testlibrary/jdk/test/lib/cli/predicate/OrPredicate.java hotspot/test/testlibrary/jdk/test/lib/dcmd/CommandExecutor.java hotspot/test/testlibrary/jdk/test/lib/dcmd/CommandExecutorException.java hotspot/test/testlibrary/jdk/test/lib/dcmd/FileJcmdExecutor.java hotspot/test/testlibrary/jdk/test/lib/dcmd/JMXExecutor.java hotspot/test/testlibrary/jdk/test/lib/dcmd/JcmdExecutor.java hotspot/test/testlibrary/jdk/test/lib/dcmd/MainClassJcmdExecutor.java hotspot/test/testlibrary/jdk/test/lib/dcmd/PidJcmdExecutor.java hotspot/test/testlibrary/jdk/test/lib/dtrace/DtraceResultsAnalyzer.java hotspot/test/testlibrary/jdk/test/lib/dtrace/DtraceRunner.java jdk/test/sun/security/krb5/auto/unreachable.krb5.conf test/lib/share/classes/jdk/test/lib/Asserts.java test/lib/share/classes/jdk/test/lib/JDKToolFinder.java test/lib/share/classes/jdk/test/lib/JDKToolLauncher.java test/lib/share/classes/jdk/test/lib/Platform.java test/lib/share/classes/jdk/test/lib/Utils.java test/lib/share/classes/jdk/test/lib/apps/LingeredApp.java test/lib/share/classes/jdk/test/lib/apps/LingeredAppWithDeadlock.java test/lib/share/classes/jdk/test/lib/hprof/HprofParser.java 
test/lib/share/classes/jdk/test/lib/hprof/README test/lib/share/classes/jdk/test/lib/hprof/model/AbstractJavaHeapObjectVisitor.java test/lib/share/classes/jdk/test/lib/hprof/model/ArrayTypeCodes.java test/lib/share/classes/jdk/test/lib/hprof/model/HackJavaValue.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaBoolean.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaByte.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaChar.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaClass.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaDouble.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaField.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaFloat.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaHeapObject.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaHeapObjectVisitor.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaInt.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaLazyReadObject.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaLong.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaObject.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaObjectArray.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaObjectRef.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaShort.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaStatic.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaThing.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaValue.java test/lib/share/classes/jdk/test/lib/hprof/model/JavaValueArray.java test/lib/share/classes/jdk/test/lib/hprof/model/ReachableExcludes.java test/lib/share/classes/jdk/test/lib/hprof/model/ReachableExcludesImpl.java test/lib/share/classes/jdk/test/lib/hprof/model/ReachableObjects.java test/lib/share/classes/jdk/test/lib/hprof/model/ReferenceChain.java test/lib/share/classes/jdk/test/lib/hprof/model/Root.java test/lib/share/classes/jdk/test/lib/hprof/model/Snapshot.java test/lib/share/classes/jdk/test/lib/hprof/model/StackFrame.java test/lib/share/classes/jdk/test/lib/hprof/model/StackTrace.java test/lib/share/classes/jdk/test/lib/hprof/parser/FileReadBuffer.java test/lib/share/classes/jdk/test/lib/hprof/parser/HprofReader.java test/lib/share/classes/jdk/test/lib/hprof/parser/MappedReadBuffer.java test/lib/share/classes/jdk/test/lib/hprof/parser/PositionDataInputStream.java test/lib/share/classes/jdk/test/lib/hprof/parser/PositionInputStream.java test/lib/share/classes/jdk/test/lib/hprof/parser/ReadBuffer.java test/lib/share/classes/jdk/test/lib/hprof/parser/Reader.java test/lib/share/classes/jdk/test/lib/hprof/util/ArraySorter.java test/lib/share/classes/jdk/test/lib/hprof/util/Comparer.java test/lib/share/classes/jdk/test/lib/hprof/util/CompositeEnumeration.java test/lib/share/classes/jdk/test/lib/hprof/util/Misc.java test/lib/share/classes/jdk/test/lib/hprof/util/VectorSorter.java test/lib/share/classes/jdk/test/lib/process/OutputAnalyzer.java test/lib/share/classes/jdk/test/lib/process/OutputBuffer.java test/lib/share/classes/jdk/test/lib/process/ProcessTools.java test/lib/share/classes/jdk/test/lib/process/StreamPumper.java
diffstat 1719 files changed, 49559 insertions(+), 26790 deletions(-) [+]
--- a/.hgtags-top-repo	Fri Sep 02 02:41:56 2016 +0000
+++ b/.hgtags-top-repo	Wed Jul 05 22:10:32 2017 +0200
@@ -376,3 +376,4 @@
 8728756c2f70a79a90188f4019cfd6b9a275765c jdk-9+131
 a24702d4d5ab0015a5c553ed57f66fce7d85155e jdk-9+132
 be1218f792a450dfb5d4b1f82616b9d95a6a732e jdk-9+133
+065724348690eda41fc69112278d8da6dcde548c jdk-9+134
--- a/common/autoconf/generated-configure.sh	Fri Sep 02 02:41:56 2016 +0000
+++ b/common/autoconf/generated-configure.sh	Wed Jul 05 22:10:32 2017 +0200
@@ -5095,7 +5095,7 @@
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1470863189
+DATE_WHEN_GENERATED=1472718471
 
 ###############################################################################
 #
@@ -15944,6 +15944,8 @@
     HOTSPOT_TARGET_CPU_DEFINE=S390
   elif test "x$OPENJDK_TARGET_CPU" = xs390x; then
     HOTSPOT_TARGET_CPU_DEFINE=S390
+  elif test "x$OPENJDK_TARGET_CPU" != x; then
+    HOTSPOT_TARGET_CPU_DEFINE=$(echo $OPENJDK_TARGET_CPU | tr a-z A-Z)
   fi
 
 
@@ -16117,6 +16119,8 @@
     HOTSPOT_BUILD_CPU_DEFINE=S390
   elif test "x$OPENJDK_BUILD_CPU" = xs390x; then
     HOTSPOT_BUILD_CPU_DEFINE=S390
+  elif test "x$OPENJDK_BUILD_CPU" != x; then
+    HOTSPOT_BUILD_CPU_DEFINE=$(echo $OPENJDK_BUILD_CPU | tr a-z A-Z)
   fi
 
 
--- a/common/autoconf/platform.m4	Fri Sep 02 02:41:56 2016 +0000
+++ b/common/autoconf/platform.m4	Wed Jul 05 22:10:32 2017 +0200
@@ -454,6 +454,8 @@
     HOTSPOT_$1_CPU_DEFINE=S390
   elif test "x$OPENJDK_$1_CPU" = xs390x; then
     HOTSPOT_$1_CPU_DEFINE=S390
+  elif test "x$OPENJDK_$1_CPU" != x; then
+    HOTSPOT_$1_CPU_DEFINE=$(echo $OPENJDK_$1_CPU | tr a-z A-Z)
   fi
   AC_SUBST(HOTSPOT_$1_CPU_DEFINE)
 
--- a/corba/.hgtags	Fri Sep 02 02:41:56 2016 +0000
+++ b/corba/.hgtags	Wed Jul 05 22:10:32 2017 +0200
@@ -376,3 +376,4 @@
 f7e1d5337c2e550fe553df7a3886bbed80292ecd jdk-9+131
 1ab4b9399c4cba584f66c1c088188f2f565fbf9c jdk-9+132
 2021bfedf1c478a4808a7711a6090682a12f4c0e jdk-9+133
+1a497f5ca0cfd88115cc7daa8af8a62b8741caf2 jdk-9+134
--- a/hotspot/.hgtags	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/.hgtags	Wed Jul 05 22:10:32 2017 +0200
@@ -536,3 +536,4 @@
 943bf73b49c33c2d7cbd796f6a4ae3c7a00ae932 jdk-9+131
 713951c08aa26813375175c2ab6cc99ff2a56903 jdk-9+132
 a25e0fb6033245ab075136e744d362ce765464cd jdk-9+133
+b8b694c6b4d2ab0939aed7adaf0eec1ac321a085 jdk-9+134
--- a/hotspot/make/test/JtregNative.gmk	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/make/test/JtregNative.gmk	Wed Jul 05 22:10:32 2017 +0200
@@ -71,15 +71,15 @@
 endif
 
 ifeq ($(TOOLCHAIN_TYPE), solstudio)
-    BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_liboverflow := -lc
-    BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libSimpleClassFileLoadHook := -lc
-    BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libGetNamedModuleTest := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_liboverflow := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libSimpleClassFileLoadHook := -lc
+    BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libGetNamedModuleTest := -lc
 endif
 
 ifeq ($(OPENJDK_TARGET_OS), linux)
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rw := -z noexecstack
     BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libtest-rwx := -z execstack
-    BUILD_HOTSPOT_JTREG_EXECUTABLES_LDFLAGS_exeinvoke := -ljvm -lpthread
+    BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeinvoke := -ljvm -lpthread
     BUILD_TEST_invoke_exeinvoke.c_OPTIMIZATION := NONE
 endif
 
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad	Wed Jul 05 22:10:32 2017 +0200
@@ -4680,7 +4680,7 @@
       Label retaddr;
       __ adr(rscratch2, retaddr);
       __ lea(rscratch1, RuntimeAddress(entry));
-      // Leave a breadcrumb for JavaThread::pd_last_frame().
+      // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
       __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize)));
       __ blrt(rscratch1, gpcnt, fpcnt, rtype);
       __ bind(retaddr);
--- a/hotspot/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/c1_Runtime1_aarch64.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -78,7 +78,7 @@
   }
   pop(r0, sp);
 #endif
-  reset_last_Java_frame(true, true);
+  reset_last_Java_frame(true);
   maybe_isb();
 
   // check for pending exceptions
@@ -547,7 +547,7 @@
     __ bind(L);
   }
 #endif
-  __ reset_last_Java_frame(true, false);
+  __ reset_last_Java_frame(true);
   __ maybe_isb();
 
   // check for pending exceptions
--- a/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -336,13 +336,16 @@
   JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
   assert(!entry_frame_is_first(), "next Java fp must be non zero");
   assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
+  // Since we are walking the stack now this nested anchor is obviously walkable
+  // even if it wasn't when it was stacked.
+  if (!jfa->walkable()) {
+    // Capture _last_Java_pc (if needed) and mark anchor walkable.
+    jfa->capture_last_Java_pc();
+  }
   map->clear();
   assert(map->include_argument_oops(), "should be set by clear");
-  if (jfa->last_Java_pc() != NULL ) {
-    frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
-    return fr;
-  }
-  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp());
+  vmassert(jfa->last_Java_pc() != NULL, "not walkable");
+  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
   return fr;
 }
 
@@ -769,3 +772,21 @@
   init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
 }
 #endif
+
+void JavaFrameAnchor::make_walkable(JavaThread* thread) {
+  // last frame set?
+  if (last_Java_sp() == NULL) return;
+  // already walkable?
+  if (walkable()) return;
+  vmassert(Thread::current() == (Thread*)thread, "not current thread");
+  vmassert(last_Java_sp() != NULL, "not called from Java code?");
+  vmassert(last_Java_pc() == NULL, "already walkable");
+  capture_last_Java_pc();
+  vmassert(walkable(), "something went wrong");
+}
+
+void JavaFrameAnchor::capture_last_Java_pc() {
+  vmassert(_last_Java_sp != NULL, "no last frame set");
+  vmassert(_last_Java_pc == NULL, "already walkable");
+  _last_Java_pc = (address)_last_Java_sp[-1];
+}
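
A minimal sketch of how the two routines above are meant to compose, using only the names introduced in this changeset; walk_entry_frame is a hypothetical helper, not part of the patch:

    // First walk captures the pc the stub pushed just below last_Java_sp,
    // after which the anchor stays walkable and the frame can be built
    // from the complete sp/fp/pc triple.
    frame walk_entry_frame(JavaFrameAnchor* jfa) {
      if (!jfa->walkable()) {
        jfa->capture_last_Java_pc();   // _last_Java_pc = _last_Java_sp[-1]
      }
      vmassert(jfa->last_Java_pc() != NULL, "not walkable");
      return frame(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
    }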
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -201,7 +201,7 @@
 // #endif
     MacroAssembler::null_check(reg, offset);
 // #ifdef ASSERT
-//     reset_last_Java_frame(true, false);
+//     reset_last_Java_frame(true);
 // #endif
   }
 
--- a/hotspot/src/cpu/aarch64/vm/javaFrameAnchor_aarch64.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/javaFrameAnchor_aarch64.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -64,10 +64,9 @@
     _last_Java_sp = src->_last_Java_sp;
   }
 
-  // Always walkable
-  bool walkable(void) { return true; }
-  // Never any thing to do since we are always walkable and can find address of return addresses
-  void make_walkable(JavaThread* thread) { }
+  bool walkable(void)                            { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
+  void make_walkable(JavaThread* thread);
+  void capture_last_Java_pc(void);
 
   intptr_t* last_Java_sp(void) const             { return _last_Java_sp; }
 
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -274,19 +274,18 @@
 }
 
 
-void MacroAssembler::reset_last_Java_frame(bool clear_fp,
-                                           bool clear_pc) {
+void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
   // we must set sp to zero to clear frame
   str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));
+
   // must clear fp, so that compiled frames are not confused; it is
   // possible that we need it only for debugging
   if (clear_fp) {
     str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
   }
 
-  if (clear_pc) {
-    str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
-  }
+  // Always clear the pc because it could have been set by make_walkable()
+  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
 }
 
 // Calls to C land
@@ -632,7 +631,7 @@
 
   // reset last Java frame
   // Only interpreter should have to clear fp
-  reset_last_Java_frame(true, true);
+  reset_last_Java_frame(true);
 
    // C++ interp handles this in the interpreter
   check_and_handle_popframe(java_thread);
@@ -875,7 +874,7 @@
   if (type == bytecode_start) {
     // set_last_Java_frame(esp, rfp, (address)NULL);
     Assembler:: notify(type);
-    // reset_last_Java_frame(true, false);
+    // reset_last_Java_frame(true);
   }
   else
     Assembler:: notify(type);
--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -757,10 +757,10 @@
                            Register last_java_pc,
                            Register scratch);
 
-  void reset_last_Java_frame(Register thread, bool clearfp, bool clear_pc);
+  void reset_last_Java_frame(Register thread);
 
-  // thread in the default location (r15_thread on 64bit)
-  void reset_last_Java_frame(bool clear_fp, bool clear_pc);
+  // thread in the default location (rthread)
+  void reset_last_Java_frame(bool clear_fp);
 
   // Stores
   void store_check(Register obj);                // store check for obj - register is destroyed afterwards
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -2030,7 +2030,7 @@
     __ bind(dtrace_method_exit_done);
   }
 
-  __ reset_last_Java_frame(false, true);
+  __ reset_last_Java_frame(false);
 
   // Unpack oop result
   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
@@ -2370,7 +2370,7 @@
     __ bind(retaddr);
     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
 
-    __ reset_last_Java_frame(false, false);
+    __ reset_last_Java_frame(false);
 
     __ b(after_fetch_unroll_info_call);
   } // EnableJVMCI
@@ -2465,7 +2465,7 @@
   // find any register it might need.
   oop_maps->add_gc_map(__ pc() - start, map);
 
-  __ reset_last_Java_frame(false, true);
+  __ reset_last_Java_frame(false);
 
 #if INCLUDE_JVMCI
   if (EnableJVMCI) {
@@ -2606,7 +2606,7 @@
                        new OopMap( frame_size_in_words, 0 ));
 
   // Clear fp AND pc
-  __ reset_last_Java_frame(true, true);
+  __ reset_last_Java_frame(true);
 
   // Collect return values
   __ ldrd(v0, Address(sp, RegisterSaver::v0_offset_in_bytes()));
@@ -2709,7 +2709,7 @@
 
   oop_maps->add_gc_map(__ pc() - start, map);
 
-  __ reset_last_Java_frame(false, true);
+  __ reset_last_Java_frame(false);
 
   // move UnrollBlock* into r4
   __ mov(r4, r0);
@@ -2828,7 +2828,7 @@
   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
 
   // Clear fp AND pc
-  __ reset_last_Java_frame(true, true);
+  __ reset_last_Java_frame(true);
 
   // Pop self-frame.
   __ leave();                 // Epilog
@@ -2906,7 +2906,7 @@
 
   Label noException;
 
-  __ reset_last_Java_frame(false, true);
+  __ reset_last_Java_frame(false);
 
   __ maybe_isb();
   __ membar(Assembler::LoadLoad | Assembler::LoadStore);
@@ -2985,7 +2985,7 @@
   // r0 contains the address we are going to jump to assuming no exception got installed
 
   // clear last_Java_sp
-  __ reset_last_Java_frame(false, true);
+  __ reset_last_Java_frame(false);
   // check for pending exceptions
   Label pending;
   __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
@@ -3116,7 +3116,7 @@
 
   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
 
-  __ reset_last_Java_frame(false, true);
+  __ reset_last_Java_frame(false);
 
   // Restore callee-saved registers
 
--- a/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/stubGenerator_aarch64.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -3801,7 +3801,7 @@
 
     oop_maps->add_gc_map(the_pc - start, map);
 
-    __ reset_last_Java_frame(true, true);
+    __ reset_last_Java_frame(true);
     __ maybe_isb();
 
     __ leave();
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -1353,7 +1353,7 @@
   __ stlrw(rscratch1, rscratch2);
 
   // reset_last_Java_frame
-  __ reset_last_Java_frame(true, true);
+  __ reset_last_Java_frame(true);
 
   // reset handle block
   __ ldr(t, Address(rthread, JavaThread::active_handles_offset()));
--- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -852,26 +852,23 @@
     // get next bytecode
     __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 
-    // do actual aload_0
-    aload(0);
-
     // if _getfield then wait with rewrite
     __ cmpw(r1, Bytecodes::Bytecodes::_getfield);
     __ br(Assembler::EQ, done);
 
-    // if _igetfield then reqrite to _fast_iaccess_0
+    // if _igetfield then rewrite to _fast_iaccess_0
     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
     __ cmpw(r1, Bytecodes::_fast_igetfield);
     __ movw(bc, Bytecodes::_fast_iaccess_0);
     __ br(Assembler::EQ, rewrite);
 
-    // if _agetfield then reqrite to _fast_aaccess_0
+    // if _agetfield then rewrite to _fast_aaccess_0
     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
     __ cmpw(r1, Bytecodes::_fast_agetfield);
     __ movw(bc, Bytecodes::_fast_aaccess_0);
     __ br(Assembler::EQ, rewrite);
 
-    // if _fgetfield then reqrite to _fast_faccess_0
+    // if _fgetfield then rewrite to _fast_faccess_0
     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
     __ cmpw(r1, Bytecodes::_fast_fgetfield);
     __ movw(bc, Bytecodes::_fast_faccess_0);
@@ -887,9 +884,10 @@
     patch_bytecode(Bytecodes::_aload_0, bc, r1, false);
 
     __ bind(done);
-  } else {
-    aload(0);
   }
+
+  // Do actual aload_0 (must do this after patch_bytecode, which might call into the VM and trigger a GC that moves the oop).
+  aload(0);
 }
 
 void TemplateTable::istore()
--- a/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/aarch64/vm/vm_version_aarch64.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -28,6 +28,7 @@
 
 #include "runtime/globals_extension.hpp"
 #include "runtime/vm_version.hpp"
+#include "utilities/sizes.hpp"
 
 class VM_Version : public Abstract_VM_Version {
   friend class JVMCIVMStructs;
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -705,9 +705,6 @@
     // get next byte
     __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);
 
-    // do actual aload_0
-    aload(0);
-
     // if _getfield then wait with rewrite
     __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);
 
@@ -738,9 +735,10 @@
     __ bind(rewrite);
     patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
     __ bind(done);
-  } else {
-    aload(0);
   }
+
+  // Do actual aload_0 (must do this after patch_bytecode, which might call into the VM and trigger a GC that moves the oop).
+  aload(0);
 }
 
 void TemplateTable::istore() {
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -98,7 +98,7 @@
   }
   pop(rax);
 #endif
-  reset_last_Java_frame(thread, true, align_stack);
+  reset_last_Java_frame(thread, true);
 
   // discard thread and arguments
   NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
@@ -872,7 +872,7 @@
   }
   __ pop(rax);
 #endif
-  __ reset_last_Java_frame(thread, true, false);
+  __ reset_last_Java_frame(thread, true);
 #ifndef _LP64
   __ pop(rcx); // discard thread arg
   __ pop(rcx); // discard dummy
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -337,13 +337,16 @@
   JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
   assert(!entry_frame_is_first(), "next Java fp must be non zero");
   assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
+  // Since we are walking the stack now this nested anchor is obviously walkable
+  // even if it wasn't when it was stacked.
+  if (!jfa->walkable()) {
+    // Capture _last_Java_pc (if needed) and mark anchor walkable.
+    jfa->capture_last_Java_pc();
+  }
   map->clear();
   assert(map->include_argument_oops(), "should be set by clear");
-  if (jfa->last_Java_pc() != NULL ) {
-    frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
-    return fr;
-  }
-  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp());
+  vmassert(jfa->last_Java_pc() != NULL, "not walkable");
+  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
   return fr;
 }
 
@@ -666,3 +669,21 @@
   init((intptr_t*)sp, (intptr_t*)fp, (address)pc);
 }
 #endif
+
+void JavaFrameAnchor::make_walkable(JavaThread* thread) {
+  // last frame set?
+  if (last_Java_sp() == NULL) return;
+  // already walkable?
+  if (walkable()) return;
+  vmassert(Thread::current() == (Thread*)thread, "not current thread");
+  vmassert(last_Java_sp() != NULL, "not called from Java code?");
+  vmassert(last_Java_pc() == NULL, "already walkable");
+  capture_last_Java_pc();
+  vmassert(walkable(), "something went wrong");
+}
+
+void JavaFrameAnchor::capture_last_Java_pc() {
+  vmassert(_last_Java_sp != NULL, "no last frame set");
+  vmassert(_last_Java_pc == NULL, "already walkable");
+  _last_Java_pc = (address)_last_Java_sp[-1];
+}
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -101,6 +101,7 @@
   // call a specialized frame constructor instead of this one.
   // Then we could use the assert below. However this assert is of somewhat dubious
   // value.
+  // UPDATE: this constructor is only used by trace_method_handle_stub() now.
   // assert(_pc != NULL, "no pc?");
 
   _cb = CodeCache::find_blob(_pc);
--- a/hotspot/src/cpu/x86/vm/javaFrameAnchor_x86.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/javaFrameAnchor_x86.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -62,10 +62,9 @@
     _last_Java_sp = src->_last_Java_sp;
   }
 
-  // Always walkable
-  bool walkable(void) { return true; }
-  // Never any thing to do since we are always walkable and can find address of return addresses
-  void make_walkable(JavaThread* thread) { }
+  bool walkable(void)                            { return _last_Java_sp != NULL && _last_Java_pc != NULL; }
+  void make_walkable(JavaThread* thread);
+  void capture_last_Java_pc(void);
 
   intptr_t* last_Java_sp(void) const             { return _last_Java_sp; }
 
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -752,8 +752,7 @@
   }
 }
 
-void MacroAssembler::reset_last_Java_frame(bool clear_fp,
-                                           bool clear_pc) {
+void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
   // we must set sp to zero to clear frame
   movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
   // must clear fp, so that compiled frames are not confused; it is
@@ -762,9 +761,8 @@
     movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
   }
 
-  if (clear_pc) {
-    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
-  }
+  // Always clear the pc because it could have been set by make_walkable()
+  movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
 }
 
 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
@@ -2531,7 +2529,7 @@
   }
   // reset last Java frame
   // Only interpreter should have to clear fp
-  reset_last_Java_frame(java_thread, true, false);
+  reset_last_Java_frame(java_thread, true);
 
    // C++ interp handles this in the interpreter
   check_and_handle_popframe(java_thread);
@@ -3642,8 +3640,8 @@
   pusha();
 }
 
-void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
-  // determine java_thread register
+void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) {
+  // determine java_thread register
   if (!java_thread->is_valid()) {
     java_thread = rdi;
     get_thread(java_thread);
@@ -3654,8 +3652,8 @@
     movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
   }
 
-  if (clear_pc)
-    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
+  // Always clear the pc because it could have been set by make_walkable()
+  movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
 
 }
 
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -288,10 +288,10 @@
                            Register last_java_fp,
                            address last_java_pc);
 
-  void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc);
+  void reset_last_Java_frame(Register thread, bool clear_fp);
 
   // thread in the default location (r15_thread on 64bit)
-  void reset_last_Java_frame(bool clear_fp, bool clear_pc);
+  void reset_last_Java_frame(bool clear_fp);
 
   // Stores
   void store_check(Register obj);                // store check for obj - register is destroyed afterwards
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -117,7 +117,7 @@
   // No registers to map, rbp is known implicitly
   oop_maps->add_gc_map( __ pc() - start,  new OopMap( framesize, 0 ));
   __ get_thread(rcx);
-  __ reset_last_Java_frame(rcx, false, false);
+  __ reset_last_Java_frame(rcx, false);
 
   // Restore callee-saved registers
   __ movptr(rbp, Address(rsp, rbp_off * wordSize));
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -1330,7 +1330,7 @@
   __ increment(rsp, wordSize);
 
   __ get_thread(thread);
-  __ reset_last_Java_frame(thread, false, true);
+  __ reset_last_Java_frame(thread, false);
 
   save_or_restore_arguments(masm, stack_slots, total_in_args,
                             arg_save_area, NULL, in_regs, in_sig_bt);
@@ -2224,7 +2224,7 @@
 
   // We can finally stop using that last_Java_frame we setup ages ago
 
-  __ reset_last_Java_frame(thread, false, true);
+  __ reset_last_Java_frame(thread, false);
 
   // Unpack oop result
   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
@@ -2553,7 +2553,7 @@
   __ pop(rcx);
 
   __ get_thread(rcx);
-  __ reset_last_Java_frame(rcx, false, false);
+  __ reset_last_Java_frame(rcx, false);
 
   // Load UnrollBlock into EDI
   __ mov(rdi, rax);
@@ -2702,7 +2702,7 @@
   __ push(rax);
 
   __ get_thread(rcx);
-  __ reset_last_Java_frame(rcx, false, false);
+  __ reset_last_Java_frame(rcx, false);
 
   // Collect return values
   __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
@@ -2806,7 +2806,7 @@
 
   __ get_thread(rcx);
 
-  __ reset_last_Java_frame(rcx, false, false);
+  __ reset_last_Java_frame(rcx, false);
 
   // Load UnrollBlock into EDI
   __ movptr(rdi, rax);
@@ -2912,7 +2912,7 @@
   oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );
 
   __ get_thread(rdi);
-  __ reset_last_Java_frame(rdi, true, false);
+  __ reset_last_Java_frame(rdi, true);
 
   // Pop self-frame.
   __ leave();     // Epilog!
@@ -3007,7 +3007,7 @@
 
   // Clear last_Java_sp again
   __ get_thread(java_thread);
-  __ reset_last_Java_frame(java_thread, false, false);
+  __ reset_last_Java_frame(java_thread, false);
 
   __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
   __ jcc(Assembler::equal, noException);
@@ -3082,7 +3082,7 @@
   __ addptr(rsp, wordSize);
 
   // clear last_Java_sp
-  __ reset_last_Java_frame(thread, true, false);
+  __ reset_last_Java_frame(thread, true);
   // check for pending exceptions
   Label pending;
   __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -1461,7 +1461,7 @@
   __ mov(rsp, r12); // restore sp
   __ reinit_heapbase();
 
-  __ reset_last_Java_frame(false, true);
+  __ reset_last_Java_frame(false);
 
   save_or_restore_arguments(masm, stack_slots, total_in_args,
                             arg_save_area, NULL, in_regs, in_sig_bt);
@@ -2577,7 +2577,7 @@
     restore_native_result(masm, ret_type, stack_slots);
   }
 
-  __ reset_last_Java_frame(false, true);
+  __ reset_last_Java_frame(false);
 
   // Unpack oop result
   if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
@@ -2849,7 +2849,7 @@
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
     oop_maps->add_gc_map( __ pc()-start, map->deep_copy());
 
-    __ reset_last_Java_frame(false, false);
+    __ reset_last_Java_frame(false);
 
     __ jmp(after_fetch_unroll_info_call);
   } // EnableJVMCI
@@ -2939,7 +2939,7 @@
   // find any register it might need.
   oop_maps->add_gc_map(__ pc() - start, map);
 
-  __ reset_last_Java_frame(false, false);
+  __ reset_last_Java_frame(false);
 
 #if INCLUDE_JVMCI
   if (EnableJVMCI) {
@@ -3087,7 +3087,7 @@
                        new OopMap( frame_size_in_words, 0 ));
 
   // Clear fp AND pc
-  __ reset_last_Java_frame(true, true);
+  __ reset_last_Java_frame(true);
 
   // Collect return values
   __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
@@ -3164,7 +3164,7 @@
 
   oop_maps->add_gc_map(__ pc() - start, map);
 
-  __ reset_last_Java_frame(false, false);
+  __ reset_last_Java_frame(false);
 
   // Load UnrollBlock* into rdi
   __ mov(rdi, rax);
@@ -3281,7 +3281,7 @@
   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
 
   // Clear fp AND pc
-  __ reset_last_Java_frame(true, true);
+  __ reset_last_Java_frame(true);
 
   // Pop self-frame.
   __ leave();                 // Epilog
@@ -3364,7 +3364,7 @@
 
   Label noException;
 
-  __ reset_last_Java_frame(false, false);
+  __ reset_last_Java_frame(false);
 
   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
   __ jcc(Assembler::equal, noException);
@@ -3434,7 +3434,7 @@
   // rax contains the address we are going to jump to assuming no exception got installed
 
   // clear last_Java_sp
-  __ reset_last_Java_frame(false, false);
+  __ reset_last_Java_frame(false);
   // check for pending exceptions
   Label pending;
   __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
@@ -3809,7 +3809,7 @@
 
   oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
 
-  __ reset_last_Java_frame(false, true);
+  __ reset_last_Java_frame(false);
 
   // Restore callee-saved registers
 
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -3766,7 +3766,7 @@
     // however can use the register value directly if it is callee saved.
     __ get_thread(java_thread);
 
-    __ reset_last_Java_frame(java_thread, true, false);
+    __ reset_last_Java_frame(java_thread, true);
 
     __ leave(); // required for proper stackwalking of RuntimeStub frame
 
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -5018,7 +5018,7 @@
 
     oop_maps->add_gc_map(the_pc - start, map);
 
-    __ reset_last_Java_frame(true, true);
+    __ reset_last_Java_frame(true);
 
     __ leave(); // required for proper stackwalking of RuntimeStub frame
 
--- a/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -1167,7 +1167,7 @@
   __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
 
   // reset_last_Java_frame
-  __ reset_last_Java_frame(thread, true, true);
+  __ reset_last_Java_frame(thread, true);
 
   // reset handle block
   __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
@@ -1659,7 +1659,7 @@
   __ set_last_Java_frame(noreg, rbp, __ pc());
   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
 #endif
-  __ reset_last_Java_frame(thread, true, true);
+  __ reset_last_Java_frame(thread, true);
 
   // Restore the last_sp and null it out
   __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
--- a/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -858,26 +858,23 @@
     // get next byte
     __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 
-    // do actual aload_0
-    aload(0);
-
     // if _getfield then wait with rewrite
     __ cmpl(rbx, Bytecodes::_getfield);
     __ jcc(Assembler::equal, done);
 
-    // if _igetfield then reqrite to _fast_iaccess_0
+    // if _igetfield then rewrite to _fast_iaccess_0
     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
     __ cmpl(rbx, Bytecodes::_fast_igetfield);
     __ movl(bc, Bytecodes::_fast_iaccess_0);
     __ jccb(Assembler::equal, rewrite);
 
-    // if _agetfield then reqrite to _fast_aaccess_0
+    // if _agetfield then rewrite to _fast_aaccess_0
     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
     __ cmpl(rbx, Bytecodes::_fast_agetfield);
     __ movl(bc, Bytecodes::_fast_aaccess_0);
     __ jccb(Assembler::equal, rewrite);
 
-    // if _fgetfield then reqrite to _fast_faccess_0
+    // if _fgetfield then rewrite to _fast_faccess_0
     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
     __ cmpl(rbx, Bytecodes::_fast_fgetfield);
     __ movl(bc, Bytecodes::_fast_faccess_0);
@@ -893,9 +890,10 @@
     patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
 
     __ bind(done);
-  } else {
-    aload(0);
   }
+
+  // Do actual aload_0 (must do this after patch_bytecode, which might call into the VM and trigger a GC that moves the oop).
+  aload(0);
 }
 
 void TemplateTable::istore() {
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Wed Jul 05 22:10:32 2017 +0200
@@ -986,7 +986,7 @@
   emit_opcode(cbuf, 0x58 | RBP_enc);
 
   if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
-    __ reserved_stack_check(); 
+    __ reserved_stack_check();
   }
 
   if (do_polling() && C->is_method_compilation()) {
@@ -7355,7 +7355,7 @@
             "movzbl  $res, $res" %}
   opcode(0x0F, 0xB0);
   ins_encode(lock_prefix,
-             REX_reg_mem(newval, mem_ptr),
+             REX_breg_mem(newval, mem_ptr),
              OpcP, OpcS,
              reg_mem(newval, mem_ptr),
              REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
@@ -7380,7 +7380,7 @@
   opcode(0x0F, 0xB1);
   ins_encode(lock_prefix,
              SizePrefix,
-             REX_reg_mem(newval, mem_ptr),          
+             REX_reg_mem(newval, mem_ptr),
              OpcP, OpcS,
              reg_mem(newval, mem_ptr),
              REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
@@ -7424,7 +7424,7 @@
             "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"  %}
   opcode(0x0F, 0xB0);
   ins_encode(lock_prefix,
-             REX_reg_mem(newval, mem_ptr),
+             REX_breg_mem(newval, mem_ptr),
              OpcP, OpcS,
              reg_mem(newval, mem_ptr) // lock cmpxchg
              );
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -37,7 +37,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/code/CodeBlob.java	Wed Jul 05 22:10:32 2017 +0200
@@ -32,6 +32,7 @@
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 import sun.jvm.hotspot.utilities.Assert;
+import sun.jvm.hotspot.utilities.CStringUtilities;
 
 import java.io.PrintStream;
 import java.util.Observable;
@@ -115,7 +116,7 @@
   }
 
   public String getName() {
-    return getName();
+    return CStringUtilities.getString(nameField.getValue(addr));
   }
 
   /** OopMap for frame; can return null if none available */
--- a/hotspot/src/os/aix/vm/attachListener_aix.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/aix/vm/attachListener_aix.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -494,7 +494,7 @@
   if (ret == 0) {
     ret = ::unlink(fn);
     if (ret == -1) {
-      debug_only(warning("failed to remove stale attach pid file at %s", fn));
+      log_debug(attach)("Failed to remove stale attach pid file at %s", fn);
     }
   }
 }
@@ -537,16 +537,23 @@
   struct stat64 st;
   RESTARTABLE(::stat64(fn, &st), ret);
   if (ret == -1) {
+    log_trace(attach)("Failed to find attach file: %s, trying alternate", fn);
     snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
              os::get_temp_directory(), os::current_process_id());
     RESTARTABLE(::stat64(fn, &st), ret);
+    if (ret == -1) {
+      log_debug(attach)("Failed to find attach file: %s", fn);
+    }
   }
   if (ret == 0) {
     // simple check to avoid starting the attach mechanism when
     // a bogus user creates the file
     if (st.st_uid == geteuid()) {
       init();
+      log_trace(attach)("Attach trigerred by %s", fn);
       return true;
+    } else {
+      log_debug(attach)("File %s has wrong user id %d (vs %d). Attach is not triggered", fn, st.st_uid, geteuid());
     }
   }
   return false;
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -52,7 +52,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -3800,10 +3800,6 @@
   return ::stat(pathbuf, sbuf);
 }
 
-bool os::check_heap(bool force) {
-  return true;
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/bsd/vm/attachListener_bsd.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/bsd/vm/attachListener_bsd.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -456,7 +456,7 @@
   if (ret == 0) {
     ret = ::unlink(fn);
     if (ret == -1) {
-      debug_only(warning("failed to remove stale attach pid file at %s", fn));
+      log_debug(attach)("Failed to remove stale attach pid file at %s", fn);
     }
   }
 }
@@ -493,19 +493,25 @@
   if (init_at_startup() || is_initialized()) {
     return false;               // initialized at startup or already initialized
   }
-  char path[PATH_MAX + 1];
+  char fn[PATH_MAX + 1];
   int ret;
   struct stat st;
 
-  snprintf(path, PATH_MAX + 1, "%s/.attach_pid%d",
+  snprintf(fn, PATH_MAX + 1, "%s/.attach_pid%d",
            os::get_temp_directory(), os::current_process_id());
-  RESTARTABLE(::stat(path, &st), ret);
+  RESTARTABLE(::stat(fn, &st), ret);
+  if (ret == -1) {
+    log_debug(attach)("Failed to find attach file: %s", fn);
+  }
   if (ret == 0) {
     // simple check to avoid starting the attach mechanism when
     // a bogus user creates the file
     if (st.st_uid == geteuid()) {
       init();
+      log_trace(attach)("Attach trigerred by %s", fn);
       return true;
+    } else {
+      log_debug(attach)("File %s has wrong user id %d (vs %d). Attach is not triggered", fn, st.st_uid, geteuid());
     }
   }
   return false;
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -42,7 +42,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -3780,11 +3780,6 @@
   return diff;
 }
 
-
-bool os::check_heap(bool force) {
-  return true;
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/linux/vm/attachListener_linux.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/linux/vm/attachListener_linux.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -453,7 +453,7 @@
   if (ret == 0) {
     ret = ::unlink(fn);
     if (ret == -1) {
-      debug_only(warning("failed to remove stale attach pid file at %s", fn));
+      log_debug(attach)("Failed to remove stale attach pid file at %s", fn);
     }
   }
 }
@@ -496,16 +496,23 @@
   struct stat64 st;
   RESTARTABLE(::stat64(fn, &st), ret);
   if (ret == -1) {
+    log_trace(attach)("Failed to find attach file: %s, trying alternate", fn);
     snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
              os::get_temp_directory(), os::current_process_id());
     RESTARTABLE(::stat64(fn, &st), ret);
+    if (ret == -1) {
+      log_debug(attach)("Failed to find attach file: %s", fn);
+    }
   }
   if (ret == 0) {
     // simple check to avoid starting the attach mechanism when
     // a bogus user creates the file
     if (st.st_uid == geteuid()) {
       init();
+      log_trace(attach)("Attach trigerred by %s", fn);
       return true;
+    } else {
+      log_debug(attach)("File %s has wrong user id %d (vs %d). Attach is not trigerred", fn, st.st_uid, geteuid());
     }
   }
   return false;
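
For reference, the messages added above use HotSpot's unified logging, so they can be enabled at runtime with -Xlog:attach=debug (or -Xlog:attach=trace for the finer-grained events), assuming the attach tag is registered in this build.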
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -42,7 +42,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -5174,10 +5174,6 @@
   return ::stat(pathbuf, sbuf);
 }
 
-bool os::check_heap(bool force) {
-  return true;
-}
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/solaris/vm/attachListener_solaris.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/solaris/vm/attachListener_solaris.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -394,7 +394,7 @@
   snprintf(initial_path, sizeof(initial_path), "%s.tmp", door_path);
   RESTARTABLE(::creat(initial_path, S_IRUSR | S_IWUSR), fd);
   if (fd == -1) {
-    debug_only(warning("attempt to create %s failed", initial_path));
+    log_debug(attach)("attempt to create door file %s failed (%d)", initial_path, errno);
     ::door_revoke(dd);
     return -1;
   }
@@ -409,6 +409,7 @@
       res = ::fattach(dd, initial_path);
     }
     if (res == -1) {
+      log_debug(attach)("unable to create door - fattach failed (%d)", errno);
       ::door_revoke(dd);
       dd = -1;
     }
@@ -419,12 +420,14 @@
     if (::rename(initial_path, door_path) == -1) {
         ::close(dd);
         ::fdetach(initial_path);
+        log_debug(attach)("unable to create door - rename %s to %s failed (%d)", errno);
         dd = -1;
     }
   }
   if (dd >= 0) {
     set_door_descriptor(dd);
     set_door_path(door_path);
+    log_trace(attach)("door file %s created succesfully", door_path);
   } else {
     // unable to create door, attach it to file, or rename file into place
     ::unlink(initial_path);
@@ -602,7 +605,7 @@
   if (ret == 0) {
     ret = ::unlink(fn);
     if (ret == -1) {
-      debug_only(warning("failed to remove stale attach pid file at %s", fn));
+      log_debug(attach)("Failed to remove stale attach pid file at %s", fn);
     }
   }
 }
@@ -645,9 +648,13 @@
   struct stat64 st;
   RESTARTABLE(::stat64(fn, &st), ret);
   if (ret == -1) {
+    log_trace(attach)("Failed to find attach file: %s, trying alternate", fn);
     snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
              os::get_temp_directory(), os::current_process_id());
     RESTARTABLE(::stat64(fn, &st), ret);
+    if (ret == -1) {
+      log_debug(attach)("Failed to find attach file: %s", fn);
+    }
   }
   if (ret == 0) {
     // simple check to avoid starting the attach mechanism when
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -42,7 +42,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -4589,10 +4589,6 @@
   }
 }
 
-// OS interface.
-
-bool os::check_heap(bool force) { return true; }
-
 // Is a (classpath) directory empty?
 bool os::dir_is_empty(const char* path) {
   DIR *dir = NULL;
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -45,7 +45,7 @@
 #include "prims/jvm.h"
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -5258,75 +5258,6 @@
   }
 }
 
-//--------------------------------------------------------------------------------------------------
-// Non-product code
-
-static int mallocDebugIntervalCounter = 0;
-static int mallocDebugCounter = 0;
-
-// For debugging possible bugs inside HeapWalk (a ring buffer)
-#define SAVE_COUNT 8
-static PROCESS_HEAP_ENTRY saved_heap_entries[SAVE_COUNT];
-static int saved_heap_entry_index;
-
-bool os::check_heap(bool force) {
-  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
-  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
-    // Note: HeapValidate executes two hardware breakpoints when it finds something
-    // wrong; at these points, eax contains the address of the offending block (I think).
-    // To get to the exlicit error message(s) below, just continue twice.
-    //
-    // Note:  we want to check the CRT heap, which is not necessarily located in the
-    // process default heap.
-    HANDLE heap = (HANDLE) _get_heap_handle();
-    if (!heap) {
-      return true;
-    }
-
-    // If we fail to lock the heap, then gflags.exe has been used
-    // or some other special heap flag has been set that prevents
-    // locking. We don't try to walk a heap we can't lock.
-    if (HeapLock(heap) != 0) {
-      PROCESS_HEAP_ENTRY phe;
-      phe.lpData = NULL;
-      memset(saved_heap_entries, 0, sizeof(saved_heap_entries));
-      saved_heap_entry_index = 0;
-      int count = 0;
-
-      while (HeapWalk(heap, &phe) != 0) {
-        count ++;
-        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
-            !HeapValidate(heap, 0, phe.lpData)) {
-          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
-          tty->print_cr("corrupted block near address %#x, length %d, count %d", phe.lpData, phe.cbData, count);
-          HeapUnlock(heap);
-          fatal("corrupted C heap");
-        } else {
-          // Save previous seen entries in a ring buffer. We have seen strange
-          // heap corruption fatal errors that produced mdmp files, but when we load
-          // these mdmp files in WinDBG, "!heap -triage" shows no error.
-          // We can examine the saved_heap_entries[] array in the mdmp file to
-          // diagnose such seemingly spurious errors reported by HeapWalk.
-          saved_heap_entries[saved_heap_entry_index++] = phe;
-          if (saved_heap_entry_index >= SAVE_COUNT) {
-            saved_heap_entry_index = 0;
-          }
-        }
-      }
-      DWORD err = GetLastError();
-      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED &&
-         (err == ERROR_INVALID_FUNCTION && phe.lpData != NULL)) {
-        HeapUnlock(heap);
-        fatal("heap walk aborted with error %d", err);
-      }
-      HeapUnlock(heap);
-    }
-    mallocDebugIntervalCounter = 0;
-  }
-  return true;
-}
-
-
 bool os::find(address addr, outputStream* st) {
   int offset = -1;
   bool result = false;
--- a/hotspot/src/os/windows/vm/threadCritical_windows.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os/windows/vm/threadCritical_windows.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadCritical.hpp"
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
+
+#ifndef _LP64
+#error "Atomic currently only impleneted for PPC64"
+#endif
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+//
+//   machine barrier instructions:
+//
+//   - ppc_sync            two-way memory barrier, aka fence
+//   - ppc_lwsync          orders  Store|Store,
+//                                  Load|Store,
+//                                  Load|Load,
+//                         but not Store|Load
+//   - ppc_eieio           orders memory accesses for device memory (only)
+//   - ppc_isync           invalidates speculatively executed instructions
+//                         From the POWER ISA 2.06 documentation:
+//                          "[...] an isync instruction prevents the execution of
+//                         instructions following the isync until instructions
+//                         preceding the isync have completed, [...]"
+//                         From IBM's AIX assembler reference:
+//                          "The isync [...] instructions causes the processor to
+//                         refetch any instructions that might have been fetched
+//                         prior to the isync instruction. The instruction isync
+//                         causes the processor to wait for all previous instructions
+//                         to complete. Then any instructions already fetched are
+//                         discarded and instruction processing continues in the
+//                         environment established by the previous instructions."
+//
+//   semantic barrier instructions:
+//   (as defined in orderAccess.hpp)
+//
+//   - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
+//                                 Load|Store
+//   - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
+//                                 Load|Load
+//   - ppc_fence           orders Store|Store,       (maps to ppc_sync)
+//                                 Load|Store,
+//                                 Load|Load,
+//                                Store|Load
+//
+
+#define strasm_sync                       "\n  sync    \n"
+#define strasm_lwsync                     "\n  lwsync  \n"
+#define strasm_isync                      "\n  isync   \n"
+#define strasm_release                    strasm_lwsync
+#define strasm_acquire                    strasm_lwsync
+#define strasm_fence                      strasm_sync
+#define strasm_nobarrier                  ""
+#define strasm_nobarrier_clobber_memory   ""
+
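+// The strasm_* macros above are plain string literals, so C string
+// concatenation splices them directly into the asm templates below.
+// The read-modify-write operations all follow the same larx/stcx. loop,
+// varying only the barrier choice (editorial sketch, not compilable as
+// written):
+//
+//   __asm__ __volatile__ (
+//     <entry barrier>                // e.g. strasm_lwsync
+//     "1: l[wd]arx  %0,  0, %2 \n"   // load word/double and reserve
+//     "   <update %0>          \n"
+//     "   st[wd]cx. %0,  0, %2 \n"   // store iff reservation still held
+//     "   bne-      1b         \n"   // reservation lost: retry
+//     <exit barrier>                 // e.g. strasm_isync
+//     : "=&r" (result) : ... : "cc", "memory");
+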
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+
+  unsigned int result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: lwarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (jint) result;
+}
+
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+
+  long result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: ldarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (intptr_t) result;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::inc    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::dec    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+
+  // Note that xchg doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* sync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jint) old_value;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+
+  // Note that xchg_ptr doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* sync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (intptr_t) old_value;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
+  if (order != memory_order_relaxed) {
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+  }
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  // Using 32 bit internally.
+  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
+
+#ifdef VM_LITTLE_ENDIAN
+  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
+#else
+  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
+#endif
+  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
+                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
+                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
+
+  unsigned int old_value, value32;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lbz     %[old_value], 0(%[dest])                  \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* atomic loop */
+    "1:                                                   \n"
+    "   lwarx   %[value32], 0, %[dest_base]               \n"
+    /* extract byte and compare */
+    "   srd     %[old_value], %[value32], %[shift_amount] \n"
+    "   clrldi  %[old_value], %[old_value], 56            \n"
+    "   cmpw    %[masked_compare_val], %[old_value]       \n"
+    "   bne-    2f                                        \n"
+    /* replace byte and try to store */
+    "   xor     %[value32], %[xor_value], %[value32]      \n"
+    "   stwcx.  %[value32], 0, %[dest_base]               \n"
+    "   bne-    1b                                        \n"
+    /* exit */
+    "2:                                                   \n"
+    /* out */
+    : [old_value]           "=&r"   (old_value),
+      [value32]             "=&r"   (value32),
+                            "=m"    (*dest),
+                            "=m"    (*dest_base)
+    /* in */
+    : [dest]                "b"     (dest),
+      [dest_base]           "b"     (dest_base),
+      [shift_amount]        "r"     (shift_amount),
+      [masked_compare_val]  "r"     (masked_compare_val),
+      [xor_value]           "r"     (xor_value),
+                            "m"     (*dest),
+                            "m"     (*dest_base)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jbyte)(unsigned char)old_value;
+}
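+
+// The word-level update above, restated in plain C (editorial sketch of
+// a single loop iteration, not compiled code):
+//
+//   value32   = *dest_base;                         // lwarx
+//   old_value = (value32 >> shift_amount) & 0xff;   // srd + clrldi
+//   if (old_value == masked_compare_val)            // cmpw
+//     *dest_base = value32 ^ xor_value;             // stwcx.
+//
+// xor_value holds (compare ^ exchange) at the target byte and zeroes
+// elsewhere, so the xor rewrites exactly that byte from the compare
+// value to the exchange value while leaving its neighbours untouched.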
+
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   lwz     %[old_value], 0(%[dest])                \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jint) old_value;
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
+  // specified otherwise (see atomic.hpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  cmpxchg_pre_membar(order);
+
+  __asm__ __volatile__ (
+    /* simple guard */
+    "   ld      %[old_value], 0(%[dest])                \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  cmpxchg_post_membar(order);
+
+  return (jlong) old_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
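+
+// Usage sketch (editorial; 'counter' is a hypothetical variable and the
+// interface itself is declared in the shared runtime/atomic.hpp,
+// assuming its default conservative cmpxchg order):
+//
+//   volatile jint counter = 0;
+//   Atomic::inc(&counter);                        // no ordering (strasm_nobarrier)
+//   Atomic::add(1, &counter);                     // lwsync ... isync
+//   jint seen = Atomic::cmpxchg(1, &counter, 0);  // sync ... sync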
+
+#undef strasm_sync
+#undef strasm_lwsync
+#undef strasm_isync
+#undef strasm_release
+#undef strasm_acquire
+#undef strasm_fence
+#undef strasm_nobarrier
+#undef strasm_nobarrier_clobber_memory
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_HPP
--- a/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,482 +0,0 @@
-/*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-#ifndef _LP64
-#error "Atomic currently only impleneted for PPC64"
-#endif
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-//
-//   machine barrier instructions:
-//
-//   - ppc_sync            two-way memory barrier, aka fence
-//   - ppc_lwsync          orders  Store|Store,
-//                                  Load|Store,
-//                                  Load|Load,
-//                         but not Store|Load
-//   - ppc_eieio           orders memory accesses for device memory (only)
-//   - ppc_isync           invalidates speculatively executed instructions
-//                         From the POWER ISA 2.06 documentation:
-//                          "[...] an isync instruction prevents the execution of
-//                         instructions following the isync until instructions
-//                         preceding the isync have completed, [...]"
-//                         From IBM's AIX assembler reference:
-//                          "The isync [...] instructions causes the processor to
-//                         refetch any instructions that might have been fetched
-//                         prior to the isync instruction. The instruction isync
-//                         causes the processor to wait for all previous instructions
-//                         to complete. Then any instructions already fetched are
-//                         discarded and instruction processing continues in the
-//                         environment established by the previous instructions."
-//
-//   semantic barrier instructions:
-//   (as defined in orderAccess.hpp)
-//
-//   - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
-//                                 Load|Store
-//   - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
-//                                 Load|Load
-//   - ppc_fence           orders Store|Store,       (maps to ppc_sync)
-//                                 Load|Store,
-//                                 Load|Load,
-//                                Store|Load
-//
-
-#define strasm_sync                       "\n  sync    \n"
-#define strasm_lwsync                     "\n  lwsync  \n"
-#define strasm_isync                      "\n  isync   \n"
-#define strasm_release                    strasm_lwsync
-#define strasm_acquire                    strasm_lwsync
-#define strasm_fence                      strasm_sync
-#define strasm_nobarrier                  ""
-#define strasm_nobarrier_clobber_memory   ""
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-
-  unsigned int result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: lwarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (jint) result;
-}
-
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-
-  long result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: ldarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::inc    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::dec    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (jint) old_value;
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (intptr_t) old_value;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-inline void cmpxchg_pre_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
-  if (order != memory_order_relaxed) {
-    __asm__ __volatile__ (
-      /* fence */
-      strasm_sync
-      );
-  }
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  // Using 32 bit internally.
-  volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
-
-#ifdef VM_LITTLE_ENDIAN
-  const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
-#else
-  const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
-#endif
-  const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
-                     masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
-                     xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
-
-  unsigned int old_value, value32;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lbz     %[old_value], 0(%[dest])                  \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* atomic loop */
-    "1:                                                   \n"
-    "   lwarx   %[value32], 0, %[dest_base]               \n"
-    /* extract byte and compare */
-    "   srd     %[old_value], %[value32], %[shift_amount] \n"
-    "   clrldi  %[old_value], %[old_value], 56            \n"
-    "   cmpw    %[masked_compare_val], %[old_value]       \n"
-    "   bne-    2f                                        \n"
-    /* replace byte and try to store */
-    "   xor     %[value32], %[xor_value], %[value32]      \n"
-    "   stwcx.  %[value32], 0, %[dest_base]               \n"
-    "   bne-    1b                                        \n"
-    /* exit */
-    "2:                                                   \n"
-    /* out */
-    : [old_value]           "=&r"   (old_value),
-      [value32]             "=&r"   (value32),
-                            "=m"    (*dest),
-                            "=m"    (*dest_base)
-    /* in */
-    : [dest]                "b"     (dest),
-      [dest_base]           "b"     (dest_base),
-      [shift_amount]        "r"     (shift_amount),
-      [masked_compare_val]  "r"     (masked_compare_val),
-      [xor_value]           "r"     (xor_value),
-                            "m"     (*dest),
-                            "m"     (*dest_base)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jbyte)(unsigned char)old_value;
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   lwz     %[old_value], 0(%[dest])                \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jint) old_value;
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
-  // specified otherwise (see atomic.hpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  cmpxchg_pre_membar(order);
-
-  __asm__ __volatile__ (
-    /* simple guard */
-    "   ld      %[old_value], 0(%[dest])                \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  cmpxchg_post_membar(order);
-
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-#undef strasm_sync
-#undef strasm_lwsync
-#undef strasm_isync
-#undef strasm_release
-#undef strasm_acquire
-#undef strasm_fence
-#undef strasm_nobarrier
-#undef strasm_nobarrier_clobber_memory
-
-#endif // OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
+#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+
+// Adding a lock prefix to an instruction on an MP machine
+#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
+
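+// LOCK_IF_MP(%n) expands to "cmp $0, %n; je 1f; lock; 1: ", i.e. a
+// runtime branch over the lock prefix.  On a uniprocessor (mp == 0) the
+// je is taken and the prefix is skipped; on MP it falls through and the
+// next instruction executes locked.  Sketch of what the assembler sees
+// for Atomic::add below:
+//
+//   cmp  $0, mp
+//   je   1f
+//   lock
+// 1: xaddl reg, (dest)
+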
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+  jint addend = add_value;
+  int mp = os::is_MP();
+  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
+                    : "=r" (addend)
+                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void Atomic::inc    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+inline void Atomic::dec    (volatile jint*     dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
+  __asm__ volatile (  "xchgl (%2),%0"
+                    : "=r" (exchange_value)
+                    : "0" (exchange_value), "r" (dest)
+                    : "memory");
+  return exchange_value;
+}
+
+inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
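+
+// In both cmpxchg overloads above, the "a" input constraint pins
+// compare_value into EAX on entry; CMPXCHG leaves the value actually
+// found at dest in EAX on exit (equal to compare_value when the
+// exchange succeeded), so the "=a" output yields the old value in
+// either case.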
+
+#ifdef AMD64
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  intptr_t addend = add_value;
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+                        : "=r" (addend)
+                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  __asm__ __volatile__ ("xchgq (%2),%0"
+                        : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+  return exchange_value;
+}
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+                        : "=a" (exchange_value)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return exchange_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#else // !AMD64
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  dec((volatile jint*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+extern "C" {
+  // defined in bsd_x86.s
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
+}
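+
+// These helpers live in assembly because 32-bit x86 offers no 8-byte
+// atomic access expressible in plain C here: _Atomic_cmpxchg_long wraps
+// a (LOCK-prefixed) CMPXCHG8B, and _Atomic_move_long performs a single
+// 8-byte move (classically via the x87 fildll/fistpll pair) so that
+// jlong loads and stores stay single-copy atomic.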
+
+inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  _Atomic_move_long(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  _Atomic_move_long((volatile jlong*)&store_value, dest);
+}
+
+#endif // AMD64
+
+#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
--- a/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
-#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
-// Adding a lock prefix to an instruction on MP machine
-#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
-  int mp = os::is_MP();
-  __asm__ volatile (  LOCK_IF_MP(%3) "xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void Atomic::inc    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-inline void Atomic::dec    (volatile jint*     dest) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
-                    : "r" (dest), "r" (mp) : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
-  __asm__ volatile (  "xchgl (%2),%0"
-                    : "=r" (exchange_value)
-                    : "0" (exchange_value), "r" (dest)
-                    : "memory");
-  return exchange_value;
-}
-
-inline void*    Atomic::xchg_ptr(void*    exchange_value, volatile void*     dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, cmpxchg_memory_order order) {
-  int mp = os::is_MP();
-  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
-                    : "=a" (exchange_value)
-                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                    : "cc", "memory");
-  return exchange_value;
-}
-
-#ifdef AMD64
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
-                        :
-                        : "r" (dest), "r" (mp)
-                        : "cc", "memory");
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  __asm__ __volatile__ ("xchgq (%2),%0"
-                        : "=r" (exchange_value)
-                        : "0" (exchange_value), "r" (dest)
-                        : "memory");
-  return exchange_value;
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  bool mp = os::is_MP();
-  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
-                        : "=a" (exchange_value)
-                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
-                        : "cc", "memory");
-  return exchange_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-#else // !AMD64
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  inc((volatile jint*)dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  dec((volatile jint*)dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
-}
-
-extern "C" {
-  // defined in bsd_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-  void _Atomic_move_long(volatile jlong* src, volatile jlong* dst);
-}
-
-inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, cmpxchg_memory_order order) {
-  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
-  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline void*    Atomic::cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, cmpxchg_memory_order order) {
-  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  _Atomic_move_long(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  _Atomic_move_long((volatile jlong*)&store_value, dest);
-}
-
-#endif // AMD64
-
-#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_INLINE_HPP
--- a/hotspot/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
 #define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
 
-#include "runtime/atomic.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
--- a/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Wed Jul 05 22:10:32 2017 +0200
@@ -45,7 +45,7 @@
 
   // If we have a last_Java_frame, then we should use it even if
   // isInJava == true.  It should be more reliable than ucontext info.
-  if (jt->has_last_Java_frame()) {
+  if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
     *fr_addr = jt->pd_last_frame();
     return true;
   }
--- a/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ b/hotspot/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -32,12 +32,8 @@
 
   frame pd_last_frame() {
     assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
-    if (_anchor.last_Java_pc() != NULL) {
-      return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
-    } else {
-      // This will pick up pc from sp
-      return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp());
-    }
+    vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
+    return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
   }
 
  public:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
+#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
+
+#include "runtime/os.hpp"
+
+// Implementation of class atomic
+
+#ifdef M68K
+
+/*
+ * __m68k_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Returns newval on success and oldval if no exchange happened.
+ * This implementation is processor specific and works on the
+ * 68020, 68030, 68040 and 68060.
+ *
+ * It will not work on ColdFire, the 68000 or the 68010, since they
+ * lack the CAS instruction.
+ * Using a kernel helper would be better for a complete implementation
+ * across the architecture.
+ *
+ */
+
+static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
+  int ret;
+  __asm __volatile ("cas%.l %0,%2,%1"
+                   : "=d" (ret), "+m" (*(ptr))
+                   : "d" (newval), "0" (oldval));
+  return ret;
+}
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+static inline int m68k_compare_and_swap(volatile int *ptr,
+                                        int oldval,
+                                        int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until success.
+
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and return the previous
+   contents of `*PTR'.  */
+static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until success.
+      int prev = *ptr;
+
+      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
+        return prev;
+    }
+}
+#endif // M68K
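+
+// The helpers above all share one shape: read, test, attempt a CAS,
+// retry on interference. As a hedged illustration only (this function
+// is an example and is not used by the port), the same loop can be
+// written against GCC's portable __sync builtin:
+static inline int generic_compare_and_swap(volatile int *ptr,
+                                           int oldval,
+                                           int newval) {
+  for (;;) {
+    int prev = *ptr;
+    if (prev != oldval)
+      return prev;                  // value differs: report it, no store
+    if (__sync_bool_compare_and_swap(ptr, prev, newval))
+      return prev;                  // exchanged: return prior contents
+    // CAS failed although prev == oldval: a racing write intervened.
+  }
+}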
+
+#ifdef ARM
+
+/*
+ * __kernel_cmpxchg
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ */
+
+typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
+#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
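+// 0xffff0fc0 is the fixed address of the kernel-supplied cmpxchg entry
+// in the ARM "kuser helper" page (a convention inherited from ARM
+// Linux); routing CAS through it lets the kernel supply whichever
+// implementation is atomic on the running CPU.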
+
+
+
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation. */
+static inline int arm_compare_and_swap(volatile int *ptr,
+                                       int oldval,
+                                       int newval) {
+  for (;;) {
+      int prev = *ptr;
+      if (prev != oldval)
+        return prev;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        // Success.
+        return prev;
+
+      // We failed even though prev == oldval.  Try again.
+    }
+}
+
+/* Atomically add an int to memory.  */
+static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
+        return prev + add_value;
+    }
+}
+
+/* Atomically write VALUE into `*PTR' and return the previous
+   contents of `*PTR'.  */
+static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
+  for (;;) {
+      // Loop until a __kernel_cmpxchg succeeds.
+      int prev = *ptr;
+
+      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
+        return prev;
+    }
+}
+#endif // ARM
+
+inline void Atomic::store(jint store_value, volatile jint* dest) {
+#if !defined(ARM) && !defined(M68K)
+  __sync_synchronize();
+#endif
+  *dest = store_value;
+}
+
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
+#if !defined(ARM) && !defined(M68K)
+  __sync_synchronize();
+#endif
+  *dest = store_value;
+}
+
+inline jint Atomic::add(jint add_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_add_and_fetch(dest, add_value);
+#else
+#ifdef M68K
+  return m68k_add_and_fetch(dest, add_value);
+#else
+  return __sync_add_and_fetch(dest, add_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
+  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+}
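+
+// Hedged usage sketch: on the __sync path, add() is a fetch-and-add
+// that also acts as a full barrier, so concurrent increments never lose
+// updates. The identifiers below are invented for the example and are
+// not used by the port.
+static volatile jint example_hits = 0;
+
+static inline jint example_bump() {
+  // Two threads each calling this 100000 times always end at 200000.
+  return __sync_add_and_fetch(&example_hits, 1);  // returns updated value
+}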
+
+inline void Atomic::inc(volatile jint* dest) {
+  add(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void* dest) {
+  add_ptr(1, dest);
+}
+
+inline void Atomic::dec(volatile jint* dest) {
+  add(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  add_ptr(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void* dest) {
+  add_ptr(-1, dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  // __sync_lock_test_and_set is a bizarrely named atomic exchange
+  // operation.  Note that some platforms only support this with the
+  // limitation that the only valid value to store is the immediate
+  // constant 1.  There is a test for this in JNI_CreateJavaVM().
+  jint result = __sync_lock_test_and_set (dest, exchange_value);
+  // All atomic operations are expected to be full memory barriers
+  // (see atomic.hpp). However, __sync_lock_test_and_set is not
+  // a full memory barrier, but an acquire barrier. Hence, this added
+  // barrier.
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
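+
+// Hedged usage sketch: __sync_lock_test_and_set is an atomic exchange
+// with acquire semantics, which pairs with __sync_lock_release (a
+// release-store of zero) to make a minimal spinlock. The identifiers
+// below are invented for the example and are not used by the port.
+static volatile int example_lock_word = 0;
+
+static inline void example_spin_lock() {
+  while (__sync_lock_test_and_set(&example_lock_word, 1) != 0) {
+    // Previous value was 1: another thread holds the lock; spin.
+  }
+}
+
+static inline void example_spin_unlock() {
+  __sync_lock_release(&example_lock_word);
+}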
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
+                                 volatile intptr_t* dest) {
+#ifdef ARM
+  return arm_lock_test_and_set(dest, exchange_value);
+#else
+#ifdef M68K
+  return m68k_lock_test_and_set(dest, exchange_value);
+#else
+  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
+  __sync_synchronize();
+  return result;
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void *) xchg_ptr((intptr_t) exchange_value,
+                           (volatile intptr_t*) dest);
+}
+
+inline jint Atomic::cmpxchg(jint exchange_value,
+                            volatile jint* dest,
+                            jint compare_value,
+                            cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value,
+                             volatile jlong* dest,
+                             jlong compare_value,
+                             cmpxchg_memory_order order) {
+
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+}
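+
+// Note that the jlong variant uses __sync_val_compare_and_swap on every
+// target, including ARM and M68K. A hedged single-threaded sketch of
+// its contract (example only, assuming the target supports an 8-byte
+// CAS):
+//
+//   volatile jlong v = 1;
+//   __sync_val_compare_and_swap(&v, (jlong)1, (jlong)2);  // returns 1, v == 2
+//   __sync_val_compare_and_swap(&v, (jlong)1, (jlong)3);  // returns 2, v == 2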
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
+                                    volatile intptr_t* dest,
+                                    intptr_t compare_value,
+                                    cmpxchg_memory_order order) {
+#ifdef ARM
+  return arm_compare_and_swap(dest, compare_value, exchange_value);
+#else
+#ifdef M68K
+  return m68k_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#endif // M68K
+#endif // ARM
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value,
+                                 volatile void* dest,
+                                 void* compare_value,
+                                 cmpxchg_memory_order order) {
+
+  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
+                              (volatile intptr_t*) dest,
+                              (intptr_t) compare_value,
+                              order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) {
+  volatile jlong dest;
+  os::atomic_copy64(src, &dest);
+  return dest;
+}
+
+inline void Atomic::store(jlong store_value, jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
+}
+
+inline void Atomic::store(jlong store_value, volatile jlong* dest) {
+  os::atomic_copy64((volatile jlong*)&store_value, dest);
+}
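+
+// jlong loads and stores funnel through os::atomic_copy64 because a
+// 64-bit access is not a single atomic instruction on 32-bit Zero
+// targets. As a hedged sketch only (the real helper lives elsewhere in
+// the port and is platform specific), on a target with native 8-byte
+// atomics the copy could look like:
+//
+//   static inline void atomic_copy64(const volatile jlong* src,
+//                                    volatile jlong* dst) {
+//     jlong tmp = __atomic_load_n(src, __ATOMIC_RELAXED);   // one atomic read
+//     __atomic_store_n(dst, tmp, __ATOMIC_RELAXED);         // one atomic write
+//   }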
+
+#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
--- a/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,334 +0,0 @@
-/*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
-#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-
-// Implementation of class atomic
-
-#ifdef M68K
-
-/*
- * __m68k_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Returns newval on success and oldval if no exchange happened.
- * This implementation is processor specific and works on
- * 68020 68030 68040 and 68060.
- *
- * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
- * instruction.
- * Using a kernelhelper would be better for arch complete implementation.
- *
- */
-
-static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
-  int ret;
-  __asm __volatile ("cas%.l %0,%2,%1"
-                   : "=d" (ret), "+m" (*(ptr))
-                   : "d" (newval), "0" (oldval));
-  return ret;
-}
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int m68k_compare_and_swap(volatile int *ptr,
-                                        int oldval,
-                                        int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until success.
-
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until success.
-      int prev = *ptr;
-
-      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
-        return prev;
-    }
-}
-#endif // M68K
-
-#ifdef ARM
-
-/*
- * __kernel_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- */
-
-typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
-#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
-
-
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(volatile int *ptr,
-                                       int oldval,
-                                       int newval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'.  */
-static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        return prev;
-    }
-}
-#endif // ARM
-
-inline void Atomic::store(jint store_value, volatile jint* dest) {
-#if !defined(ARM) && !defined(M68K)
-  __sync_synchronize();
-#endif
-  *dest = store_value;
-}
-
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
-#if !defined(ARM) && !defined(M68K)
-  __sync_synchronize();
-#endif
-  *dest = store_value;
-}
-
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
-  return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
-inline void Atomic::inc(volatile jint* dest) {
-  add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest) {
-  add_ptr(1, dest);
-}
-
-inline void Atomic::dec(volatile jint* dest) {
-  add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-  add_ptr(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest) {
-  add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  // __sync_lock_test_and_set is a bizarrely named atomic exchange
-  // operation.  Note that some platforms only support this with the
-  // limitation that the only valid value to store is the immediate
-  // constant 1.  There is a test for this in JNI_CreateJavaVM().
-  jint result = __sync_lock_test_and_set (dest, exchange_value);
-  // All atomic operations are expected to be full memory barriers
-  // (see atomic.hpp). However, __sync_lock_test_and_set is not
-  // a full memory barrier, but an acquire barrier. Hence, this added
-  // barrier.
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
-                                 volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_lock_test_and_set(dest, exchange_value);
-#else
-#ifdef M68K
-  return m68k_lock_test_and_set(dest, exchange_value);
-#else
-  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
-  __sync_synchronize();
-  return result;
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value,
-                            volatile jint* dest,
-                            jint compare_value,
-                            cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value,
-                             volatile jlong* dest,
-                             jlong compare_value,
-                             cmpxchg_memory_order order) {
-
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
-                                    volatile intptr_t* dest,
-                                    intptr_t compare_value,
-                                    cmpxchg_memory_order order) {
-#ifdef ARM
-  return arm_compare_and_swap(dest, compare_value, exchange_value);
-#else
-#ifdef M68K
-  return m68k_compare_and_swap(dest, compare_value, exchange_value);
-#else
-  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value,
-                                 volatile void* dest,
-                                 void* compare_value,
-                                 cmpxchg_memory_order order) {
-
-  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
-                              (volatile intptr_t*) dest,
-                              (intptr_t) compare_value,
-                              order);
-}
-
-inline jlong Atomic::load(volatile jlong* src) {
-  volatile jlong dest;
-  os::atomic_copy64(src, &dest);
-  return dest;
-}
-
-inline void Atomic::store(jlong store_value, jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
-}
-
-inline void Atomic::store(jlong store_value, volatile jlong* dest) {
-  os::atomic_copy64((volatile jlong*)&store_value, dest);
-}
-
-#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp	Wed Jul 05 22:10:32 2017 +0200
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
+
+#include "vm_version_aarch64.hpp"
+
+// Implementation of class atomic
+
+#define FULL_MEM_BARRIER  __sync_synchronize()
+#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
+#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
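+
+// Hedged illustration of the acquire/release fences these macros wrap:
+// classic message passing (identifiers invented for the example):
+//
+//   // producer:
+//   payload = 42;
+//   WRITE_MEM_BARRIER;             // order the data before the flag
+//   ready = 1;
+//
+//   // consumer:
+//   while (!ready) { /* spin */ }
+//   READ_MEM_BARRIER;              // order the flag before the data
+//   assert(payload == 42);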
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+
+inline jint Atomic::add(jint add_value, volatile jint* dest)
+{
+ return __sync_add_and_fetch(dest, add_value);
+}
+
+inline void Atomic::inc(volatile jint* dest)
+{
+ add(1, dest);
+}
+
+inline void Atomic::inc_ptr(volatile void* dest)
+{
+ add_ptr(1, dest);
+}
+
+inline void Atomic::dec (volatile jint* dest)
+{
+ add(-1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile void* dest)
+{
+ add_ptr(-1, dest);
+}
+
+inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
+{
+  jint res = __sync_lock_test_and_set (dest, exchange_value);
+  FULL_MEM_BARRIER;
+  return res;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
+{
+  return (void *) xchg_ptr((intptr_t) exchange_value,
+                           (volatile intptr_t*) dest);
+}
+
+template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
+                                        T compare_value, cmpxchg_memory_order order)
+{
+  if (order == memory_order_relaxed) {
+    T value = compare_value;
+    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
+                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+    return value;
+  } else {
+    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+  }
+}
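+
+// The relaxed path works because of the builtin's in/out contract: on
+// failure, __atomic_compare_exchange writes the current contents back
+// into its 'expected' argument; on success, 'expected' already holds
+// the old value. Returning it therefore satisfies cmpxchg's "return
+// previous contents" rule either way. A hedged standalone sketch
+// (example only):
+//
+//   int expected = compare_value;
+//   __atomic_compare_exchange(dest, &expected, &exchange_value,
+//                             /*weak*/false, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+//   return expected;   // previous contents of *dest either way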
+
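+// Defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE tells the shared atomic code
+// that this platform supplies a native jbyte cmpxchg, so the generic
+// byte-in-int emulation in atomic.hpp is not needed here.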
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
+inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
+{
+ return __sync_add_and_fetch(dest, add_value);
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
+{
+  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest)
+{
+ add_ptr(1, dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest)
+{
+ add_ptr(-1, dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
+{
+  intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
+  FULL_MEM_BARRIER;
+  return res;
+}
+
+inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order)
+{
+  return generic_cmpxchg(exchange_value, dest, compare_value, order);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order)
+{
+  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
+                              (volatile intptr_t*) dest,
+                              (intptr_t) compare_value,
+                              order);
+}
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
--- a/hotspot/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.inline.hpp	Fri Sep 02 02:41:56 2016 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
-#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-#include "vm_version_aarch64.hpp"
-
-// Implementation of class atomic
-
-#define FULL_MEM_BARRIER  __sync_synchronize()
-#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
-#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-
-inline jint Atomic::add(jint add_value, volatile jint* dest)
-{
- return __sync_add_and_fetch(dest, add_value);
-}
-
-inline void Atomic::inc(volatile jint* dest)
-{
- add(1, dest);
-}
-
-inline void Atomic::inc_ptr(volatile void* dest)
-{
- add_ptr(1, dest);
-}
-
-inline void Atomic::dec (volatile jint* dest)
-{
- add(-1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile void* dest)
-{
- add_ptr(-1, dest);
-}
-
-inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
-{
-  jint res = __sync_lock_test_and_set (dest, exchange_value);
-  FULL_MEM_BARRIER;
-  return res;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
-{
-  return (void *) xchg_ptr((intptr_t) exchange_value,
-                           (volatile intptr_t*) dest);
-}
-
-template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
-                                        T compare_value, cmpxchg_memory_order order)
-{
-  if (order == memory_order_relaxed) {
-    T value = compare_value;
-    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
-                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
-    return value;
-  } else {
-    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
-  }
-}
-
-#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
-inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-{
- return __sync_add_and_fetch(dest, add_value);
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
-{
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest)
-{
- add_ptr(1, dest);
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest)
-{
- add_ptr(-1, dest);
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
-{
-  intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
-  FULL_MEM_BARRIER;
-  return res;
-}
-
-inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order)
-{
-  return generic_cmpxchg(exchange_value, dest, compare_value, order);
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order)