changeset 1017:ac3c2d59e32f

more from linux/solaris
author bachmann
date Fri, 23 Oct 2009 12:01:06 -0700
parents 0512bf73ad56
children 7930b62dafc9
files src/os_cpu/haiku_x86/vm/copy_haiku_x86.inline.hpp src/os_cpu/haiku_x86/vm/orderAccess_haiku_x86.inline.hpp src/os_cpu/haiku_x86/vm/prefetch_haiku_x86.inline.hpp
diffstat 3 files changed, 411 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/haiku_x86/vm/copy_haiku_x86.inline.hpp	Fri Oct 23 12:01:06 2009 -0700
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2003-2004 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+  (void)memmove(to, from, count * HeapWordSize);
+}
+
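+// The switch below is deliberate fall-through: each case copies one word and
+// falls into the next, so a count of N copies words N-1 down to 0 without a
+// loop; larger counts take the memcpy default.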
+static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+#ifndef AMD64
+  (void)memcpy(to, from, count * HeapWordSize);
+#else
+  switch (count) {
+  case 8:  to[7] = from[7];
+  case 7:  to[6] = from[6];
+  case 6:  to[5] = from[5];
+  case 5:  to[4] = from[4];
+  case 4:  to[3] = from[3];
+  case 3:  to[2] = from[2];
+  case 2:  to[1] = from[1];
+  case 1:  to[0] = from[0];
+  case 0:  break;
+  default:
+    (void)memcpy(to, from, count * HeapWordSize);
+    break;
+  }
+#endif // AMD64
+}
+
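+// Copy word-by-word with plain HeapWord stores so that each word is written
+// atomically; memcpy is avoided even in the default case because it may use
+// moves narrower or wider than a single word.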
+static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
+  switch (count) {
+  case 8:  to[7] = from[7];
+  case 7:  to[6] = from[6];
+  case 6:  to[5] = from[5];
+  case 5:  to[4] = from[4];
+  case 4:  to[3] = from[3];
+  case 3:  to[2] = from[2];
+  case 2:  to[1] = from[1];
+  case 1:  to[0] = from[0];
+  case 0:  break;
+  default: while (count-- > 0) {
+             *to++ = *from++;
+           }
+           break;
+  }
+}
+
+static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+  (void)memmove(to, from, count * HeapWordSize);
+}
+
+static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+  pd_disjoint_words(from, to, count);
+}
+
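+// The _Copy_* routines used below are out-of-line assembly stubs; on this
+// port they would presumably come from a haiku_x86 assembly file, as they do
+// on the Linux and Solaris x86 ports this changeset is adapted from.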
+static void pd_conjoint_bytes(void* from, void* to, size_t count) {
+#ifdef AMD64
+  (void)memmove(to, from, count);
+#else
+  _Copy_conjoint_bytes(from, to, count);
+#endif // AMD64
+}
+
+static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
+  pd_conjoint_bytes(from, to, count);
+}
+
+static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
+  _Copy_conjoint_jshorts_atomic(from, to, count);
+}
+
+static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
+  _Copy_conjoint_jints_atomic(from, to, count);
+}
+
+static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
+  // Use asm to guarantee fild/fistp or xmm moves, because compilers won't promise an atomic 64-bit copy.
+  _Copy_conjoint_jlongs_atomic(from, to, count);
+}
+
+static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
+#ifdef AMD64
+  assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
+  _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
+#else
+  _Copy_conjoint_jints_atomic((jint*)from, (jint*)to, count);
+#endif // AMD64
+}
+
+static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
+  _Copy_arrayof_conjoint_bytes(from, to, count);
+}
+
+static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
+  _Copy_arrayof_conjoint_jshorts(from, to, count);
+}
+
+static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
+  _Copy_arrayof_conjoint_jints(from, to, count);
+}
+
+static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
+#ifdef AMD64
+  _Copy_arrayof_conjoint_jlongs(from, to, count);
+#else
+  pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
+#endif // AMD64
+}
+
+static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
+#ifdef AMD64
+  assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size");
+  _Copy_arrayof_conjoint_jlongs(from, to, count);
+#else
+  assert(BytesPerInt == BytesPerOop, "jints and oops must be the same size");
+  _Copy_arrayof_conjoint_jints(from, to, count);
+#endif // AMD64
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/haiku_x86/vm/orderAccess_haiku_x86.inline.hpp	Fri Oct 23 12:01:06 2009 -0700
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Implementation of class OrderAccess.
+
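+// x86 enforces total store order: plain loads already have acquire semantics
+// and plain stores have release semantics, so only storeload needs a real
+// hardware fence; the other barriers reduce to compiler barriers.
+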
+inline void OrderAccess::loadload()   { acquire(); }
+inline void OrderAccess::storestore() { release(); }
+inline void OrderAccess::loadstore()  { acquire(); }
+inline void OrderAccess::storeload()  { fence(); }
+
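+// A dummy volatile load from the top of the stack acts as a compiler barrier;
+// no hardware fence is needed for acquire on x86.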
+inline void OrderAccess::acquire() {
+  volatile intptr_t dummy;
+#ifdef AMD64
+  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (dummy) : : "memory");
+#else
+  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (dummy) : : "memory");
+#endif // AMD64
+}
+
+inline void OrderAccess::release() {
+  // A volatile store has release semantics on x86; dummy here is the class's
+  // static volatile field, so this compiles to a single ordered store.
+  dummy = 0;
+}
+
+inline void OrderAccess::fence() {
+  if (os::is_MP()) {
+    // Always use a locked addl to the stack: it is a full barrier, and mfence is sometimes more expensive.
+#ifdef AMD64
+    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
+#else
+    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
+#endif
+  }
+}
+
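+// On x86 a plain load is an acquire and a plain store is a release, so the
+// load_acquire and release_store families reduce to ordinary volatile accesses.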
+inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
+inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
+inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
+inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
+inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
+inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
+inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
+inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
+
+inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
+inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
+inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
+
+inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
+inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
+
+inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
+inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
+
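+// xchg with a memory operand is implicitly locked, so a single instruction
+// performs both the store and a full fence.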
+inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
+  __asm__ volatile (  "xchgb (%2),%0"
+                    : "=r" (v)
+                    : "0" (v), "r" (p)
+                    : "memory");
+}
+inline void     OrderAccess::store_fence(jshort* p, jshort v) {
+  __asm__ volatile (  "xchgw (%2),%0"
+                    : "=r" (v)
+                    : "0" (v), "r" (p)
+                    : "memory");
+}
+inline void     OrderAccess::store_fence(jint*   p, jint   v) {
+  __asm__ volatile (  "xchgl (%2),%0"
+                    : "=r" (v)
+                    : "0" (v), "r" (p)
+                    : "memory");
+}
+
+inline void     OrderAccess::store_fence(jlong*   p, jlong   v) {
+#ifdef AMD64
+  __asm__ __volatile__ ("xchgq (%2), %0"
+                        : "=r" (v)
+                        : "0" (v), "r" (p)
+                        : "memory");
+#else
+  *p = v; fence();
+#endif // AMD64
+}
+
+// Delegate the unsigned versions to the signed ones, as the 32-bit port did
+// (AMD64 duplicated the bodies instead). With inlining this is just as fast and simpler.
+inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
+inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
+inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
+inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
+inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
+inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }
+
+inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
+#ifdef AMD64
+  __asm__ __volatile__ ("xchgq (%2), %0"
+                        : "=r" (v)
+                        : "0" (v), "r" (p)
+                        : "memory");
+#else
+  store_fence((jint*)p, (jint)v);
+#endif // AMD64
+}
+
+inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
+#ifdef AMD64
+  __asm__ __volatile__ ("xchgq (%2), %0"
+                        : "=r" (v)
+                        : "0" (v), "r" (p)
+                        : "memory");
+#else
+  store_fence((jint*)p, (jint)v);
+#endif // AMD64
+}
+
+// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
+inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
+  __asm__ volatile (  "xchgb (%2),%0"
+                    : "=r" (v)
+                    : "0" (v), "r" (p)
+                    : "memory");
+}
+inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
+  __asm__ volatile (  "xchgw (%2),%0"
+                    : "=r" (v)
+                    : "0" (v), "r" (p)
+                    : "memory");
+}
+inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
+  __asm__ volatile (  "xchgl (%2),%0"
+                    : "=r" (v)
+                    : "0" (v), "r" (p)
+                    : "memory");
+}
+
+inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) {
+#ifdef AMD64
+  __asm__ __volatile__ (  "xchgq (%2), %0"
+                          : "=r" (v)
+                          : "0" (v), "r" (p)
+                          : "memory");
+#else
+  *p = v; fence();
+#endif // AMD64
+}
+
+inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
+inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
+inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
+inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }
+
+inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
+inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }
+
+inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
+#ifdef AMD64
+  __asm__ __volatile__ (  "xchgq (%2), %0"
+                          : "=r" (v)
+                          : "0" (v), "r" (p)
+                          : "memory");
+#else
+  release_store_fence((volatile jint*)p, (jint)v);
+#endif // AMD64
+}
+inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
+#ifdef AMD64
+  __asm__ __volatile__ (  "xchgq (%2), %0"
+                          : "=r" (v)
+                          : "0" (v), "r" (p)
+                          : "memory");
+#else
+  release_store_fence((volatile jint*)p, (jint)v);
+#endif // AMD64
+}
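+
+// A (hypothetical) usage sketch: a writer publishes a value with
+//   _payload = compute();
+//   OrderAccess::release_store(&_ready, 1);
+// and a reader pairs it with
+//   if (OrderAccess::load_acquire(&_ready)) consume(_payload);
+// where _payload, _ready, compute and consume stand in for real VM state.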
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/haiku_x86/vm/prefetch_haiku_x86.inline.hpp	Fri Oct 23 12:01:06 2009 -0700
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+
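+// On 32-bit x86 builds both functions compile to no-ops; only AMD64 emits prefetch hints.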
+inline void Prefetch::read (void *loc, intx interval) {
+#ifdef AMD64
+  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
+#endif // AMD64
+}
+
+inline void Prefetch::write(void *loc, intx interval) {
+#ifdef AMD64
+
+  // Do not use the 3DNow! prefetchw instruction: it is not supported on Intel EM64T processors.
+  //  __asm__ ("prefetchw (%0,%1,1)" : : "r" (loc), "r" (interval));
+  __asm__ ("prefetcht0 (%0,%1,1)" : : "r" (loc), "r" (interval));
+
+#endif // AMD64
+}