OpenJDK / haiku / haiku / hotspot
changeset 1016:0512bf73ad56
initial versions from linux/solaris
author | bachmann
---|---
date | Fri, 23 Oct 2009 11:35:13 -0700
parents | c93664c68ae5
children | ac3c2d59e32f
files | src/os_cpu/haiku_x86/vm/assembler_haiku_x86.cpp src/os_cpu/haiku_x86/vm/atomic_haiku_x86.inline.hpp src/os_cpu/haiku_x86/vm/bytes_haiku_x86.inline.hpp
diffstat | 3 files changed, 298 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/haiku_x86/vm/assembler_haiku_x86.cpp	Fri Oct 23 11:35:13 2009 -0700
@@ -0,0 +1,18 @@
+
+#include "incls/_precompiled.incl"
+#include "incls/_assembler_haiku_x86.cpp.incl"
+
+void Assembler::int3() {
+  call(CAST_FROM_FN_PTR(address, os::breakpoint), relocInfo::runtime_call_type);
+}
+
+// TODO: convert the following working i586 version to i486
+//   if (thread != eax) pushl(eax);  // save eax if necessary
+//   movl(eax, FS_segment, 4 * ThreadLocalStorage::thread_index());  // grab the Thread* from FS:thread_index
+//   if (thread != eax) {
+//     movl(thread, eax);  // move the result out of eax
+//     popl(eax);          // restore prior eax
+//   }
+void MacroAssembler::get_thread(Register thread) {
+  prefix(FS_segment); movl(thread, Address(4 * ThreadLocalStorage::thread_index(), relocInfo::none));
+}
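For context on get_thread above: this port keeps the current Thread* in a thread-local-storage slot addressed through the FS segment register, so the emitted code is a single FS-relative load of fs:[4 * thread_index]. A minimal sketch of what that load does at run time, assuming 32-bit x86 and GCC inline assembly; current_thread_from_fs and its thread_index parameter are hypothetical stand-ins for the generated code and ThreadLocalStorage::thread_index(), not part of the changeset:

    // Illustrative only. The real generated code uses a constant
    // displacement (fs:[disp32]); a register index computes the same
    // address.
    static inline void* current_thread_from_fs(int thread_index) {
      void* thread;
      __asm__ volatile ("movl %%fs:(%1), %0"
                        : "=r" (thread)              // output: loaded Thread*
                        : "r"  (4 * thread_index));  // input: byte offset into FS
      return thread;
    }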
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/haiku_x86/vm/atomic_haiku_x86.inline.hpp	Fri Oct 23 11:35:13 2009 -0700
@@ -0,0 +1,195 @@
+/*
+ * Copyright 1999-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+
+// Adding a lock prefix to an instruction on MP machine
+#define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "
+
+inline jint Atomic::add(jint add_value, volatile jint* dest) {
+  jint addend = add_value;
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%3) "xaddl %0,(%2)"
+                    : "=r" (addend)
+                    : "0" (addend), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void Atomic::inc(volatile jint* dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "addl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::inc_ptr(volatile void* dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+inline void Atomic::dec(volatile jint* dest) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%1) "subl $1,(%0)" :
+                    : "r" (dest), "r" (mp) : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile void* dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+  __asm__ volatile ("xchgl (%2),%0"
+                    : "=r" (exchange_value)
+                    : "0" (exchange_value), "r" (dest)
+                    : "memory");
+  return exchange_value;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgl %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
+
+extern "C" {
+  // defined in linux_x86.s
+  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
+}
+
+#ifdef AMD64
+inline void Atomic::store(jlong store_value, jlong* dest)          { *dest = store_value; }
+inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  intptr_t addend = add_value;
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%3) "xaddq %0,(%2)"
+                        : "=r" (addend)
+                        : "0" (addend), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return addend + add_value;
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "addq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%1) "subq $1,(%0)"
+                        :
+                        : "r" (dest), "r" (mp)
+                        : "cc", "memory");
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  __asm__ __volatile__ ("xchgq (%2),%0"
+                        : "=r" (exchange_value)
+                        : "0" (exchange_value), "r" (dest)
+                        : "memory");
+  return exchange_value;
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+  bool mp = os::is_MP();
+  __asm__ __volatile__ (LOCK_IF_MP(%4) "cmpxchgq %1,(%3)"
+                        : "=a" (exchange_value)
+                        : "r" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                        : "cc", "memory");
+  return exchange_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+#else
+//inline void Atomic::store(jlong store_value, jlong* dest)          { *dest = store_value; }
+//inline void Atomic::store(jlong store_value, volatile jlong* dest) { *dest = store_value; }
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
+  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
+}
+
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+  inc((volatile jint*)dest);
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+  dec((volatile jint*)dest);
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value, os::is_MP());
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
+  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
+  return (void*)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value);
+}
+#endif // AMD64
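Two conventions in the file above are worth noting. LOCK_IF_MP tests os::is_MP() at run time and emits the lock prefix only on multiprocessor machines, since the prefix is unneeded (and historically expensive) on a uniprocessor. And cmpxchg returns whatever value was found at *dest; the store took effect only if that value equals compare_value. A minimal usage sketch of the latter, illustrative only and not part of the changeset (add_via_cas is a hypothetical helper):

    // Atomically add 'delta' to '*counter' using only Atomic::cmpxchg.
    // cmpxchg(exchange_value, dest, compare_value) stores exchange_value
    // into *dest iff *dest == compare_value, and returns the prior *dest.
    static jint add_via_cas(volatile jint* counter, jint delta) {
      jint old_value;
      do {
        old_value = *counter;
        // Retry if another thread changed *counter between the read and the CAS.
      } while (Atomic::cmpxchg(old_value + delta, counter, old_value) != old_value);
      return old_value + delta;
    }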
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/haiku_x86/vm/bytes_haiku_x86.inline.hpp	Fri Oct 23 11:35:13 2009 -0700
@@ -0,0 +1,85 @@
+/*
+ * Copyright 1999-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+//#include <byteswap.h>
+
+// Efficient swapping of data bytes from Java byte
+// ordering to native byte ordering and vice versa.
+inline u2 Bytes::swap_u2(u2 x) {
+#ifdef AMD64
+  return bswap_16(x);
+#else
+  u2 ret;
+  __asm__ __volatile__ (
+    "movw %0, %%ax;"
+    "xchg %%al, %%ah;"
+    "movw %%ax, %0"
+    :"=r" (ret)      // output : register 0 => ret
+    :"0"  (x)        // input  : x => register 0
+    :"ax", "0"       // clobbered registers
+  );
+  return ret;
+#endif // AMD64
+}
+
+inline u4 Bytes::swap_u4(u4 x) {
+#ifdef AMD64
+  return bswap_32(x);
+#else
+  u4 ret;
+  __asm__ __volatile__ (
+    "bswap %0"
+    :"=r" (ret)      // output : register 0 => ret
+    :"0"  (x)        // input  : x => register 0
+    :"0"             // clobbered register
+  );
+  return ret;
+#endif // AMD64
+}
+
+#ifdef AMD64
+inline u8 Bytes::swap_u8(u8 x) {
+#ifdef SPARC_WORKS
+  // workaround for SunStudio12 CR6615391
+  __asm__ __volatile__ (
+    "bswapq %0"
+    :"=r" (x)        // output : register 0 => x
+    :"0"  (x)        // input  : x => register 0
+    :"0"             // clobbered register
+  );
+  return x;
+#else
+  return bswap_64(x);
+#endif
+}
+#else
+// Helper function for swap_u8
+inline u8 Bytes::swap_u8_base(u4 x, u4 y) {
+  return (((u8)swap_u4(x))<<32) | swap_u4(y);
+}
+
+inline u8 Bytes::swap_u8(u8 x) {
+  return swap_u8_base(*(u4*)&x, *(((u4*)&x)+1));
+}
+#endif // !AMD64
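For reference, the bswap instruction used in Bytes::swap_u4 above has a straightforward portable equivalent in shifts and masks. A minimal sketch of what it computes (illustrative only; swap_u4_portable is a hypothetical name, not part of the changeset, which keeps the instruction for speed):

    // Byte-reverses a 32-bit value: 0xAABBCCDD -> 0xDDCCBBAA.
    // Equivalent to the 'bswap' instruction in Bytes::swap_u4.
    inline u4 swap_u4_portable(u4 x) {
      return ((x & 0x000000ffu) << 24) |
             ((x & 0x0000ff00u) <<  8) |
             ((x & 0x00ff0000u) >>  8) |
             ((x & 0xff000000u) >> 24);
    }

The 32-bit swap_u8 composes the same operation: it swaps each 32-bit half and exchanges the halves, which together reverse all eight bytes.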