/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "orderAccess_linux_sparc.inline.hpp"
#include "runtime/atomic.hpp"
#include "vm_version_sparc.hpp"

// Implementation of class atomic

inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void* store_value, volatile void* dest) { *(void* volatile *)dest = store_value; }
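
// Hedged note: these plain volatile assignments appear sufficient because
// aligned, register-sized loads and stores are atomic on SPARC, and Linux
// runs SPARC user code with TSO (total store order) memory ordering, so no
// CAS or explicit fence is needed for a simple store.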

inline jint Atomic::add(jint add_value, volatile jint* dest) {
  intptr_t rv;
  __asm__ volatile(
"1: \n\t"
" ld [%2], %%o2\n\t"
" add %1, %%o2, %%o3\n\t"
" cas [%2], %%o2, %%o3\n\t"
" cmp %%o2, %%o3\n\t"
" bne 1b\n\t"
" nop\n\t"
" add %1, %%o2, %0\n\t"
: "=r" (rv)
: "memory", "o2", "o3");
return rv;
}
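
// The ld/cas/bne sequence above is a compare-and-swap retry loop: load the
// current value into o2, compute o2 + add_value in o3, and let cas publish
// o3 only if *dest still equals o2; otherwise loop. A rough C-level
// equivalent, as an illustrative sketch only (not part of the original
// header), built on the cmpxchg defined later in this file:
//
//   jint old, updated;
//   do {
//     old     = *dest;
//     updated = old + add_value;
//   } while (Atomic::cmpxchg(updated, dest, old) != old);
//   return updated;  // add returns the new value, like the final add above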

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
  intptr_t rv;
#ifdef _LP64
__asm__ volatile(
"1: \n\t"
" ldx [%2], %%o2\n\t"
" add %0, %%o2, %%o3\n\t"
" casx [%2], %%o2, %%o3\n\t"
" cmp %%o2, %%o3\n\t"
" bne %%xcc, 1b\n\t"
" nop\n\t"
" add %0, %%o2, %0\n\t"
: "=r" (rv)
: "memory", "o2", "o3");
#else
__asm__ volatile(
"1: \n\t"
" ld [%2], %%o2\n\t"
" add %1, %%o2, %%o3\n\t"
" cas [%2], %%o2, %%o3\n\t"
" cmp %%o2, %%o3\n\t"
" bne 1b\n\t"
" nop\n\t"
" add %1, %%o2, %0\n\t"
: "=r" (rv)
: "memory", "o2", "o3");
#endif // _LP64
return rv;
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
  intptr_t rv = exchange_value;
  __asm__ volatile(
" swap [%2],%1\n\t"
: "=r" (rv)
: "memory");
return rv;
}
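
// swap exchanges a register with a 32-bit word in memory in a single
// instruction, so no retry loop is needed here. swap is deprecated in the
// SPARC V9 architecture, which is why the 64-bit xchg_ptr path below falls
// back to a casx loop instead.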

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
  intptr_t rv = exchange_value;
#ifdef _LP64
__asm__ volatile(
"1:\n\t"
" mov %1, %%o3\n\t"
" ldx [%2], %%o2\n\t"
" casx [%2], %%o2, %%o3\n\t"
" cmp %%o2, %%o3\n\t"
" bne %%xcc, 1b\n\t"
" nop\n\t"
" mov %%o2, %0\n\t"
: "=r" (rv)
: "memory", "o2", "o3");
#else
__asm__ volatile(
"swap [%2],%1\n\t"
: "=r" (rv)
: "memory");
#endif // _LP64
return rv;
}
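
// With no 64-bit swap instruction available, the LP64 path above emulates
// an atomic exchange: reload *dest into o2, try to replace it with
// exchange_value via casx, and retry until o2 == o3 shows the casx
// succeeded; o2 then holds the previous value, which becomes the result.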

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  jint rv;
  __asm__ volatile(
" cas [%2], %3, %0"
: "=r" (rv)
: "memory");
return rv;
}
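
// cas [%2], %3, %0 compares *dest with compare_value and, on a match,
// stores rv (preloaded with exchange_value via the "0" constraint); either
// way rv receives the prior contents of *dest. A typical use, shown as an
// illustrative sketch only (the jint flag is hypothetical):
//
//   volatile jint flag = 0;
//   ...
//   while (Atomic::cmpxchg(1, &flag, 0) != 0) {
//     ;  // another thread owns the flag; spin until the 0 -> 1 CAS succeeds
//   }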

inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
#ifdef _LP64
  jlong rv;
  __asm__ volatile(
" casx [%2], %3, %0"
: "=r" (rv)
: "memory");
return rv;
#else
  volatile jlong_accessor evl, cvl, rv;
  evl.long_value = exchange_value;
  cvl.long_value = compare_value;
  __asm__ volatile(
" sllx %2, 32, %2\n\t"
" srl %3, 0, %3\n\t"
" or %2, %3, %2\n\t"
" sllx %5, 32, %5\n\t"
" srl %6, 0, %6\n\t"
" or %5, %6, %5\n\t"
" casx [%4], %5, %2\n\t"
" srl %2, 0, %1\n\t"
" srlx %2, 32, %0\n\t"
: "memory");
return rv.long_value;
#endif
}
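
// On 32-bit SPARC a jlong occupies two 32-bit registers, while casx wants
// single 64-bit operands. The asm above therefore packs each high/low pair
// into one 64-bit register (sllx the high word, srl to zero-extend the low
// word, or them together), performs the casx, and splits the 64-bit result
// back into the two words of rv. jlong_accessor is HotSpot's union for
// viewing a jlong as two 32-bit words.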

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
  intptr_t rv;
#ifdef _LP64
__asm__ volatile(
" casx [%2], %3, %0"
: "=r" (rv)
: "memory");
#else
__asm__ volatile(
" cas [%2], %3, %0"
: "=r" (rv)
: "memory");
#endif // _LP64
return rv;
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value);
}
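
// Pointer-sized cmpxchg is the usual building block for lock-free linked
// structures. An illustrative sketch only (Node, head, and push are
// hypothetical, not part of HotSpot):
//
//   struct Node { Node* next; };
//
//   void push(Node* volatile* head, Node* n) {
//     Node* old;
//     do {
//       old     = *head;    // snapshot the current top of the stack
//       n->next = old;      // link the new node in front of it
//     } while (Atomic::cmpxchg_ptr(n, head, old) != old);  // retry on race
//   }
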
#endif // OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP