/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "vm_version_x86.hpp"
// Implementation of class OrderAccess.
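
// x86 presents a strongly ordered (TSO-like) memory model: ordinary loads
// already have acquire semantics and ordinary stores already have release
// semantics, so only StoreLoad reordering needs an explicit fence (a locked
// instruction below). A hypothetical publication pattern built on these
// primitives, using example fields _data and _ready:
//
//   writer:  _data = 42;
//            OrderAccess::release_store(&_ready, (jint)1);
//   reader:  if (OrderAccess::load_acquire(&_ready) != 0) { use(_data); }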
inline void OrderAccess::acquire() {
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

inline void OrderAccess::release() {
  // Avoid hitting the same cache-line from
  // different threads.
  volatile jint local_dummy = 0;
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}
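
// On x86 the acquiring/releasing accesses above need no extra hardware
// barrier, so load_acquire and release_store compile to plain volatile
// memory accesses. The julong (and jlong) variants go through Atomic
// because a 64-bit access is not atomic with ordinary moves on 32-bit x86.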
inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); }
inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); }
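
// The store_fence family does the store and the full fence in a single
// instruction: an xchg with a memory operand is implicitly locked on x86,
// and locked instructions act as full two-way barriers.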
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
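
// There is no 8-byte xchg on 32-bit x86, so the jlong variant below falls
// back to a plain store followed by fence() there.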
inline void OrderAccess::store_fence(jlong* p, jlong v) {
#ifdef AMD64
  __asm__ volatile ( "xchgq (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
#else
  *p = v; fence();
#endif // AMD64
}
// The unsigned overloads cast to and reuse the signed bodies (AMD64 and
// 32-bit alike). As long as the compiler does the inlining, this is simpler.
inline void OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }

inline void OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ volatile ( "xchgq (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}
inline void OrderAccess::store_ptr_fence(void** p, void* v) {
#ifdef AMD64
  __asm__ volatile ( "xchgq (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}
// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
__asm__ volatile ( "xchgb (%2),%0"
: "=q" (v)
: "0" (v), "r" (p)
: "memory");
}
__asm__ volatile ( "xchgw (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
__asm__ volatile ( "xchgl (%2),%0"
: "=r" (v)
: "0" (v), "r" (p)
: "memory");
}
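
// As with store_fence above, the 32-bit build has no 8-byte xchg, so the
// jlong variant falls back to release_store() followed by fence().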
inline void OrderAccess::release_store_fence(volatile jlong* p, jlong v) {
#ifdef AMD64
  __asm__ volatile ( "xchgq (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
#else
  release_store(p, v); fence();
#endif // AMD64
}
inline void OrderAccess::release_store_fence(volatile jubyte* p, jubyte v) { release_store_fence((volatile jbyte*)p, (jbyte)v); }
inline void OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void OrderAccess::release_store_fence(volatile juint* p, juint v) { release_store_fence((volatile jint*)p, (jint)v); }
inline void OrderAccess::release_store_fence(volatile julong* p, julong v) { release_store_fence((volatile jlong*)p, (jlong)v); }
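
// Pointer variants: xchgq on AMD64; on 32-bit, where a pointer is 4 bytes,
// delegate to the jint overload.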
inline void OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ volatile ( "xchgq (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void OrderAccess::release_store_ptr_fence(volatile void** p, void* v) {
#ifdef AMD64
  __asm__ volatile ( "xchgq (%2),%0"
                     : "=r" (v)
                     : "0" (v), "r" (p)
                     : "memory");
#else
  void* d = v;
  release_store_fence((volatile jint*)p, (jint)d);
#endif // AMD64
}
#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP