/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");
if (input()->is_single_xmm()) {
} else if (input()->is_double_xmm()) {
} else {
}
// input is > 0 -> return maxInt
// result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
// input is NaN -> return 0
}
}
{
}
// pass the array index on stack because all registers must be preserved
if (_index->is_cpu_register()) {
} else {
}
} else {
}
}
if (_offset != -1) {
}
}
// Implementation of NewInstanceStub
NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
"need new_instance id");
}
}
// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
}
}
// Implementation of NewObjectArrayStub
NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
}
}
// Implementation of MonitorAccessStubs
{
}
} else {
}
}
if (_compute_lock) {
// lock_reg was destroyed by fast unlocking attempt => recompute it
}
// note: non-blocking leaf routine => no call info needed
} else {
}
}
// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
// We're patching a 5-7 byte instruction on intel and we need to
// make sure that we don't see a piece of the instruction. It
// appears mostly impossible on Intel to simply invalidate other
// processors caches and since they may do aggressive prefetch it's
// very hard to make a guess about what code might be in the icache.
// Force the instruction to be double word aligned so that it
// doesn't span a cache line.
}
assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");
// static field accesses have special semantics while the class
// initializer is being run so we emit a test which can be used to
// check that this code is being executed by the initializing
// thread.
if (CommentedAssembly) {
}
if (_id == load_klass_id) {
// produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
for (int i = 0; i < _bytes_to_copy; i++) {
}
#endif
} else {
// make a copy of the code which is going to be patched.
for ( int i = 0; i < _bytes_to_copy; i++) {
}
}
int bytes_to_skip = 0;
if (_id == load_klass_id) {
if (CommentedAssembly) {
}
// Load without verification to keep code size small. We need it because
// begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
// access_field patches may execute the patched code before it's
// copied back into place so we need to jump back into the main
// code of the nmethod to continue execution.
// make sure this extra code gets skipped
}
if (CommentedAssembly) {
}
// Now emit the patch record telling the runtime how to find the
// pieces of the patch. We only need 3 bytes but for readability of
// the disassembly we make the data look like a movl reg, imm32,
// which requires 5 bytes
// emit the offsets needed to find the code to patch
switch (_id) {
default: ShouldNotReachHere();
}
if (CommentedAssembly) {
}
// Add enough nops so deoptimization can overwrite the jmp above with a call
// and not destroy the world.
}
if (_id == load_klass_id) {
relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
}
}
}
}
// pass the object on stack because all registers must be preserved
if (_obj->is_cpu_register()) {
}
}
//---------------slow case: call to native-----------------
// Figure out where the args should go
// This should really convert the IntrinsicID to the methodOop and signature
// but I don't know how to do that.
//
// push parameters
// (src, src_pos, dest, destPos, length)
Register r[5];
r[0] = src()->as_register();
// next registers will get stored on the stack
for (int i = 0; i < 5 ; i++ ) {
} else {
}
}
#ifndef PRODUCT
#endif
}
/////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC
// At this point we know that marking is in progress.
// If do_load() is true then we have to emit the
// load of the previous value; otherwise it has already
// been loaded into _pre_val.
if (do_load()) {
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
}
}
"Must be if we're using this.");
}
}
#endif // SERIALGC
/////////////////////////////////////////////////////////////////////////////