Lines Matching refs:to

16  * 2 along with this work; if not, write to the Free Software Foundation,
173 // to end up back on the FPU so it can operate on them.
269 case T_FLOAT : i = 5; break; // have to treat float and double separately for SSE
287 case T_INT : /* nothing to do */ break;
365 // Increment counter in methodOop (we don't need to load it, it's in rcx).
389 // Test to see if we should create a method data oop
394 // if no method data exists, go to profile_method
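The two matches above (the counter increment and the method-data test) are the interpreter's lazy profiling trigger. A minimal C++ sketch of that decision, with illustrative names (Method, invocation_count, profile_limit are not the HotSpot identifiers):

    // Hedged sketch of the profiling decision above; the names are
    // illustrative stand-ins, not HotSpot identifiers.
    #include <cstdio>

    struct Method {
      int   invocation_count = 0;       // counter kept in the methodOop (rcx above)
      void* method_data      = nullptr; // lazily created method data oop
    };

    void on_invocation(Method& m, int profile_limit) {
      m.invocation_count++;                      // increment counter in methodOop
      if (m.invocation_count >= profile_limit) { // should we profile yet?
        if (m.method_data == nullptr) {
          // No method data exists: take the profile_method slow path,
          // which allocates it before profiling continues.
          std::printf("profile_method: allocating method data\n");
          m.method_data = &m;  // stand-in for the real allocation
        }
      }
    }

    int main() {
      Method m;
      for (int i = 0; i < 12; i++) on_invocation(m, 10);
      return 0;
    }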
418 // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
439 // and jump to the interpreted entry.
466 // be sure to change this if you add/subtract anything to/from the overhead area
474 // then we need to verify there is enough stack space remaining
479 // compute rsp as if this were going to be the last frame on
510 // Add stack base to locals and subtract stack size
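The three matches above outline the stack-overflow check: compute what rsp would be if this were the last frame, derive the lowest legal stack address from the stack base and size, and compare. A hedged sketch in plain C++ (all names assumed):

    #include <cstdint>

    bool enough_stack_space(uintptr_t locals,      // address of the locals area
                            uintptr_t frame_bytes, // full size of the new frame
                            uintptr_t stack_base,  // highest address of the stack
                            uintptr_t stack_size)  // total stack size in bytes
    {
      uintptr_t lowest_allowed = stack_base - stack_size;  // stack grows down
      uintptr_t would_be_rsp   = locals - frame_bytes;     // rsp if last frame
      return would_be_rsp > lowest_allowed;
    }

    int main() {
      // 64 KiB stack based at 0x20000; new 512-byte frame, locals at 0x1f000.
      return enough_stack_space(0x1f000, 512, 0x20000, 0x10000) ? 0 : 1;
    }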
620 __ push(0); // reserve word for pointer to expression stack bottom
639 // rsi: senderSP must be preserved for slow path, set SP to it on fast path
670 // Shift codes right to get the index on the right.
710 // Need to differentiate between igetfield, agetfield, bgetfield etc.
714 // Make sure we don't need to mask rdx after the above shift
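These matches describe the fast-accessor dispatch trick: the result-type code sits in the topmost bits of the constant-pool-cache flags word, so one right shift extracts the index and leaves no high bits that would need masking. A sketch with an assumed bit position:

    #include <cstdint>
    #include <cassert>

    constexpr unsigned kTosBits = 28;  // assumed position of the type code

    unsigned type_index(uint32_t flags) {
      return flags >> kTosBits;  // high bits are gone; no mask of rdx needed
    }

    int main() {
      assert(type_index(0x50000000u) == 5);  // e.g. a float getfield code
      return 0;
    }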
751 __ mov(rsp, rsi); // set sp to sender sp
775 // * In the G1 code we do not check whether we need to block for
785 // of java.lang.Reference) and jump to the slow path if null. If the
787 // and so we don't need to call the G1 pre-barrier. Thus we can use the
788 // regular method entry code to generate the NPE.
795 // rsi: senderSP must be preserved for slow path, set SP to it on fast path
806 // If the receiver is null then it is OK to jump to the slow path.
828 // Generate the G1 pre-barrier code to log the value of
841 __ mov(rsp, rsi); // set sp to sender sp
851 // If G1 is not enabled then attempt to go through the accessor entry point
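The matches above belong to the java.lang.Reference.get() intrinsic entry: a null receiver may simply fall to the regular entry (which raises the NPE), while a successful referent load must be logged through the G1 SATB pre-barrier. A hedged C++ sketch of that control flow (satb_log and the offset handling are assumptions, not the HotSpot API):

    #include <cstddef>

    typedef void* oop;

    static oop g_satb_log_slot;  // stand-in for the SATB enqueue buffer
    static void satb_log(oop value) { g_satb_log_slot = value; }

    oop reference_get(oop receiver, size_t referent_offset, bool* slow_path) {
      if (receiver == nullptr) {  // OK to jump to the slow path (NPE there)
        *slow_path = true;
        return nullptr;
      }
      oop referent = *(oop*)((char*)receiver + referent_offset);
      satb_log(referent);         // G1 pre-barrier: log the loaded referent
      *slow_path = false;
      return referent;
    }

    int main() {
      bool slow;
      reference_get(nullptr, 8, &slow);  // null receiver -> slow path
      return slow ? 0 : 1;
    }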
881 // to the stack
912 __ stop("tried to execute non-native method as native");
918 __ stop("tried to execute abstract method in interpreter");
924 // would try to exit the monitor of synchronized methods which haven't
926 // _do_not_unlock_if_synchronized to true. The remove_activation will
996 __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
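In C terms the andptr above rounds rsp down to a 16-byte boundary, which is what gcc-compiled runtime code expects before using XMM instructions. A tiny illustration:

    #include <cstdint>
    #include <cassert>

    uintptr_t align_down_16(uintptr_t sp) {
      return sp & (uintptr_t)-16;  // same effect as andptr(rsp, -16)
    }

    int main() {
      assert(align_down_16(0x1008) == 0x1000);
      assert(align_down_16(0x1000) == 0x1000);
      return 0;
    }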
1011 assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp, "adjust this code");
1037 // pass handle to mirror
1063 // points into the right code segment. It does not have to be the correct return pc.
1077 // Change state to native
1088 // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
1089 // It is safe to do this push because state is _thread_in_native and return address will be found
1092 // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
1093 // If the order changes or anything else is added to the stack the code in
1094 // interpreter_frame_result will have to be changed.
1123 // We use the current thread pointer to calculate a thread specific
1124 // offset to write to within the page. This minimizes bus traffic
1125 // due to cache line collision.
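This three-line comment describes the memory-serialization page: each thread hashes its own thread pointer to a distinct offset, so the serializing stores land on different cache lines. A sketch with assumed shift and page size:

    #include <cstdint>

    static volatile int32_t serialize_page[4096 / sizeof(int32_t)];

    void write_serialize_page(const void* thread) {
      uintptr_t t = (uintptr_t)thread;
      // Derive a per-thread slot inside the page from the thread pointer.
      size_t slot = (t >> 6) & (sizeof(serialize_page) / sizeof(int32_t) - 1);
      serialize_page[slot] = 1;
    }

    int main() {
      int x;
      write_serialize_page(&x);
      return 0;
    }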
1149 // Also can't use call_VM_leaf, as it will check to see if rsi & rdi are
1150 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1203 // restore rsi to have a legal interpreter frame,
1206 __ get_method(method); // method is junk from thread_in_native to now.
1215 // Note: At some point we may want to unify this with the code used in call_VM_base();
1231 // to check that the object has not been unlocked by an explicit monitorexit bytecode.
1240 // Entry already unlocked, need to throw exception
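These two matches cover the unlock-on-exit check for synchronized native methods: a monitor entry whose object slot is already null was unlocked by an explicit monitorexit, and exiting again must throw. A hedged sketch (BasicObjectLock layout assumed):

    #include <stdexcept>

    struct BasicObjectLock {
      void* obj;  // becomes null once the entry has been unlocked
    };

    void unlock_on_method_exit(BasicObjectLock* entry) {
      if (entry->obj == nullptr) {
        // Entry already unlocked: IllegalMonitorStateException in the VM.
        throw std::runtime_error("IllegalMonitorStateException");
      }
      entry->obj = nullptr;  // stand-in for the real unlock sequence
    }

    int main() {
      BasicObjectLock lock{&lock};
      unlock_on_method_exit(&lock);  // normal exit path
      return 0;
    }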
1257 // restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
1266 __ mov(rsp, t); // set sp to sender sp
1279 // Generic interpreted method entry to (asm) interpreter
1340 __ stop("tried to execute native method as non-native");
1346 __ stop("tried to execute abstract method in interpreter");
1352 // would try to exit the monitor of synchronized methods which haven't
1354 // _do_not_unlock_if_synchronized to true. The remove_activation will
1422 // We have decided to profile this method in the interpreter
1465 // Assuming that we don't go to one of the trivial specialized
1466 // entries, the stack will look as shown below when we are ready to execute
1471 // the return address is moved to the end of the locals).
1526 // the compiled version to the intrinsic version.
1548 // Save space for one monitor to get into the interpreted method in case
1554 // be sure to change this if you add/subtract anything to/from the overhead area
1579 // The frame interpreter_frame, if not NULL, is guaranteed to be the right size,
1580 // as determined by a previous call to this method.
1581 // It is also guaranteed to be walkable even though it is in a skeletal state
1592 // Since the callee parameters already account for the callee's params we only need to account for
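The match above records why frame sizing can ignore the parameters: they already occupy the caller's expression stack, so only the locals beyond them add space. Illustrative arithmetic with assumed names:

    #include <cassert>

    int frame_words(int max_locals, int size_of_parameters,
                    int monitor_words, int overhead_words) {
      int extra_locals = max_locals - size_of_parameters;  // params already there
      return overhead_words + extra_locals + monitor_words;
    }

    int main() {
      assert(frame_words(5, 2, 2, 8) == 13);
      return 0;
    }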
1656 // Restore sp to interpreter_frame_last_sp even though we are going
1657 // to empty the expression stack for the exception processing.
1661 __ restore_bcp(); // rsi points to call/send
1680 __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)
1709 // Check to see whether we are returning to a deoptimized frame.
1725 // Compute size of arguments for saving when returning to deoptimized caller
1760 // mutations to those outgoing arguments to be preserved and other
1761 // constraints basically require this frame to look exactly as
1764 // last_sp) and the top of stack. Rather than force deopt to
1766 // fixup routine to move the mutated arguments onto the top of our
1783 // call profiling. We have to restore the mdp for the current bcp.
1824 __ jmp(rbx); // jump to exception handler of caller
1929 // Call a little run-time stub to avoid blow-up for each bytecode.