Lines Matching defs:to

16  * 2 along with this work; if not, write to the Free Software Foundation,
76 // op codes such as ld or ldx, only access disp() to get
125 // Patch instruction inst at offset inst_pos to refer to dest_pos
536 // Generate a bunch 'o stuff unique to V8
584 // nothing to do, (later) access of M[reg + offset]
622 call(L, relocInfo::none); // No relocation for call to pc+0x8
657 call(L, relocInfo::none); // No relocation for call to pc+0x8
676 // This code sequence is relocatable to any address, even on LP64.
682 Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
686 // value to be relocated.
700 call(L, relocInfo::none); // No relocation for call to pc+0x8
733 // We want to use ST_BREAKPOINT here, but the debugger is confused by it.
745 // We use the current thread pointer to calculate a thread specific
746 // offset to write to within the page. This minimizes bus traffic
747 // due to cache line collision.
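
The hits at 745-747 describe picking a thread-specific slot inside a shared serialization page so that concurrent threads touch different cache lines. A minimal C++ sketch of that idea; the page size, cache-line size, hashing scheme, and names are illustrative assumptions, not taken from the file:

    #include <cstdint>

    static const uintptr_t kPageSize      = 4096;  // assumed page size
    static const uintptr_t kCacheLineSize = 64;    // assumed cache-line size

    // Map the current thread pointer to an offset within one shared page so
    // that different threads write to different cache lines, minimizing bus
    // traffic from cache-line collisions.
    uintptr_t thread_serialize_offset(const void* thread_ptr) {
      uintptr_t id = reinterpret_cast<uintptr_t>(thread_ptr) >> 6;  // drop low bits
      return (id * kCacheLineSize) & (kPageSize - 1);
    }

    void serialize_memory(const void* thread_ptr, volatile char* page_base) {
      page_base[thread_serialize_offset(thread_ptr)] = 0;  // store to our own slot
    }
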
853 // Calls to C land
868 // call this when G2_thread is not known to be valid
870 save_frame(0); // to avoid clobbering O0
878 inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
903 save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
944 // smash G2_thread, as if the VM were about to anyway
971 // (Note: flags should always be zero before we get here, so it doesn't need to be set.)
974 // Verify that flags was zeroed on return to Java
976 save_frame(0); // to avoid clobbering O0
982 // Verify that flags was zeroed on return to Java
993 // will always be set to NULL. It is set here so that if we are doing a call to
994 // native (not VM) we capture the known pc and don't have to rely on the
1031 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod to helper frame for -Xprof
1040 // Always return last_Java_pc to zero
1042 // Always null flags after return to Java
1066 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
1452 Assembler::sethi(lo, d); // hardware version zero-extends to upper 32
1457 Assembler::sethi(~lo, d); // hardware version zero-extends to upper 32
1464 Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
1529 if (nWords & 1) ++nWords; // round up to double-word
1564 // The trick here is to use precisely the same memory word
1565 // that trap handlers also use to save the register.
1567 // it works fine to save the register's value, whether or not
1737 // %%%%%% need to implement this
1742 // %%%%%% need to implement this
1746 // %%%%%% need to implement this
1772 // Call indirectly to solve generation ordering problem
1776 // Enough to hold 8 64-bit registers.
1783 mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
1788 // Load address to call to into O7
1790 // Register call to verify_oop_subroutine
1810 // Call indirectly to solve generation ordering problem
1814 // Enough to hold 8 64-bit registers.
1821 ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
1826 // Load address to call to into O7
1828 // Register call to verify_oop_subroutine
1850 // O0 is now the oop to be checked. O7 is the return address.
1951 // factor long stop-sequence into subroutine to save space
1954 // call indirectly to solve generation ordering problem
1963 // save frame first to get O7 for return address
1964 // add one word to size in case struct is odd number of words long
1973 // factor long stop-sequence into subroutine to save space
1976 // call indirectly to solve generation ordering problem
1985 // restore(); done in callee to save space!
2005 // We must be able to turn interactive prompting off
2006 // in order to run automated test scripts on the VM
2029 save_frame(); // one more save to free up another O7 register
2032 // We expect pointer to message in I1. Caller must set it up in O1
2057 // In order to get locks to work, we need to fake an in_VM state
2318 // This code can be optimized to use the 64 bit shifts in V9.
2330 // more to take care of the special (rare) case where count is zero
2379 // This code can be optimized to use the 64 bit shifts in V9.
2391 // more to take care of the special (rare) case where count is zero
2442 // This code can be optimized to use the 64 bit shifts in V9.
2454 // more to take care of the special (rare) case where count is zero
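
The hits at 2318-2454 come from 64-bit shift helpers built out of 32-bit operations, with extra code for the rare case where the shift count is zero. A minimal C++ sketch of why that case is special (shifting a 32-bit operand by 32 is undefined, so the carry term from the low half must be suppressed when count is zero); the function name is illustrative:

    #include <cstdint>

    // Logical left shift of a 64-bit value held as two 32-bit halves, for
    // shift counts in [0, 32). count == 0 is handled separately because
    // lo >> (32 - count) would then shift a 32-bit operand by 32, which is
    // undefined.
    void lshl32(uint32_t hi, uint32_t lo, unsigned count,
                uint32_t& out_hi, uint32_t& out_lo) {
      if (count == 0) {            // rare special case: nothing moves
        out_hi = hi;
        out_lo = lo;
        return;
      }
      out_hi = (hi << count) | (lo >> (32 - count));  // bits carried out of lo
      out_lo = lo << count;
    }
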
2673 // registers to the save area).
2738 Untested("Need to verify global reg consistency");
2755 // try to get lock
2786 // load indirectly to solve generation ordering problem
2907 // On failure, execution transfers to the given label.
2924 assert(method_result->is_global(), "must be able to return value");
2952 // Round up to align_object_offset boundary
2988 // (invert the test to fall through to found_method...)
3001 // scan_temp[-scan_step] points to the vtable offset we need
3120 // We move this check to the front of the fast path because many
3239 // pointer were to sneak in here. Note that we have already loaded the
3260 // Load next super to check
3262 // Don't use load_heap_oop; we don't want to decode the element.
3271 // A miss means we are NOT a subtype and need to keep looping
3334 // pointers to allow age to be placed into low bits
3358 // that we are not the bias owner in the current epoch. We need to
3359 // figure out more details about the state of the header in order to
3364 // the prototype header is no longer biased and we have to revoke
3371 // bits of the mark word are equal to the epoch bits of the
3373 // only change at a safepoint.) If not, attempt to rebias the object
3375 // that the current epoch is invalid in order to do this because
3382 // about the owner; it might be set or it might be clear. Try to
3384 // fails we will go into the runtime to revoke the object's bias.
3394 // need to revoke that bias. The revocation will occur in the
3409 // circumstances _only_, we are allowed to use the current header's
3410 // value as the comparison value when doing the cas to acquire the
3412 // the bias from one thread to another directly in this situation.
3414 // FIXME: due to a lack of registers we currently blow away the age
3415 // bits in this situation. Should attempt to preserve them.
3422 // need to revoke that bias. The revocation will occur in the
3437 // to be biased any more. We are going to try to reset the mark of
3438 // this object to the prototype value and fall through to the
3441 // bias of this particular object, so it's okay to continue in the
3444 // FIXME: due to a lack of registers we currently blow away the age
3445 // bits in this situation. Should attempt to preserve them.
3449 // Fall through to the normal CAS-based lock, because no matter what
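
The hits between 3334 and 3449 narrate a biased-locking decision tree: check that the object and its class still permit biasing, compare epochs, and then enter, rebias, try to acquire the bias, or revoke it. A rough C++ sketch of those tests on a simplified mark word; the bit layout, masks, and names are illustrative assumptions, not taken from the file:

    #include <cstdint>

    // Simplified mark-word layout, for illustration only:
    //   [ owner thread | epoch | age | biased(1) | lock(2) ]
    static const uint64_t kLockMask      = 0x7;    // biased bit + 2 lock bits
    static const uint64_t kBiasedPattern = 0x5;    // "biased, unlocked"
    static const uint64_t kEpochMask     = 0x300;  // illustrative position
    static const uint64_t kOwnerMask     = ~uint64_t(0x3FF);

    enum BiasAction {
      kNormalCasLock,   // mark not biased: fall through to the CAS-based lock
      kFastEnter,       // already biased toward this thread
      kTryRebias,       // bias is from an expired epoch
      kTryAcquireBias,  // CAS our thread in; on failure the runtime revokes
      kRevokeBias       // class no longer permits biasing
    };

    BiasAction classify(uint64_t mark, uint64_t prototype, uint64_t self) {
      if ((mark & kLockMask) != kBiasedPattern)            return kNormalCasLock;
      if ((prototype & kLockMask) != kBiasedPattern)       return kRevokeBias;
      if ((mark & kEpochMask) != (prototype & kEpochMask)) return kTryRebias;
      if ((mark & kOwnerMask) == self)                     return kFastEnter;
      return kTryAcquireBias;
    }
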
3463 // Note: we do not have to check the thread ID for two reasons.
3479 // CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
3504 // extremely sensitive to the size of the code emitted by compiler_lock_object
3506 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
3539 // Save Rbox in Rscratch to be used for the cas operation
3542 // set Rmark to markOop | markOopDesc::unlocked_value
3586 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
3624 // Try to CAS m->owner from null to Self
3675 // on refworkload 0.83. If we need to reduce the size of the code
3679 // A more extreme idea is to always inflate on stack-lock recursion.
3682 // A brief experiment - requiring changes to synchronizer.cpp, interpreter,
3685 // control to the "slow" operators in synchronizer.cpp.
3717 // Try to CAS m->owner from null to Self
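
Hits 3624 and 3717 describe the same step: take ownership of an inflated monitor by CAS-ing its owner field from null to the current thread. A minimal sketch of that step with std::atomic; the Monitor type is illustrative:

    #include <atomic>

    struct Monitor {
      std::atomic<void*> owner{nullptr};  // null means unowned
    };

    // Returns true if the CAS from null to self won, i.e. we now own the monitor.
    bool try_enter(Monitor& m, void* self) {
      void* expected = nullptr;
      return m.owner.compare_exchange_strong(expected, self,
                                             std::memory_order_acquire);
    }
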
3767 // This could be related to inlining policies, code cache management, or
3845 // would also need to check for orphan monitors and stranded threads.
3847 // Finally, inflation is also used when T2 needs to assign a hashCode
3848 // to O and O is stack-locked by T1. The "stomp" race could cause
3849 // an assigned hashCode value to be lost. We can avoid that condition
3871 // %%%%% need to implement this
3875 // %%%%% need to implement this
3879 // %%%%% need to implement this
3884 // %%%%% need to implement this
3889 // %%%%% need to implement this
3894 // %%%%% need to implement this
3899 // %%%%% need to implement this
3904 // %%%%% need to implement this
3947 Register obj, // result: pointer to object after successful allocation
3975 // try to allocate
4031 Register obj, // result: pointer to object after successful allocation
4108 // the amount free in the tlab is too large to discard.
4112 // increment waste limit to prevent getting stuck on this slow path
4141 // set klass to intArrayKlass
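
The hits at 4108-4141 describe the TLAB slow-path policy: if the space left in the current TLAB is too large to throw away, allocate this object outside the TLAB and raise the waste limit so this slow path is not taken over and over; otherwise fill the old TLAB with a dummy int array and refill it. A small C++ sketch of that decision; all names are illustrative:

    #include <cstddef>

    struct Tlab {
      size_t free_words;          // words still unallocated in the TLAB
      size_t refill_waste_limit;  // max words we are willing to discard
      size_t waste_increment;     // bump applied each time we keep the TLAB
    };

    enum RefillDecision { kAllocateOutsideTlab, kDiscardAndRefill };

    RefillDecision on_slow_path(Tlab& t) {
      if (t.free_words > t.refill_waste_limit) {
        t.refill_waste_limit += t.waste_increment;  // avoid getting stuck here
        return kAllocateOutsideTlab;
      }
      return kDiscardAndRefill;  // fill old TLAB with a dummy array, get a new one
    }
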
4261 // Writes to successive stack pages until the offset is reached, to check for
4364 // This should be rare enough that we can afford to save all the
4457 // Do we need to load the previous value?
4479 // OK, it's not filtered, so we'll need to call enqueue. In the normal
4485 "Or we need to think harder.");
4507 // This gets to assume that o0 contains the object address.
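
The hits around 4457-4507 belong to a pre-write barrier of the snapshot-at-the-beginning kind: when concurrent marking is active, the value about to be overwritten is loaded and enqueued unless it is filtered out (for example, because it is null). A plain C++ sketch of that shape; every name below is illustrative, not from the file:

    #include <cstddef>

    struct SatbQueue {
      void** buf;
      size_t index;  // counts down; 0 means the local buffer is full
    };

    // Stand-in for the runtime call that drains the buffer and records pre_val.
    static void satb_runtime_enqueue(SatbQueue&, void* /*pre_val*/) { /* ... */ }

    void pre_barrier(void** field, SatbQueue& q, bool marking_active) {
      if (!marking_active) return;      // filtered: not concurrently marking
      void* pre_val = *field;           // load the previous value
      if (pre_val == nullptr) return;   // filtered: null needs no marking
      if (q.index == 0) {               // buffer full: call out to enqueue
        satb_runtime_enqueue(q, pre_val);
        return;
      }
      q.buf[--q.index] = pre_val;       // record the about-to-be-overwritten value
    }
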
4570 // This should be rare enough that we can afford to save all the
4634 // If the "store_addr" register is an "in" or "local" register, move it to
4829 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4841 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4854 // call indirectly to solve generation ordering problem
4860 // Compare char[] arrays aligned to 4 bytes.
4886 // Shift ary1 and ary2 to the end of the arrays, negate limit
4898 // annul LDUW if branch is not taken to prevent access past end of array
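
Hits 4860-4898 describe a comparison loop that points both array pointers at the end of the data and walks a negated limit up toward zero, loading one 4-byte chunk (two chars) per iteration; the annulled LDUW in the branch delay slot has no direct C equivalent. A C++ sketch of the same end-pointer / negative-offset idiom, assuming an even char count and 4-byte-aligned arrays:

    #include <cstdint>
    #include <cstring>

    bool char_arrays_equal(const uint16_t* a, const uint16_t* b, int nchars) {
      const char* end_a = reinterpret_cast<const char*>(a + nchars);
      const char* end_b = reinterpret_cast<const char*>(b + nchars);
      // Negative byte offset counts up to zero; the loop test is a compare
      // against zero instead of against a separate limit register.
      for (long off = -long(nchars) * 2; off < 0; off += 4) {
        uint32_t va, vb;
        std::memcpy(&va, end_a + off, 4);  // one 4-byte chunk from each array
        std::memcpy(&vb, end_b + off, 4);
        if (va != vb) return false;
      }
      return true;
    }
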
4907 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
4928 delayed()->add(to, count, end);
4932 // Clean the beginning of space up to next cache line.
4934 stx(G0, to, offs);
4937 // align to next cache line
4938 add(to, cache_line_size, to);
4939 and3(to, -cache_line_size, to);
4943 // BIS should not be used to zero tail (64 bytes)
4944 // to avoid zeroing a header of the following object.
4949 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
4950 add(to, cache_line_size, to);
4951 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);
4957 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);
4961 stx(G0, to, 0);
4962 add(to, 8, to);
4963 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
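
The hits from 4907 on are pieces of a block-init-store zeroing routine: ordinary stores up to the next cache-line boundary, block-initializing stores for whole cache lines, and ordinary stores again for the tail so that the header of a following object is never touched by BIS. A plain C++ sketch of that three-phase structure, with memset standing in for both the stx stores and the BIS store (which has no C equivalent); the sizes and alignment assumptions are illustrative:

    #include <cstdint>
    #include <cstring>

    static const uintptr_t kCacheLine = 64;  // assumed BIS block size

    // Zero [to, to + bytes). Assumes 'to' is 8-byte aligned and 'bytes' is a
    // multiple of 8.
    void bis_style_zero(char* to, size_t bytes) {
      char* end = to + bytes;

      // Phase 1: 8-byte stores up to the next cache-line boundary.
      while (to < end &&
             (reinterpret_cast<uintptr_t>(to) & (kCacheLine - 1)) != 0) {
        std::memset(to, 0, 8);
        to += 8;
      }

      // Phase 2: whole cache lines, stopping one line early so the tail is
      // written with plain stores rather than block-init stores.
      while (to + 2 * kCacheLine <= end) {
        std::memset(to, 0, kCacheLine);  // stand-in for stxa ...BLKINIT...
        to += kCacheLine;
      }

      // Phase 3: 8-byte stores for whatever is left.
      while (to < end) {
        std::memset(to, 0, 8);
        to += 8;
      }
    }
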