Lines Matching refs:to

16  * 2 along with this work; if not, write to the Free Software Foundation,
84 // return address of next instruction to execute
100 // Call stubs are used to call Java from C
304 // Note: I tried to make c2 return longs in O0/O1 and G1 so we wouldn't have to
348 // complete return to VM
411 // jump to exception handler
425 // If the compiler needs all registers to be preserved between the fault
433 // Note that we generate only this stub into a RuntimeStub, because it needs to be
454 // which has the ability to fetch the return PC out of thread-local storage
469 if (VerifyThread) __ mov(G2_thread, O0); // about to be smashed; pass early
524 // put addr in L0, then load through L0 to F0
526 __ set((intptr_t)&one, L0); __ ldf( FloatRegisterImpl::S, L0, 0, F1); // 1.0 to F1
528 // use add to put 2..18 in F2..F18
537 // use add to put 20..32 in F20..F32
611 // want to use call_VM, because _last_java_sp and such
627 // try to get lock
659 // prefers to work with just one kind of synch. instruction.
664 // try to replace O2 with O3
670 __ delayed()->mov(O2, O0); // report previous value to caller
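
The retry sequence above is the classic compare-and-swap pattern: attempt to install a new value, and hand the previously observed value back to the caller so it can decide whether to retry. A minimal C++ sketch of the same contract, using std::atomic rather than the SPARC casa instruction (helper name hypothetical):

    #include <atomic>

    // Sketch only: swap in 'new_val' if the word still holds 'expected',
    // and report the previous value to the caller either way.
    static int cas_report_previous(std::atomic<int>& word, int expected, int new_val) {
      word.compare_exchange_strong(expected, new_val);
      return expected;  // on failure, 'expected' was updated to the observed value
    }
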
822 // The following routine generates a subroutine to throw an asynchronous
925 __ retl(); // Result in Rret is zero; flags set to Z
928 __ ret(); // Result in Rret is zero; flags set to Z
940 __ retl(); // Result in Rret is != 0; flags set to NZ
943 __ ret(); // Result in Rret is != 0; flags set to NZ
999 const Register to = O1;
1001 const Register to_from = O3; // to - from
1004 __ subcc(to, from, to_from);
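
The 'to_from' difference computed here drives the disjointness test: interpreted as an unsigned number, a single comparison of (to - from) against the byte count says whether a forward copy is safe. A hedged C++ sketch of the idea (helper name hypothetical):

    #include <cstdint>
    #include <cstddef>

    // Forward copy is safe when the regions are disjoint or 'to' is below
    // 'from'; if to < from the subtraction wraps to a huge unsigned value,
    // so one compare covers both cases. (to == from falls to the backward
    // path, which also handles it correctly.)
    static bool forward_copy_safe(const void* from, const void* to, size_t byte_count) {
      return ((uintptr_t)to - (uintptr_t)from) >= byte_count;
    }
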
1102 // Use two shifts to clear out those low-order two bits! (Cannot be optimized into one shift.)
1125 typedef void (StubGenerator::*CopyLoopFunc)(Register from, Register to, Register count, int count_dec,
1128 void disjoint_copy_core(Register from, Register to, Register count, int log2_elem_size,
1137 prefetch_dist = (prefetch_dist + (iter_size-1)) & (-iter_size); // round up to one iteration copy size
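
The masking expression is the standard round-up-to-a-power-of-two idiom; spelled out as standalone C++:

    // Round x up to the next multiple of n, where n is a power of two.
    // With n = 16: 13 -> 16, 16 -> 16, 17 -> 32.
    static int round_up_pow2(int x, int n) {
      return (x + (n - 1)) & -n;
    }
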
1150 // to <= from || to >= from+count
1151 // but BIS will stomp over 'from' if (to > from-tail_size && to <= from)
1152 __ sub(from, to, O4);
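
Combining the two comment lines above: the block-initializing-store path is taken only when a forward copy is safe and the destination does not sit just below the source, where BIS's cache-line zeroing would clobber bytes not yet read. A hedged sketch of that predicate ('tail_size' standing for the 64 bytes + iter_size reserve mentioned just below):

    #include <cstddef>

    // Sketch of the guard implied by the comments above; not the stub's code.
    static bool bis_copy_safe(const char* from, const char* to,
                              size_t count, size_t tail_size) {
      bool disjoint_fwd = (to <= from) || (to >= from + count);
      bool stomps_from  = (to > from - tail_size) && (to <= from);
      return disjoint_fwd && !stomps_from;
    }
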
1157 // BIS should not be used to copy the tail (64 bytes + iter_size)
1158 // to avoid zeroing the values that follow.
1161 if (prefetch_count > 0) { // rounded up to one iteration count
1168 (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy_prefetch, true, true);
1173 (this->*copy_loop_func)(from, to, count, count_dec, L_block_copy, false, true);
1185 if (prefetch_count > 0) { // rounded up to one iteration count
1193 (this->*copy_loop_func)(from, to, count, count_dec, L_copy_prefetch, true, false);
1198 (this->*copy_loop_func)(from, to, count, count_dec, L_copy, false, false);
1206 void copy_16_bytes_shift_loop(Register from, Register to, Register count, int count_dec,
1219 __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
1224 __ inc(to, 16);
1233 __ stxa(O3, to, -16);
1234 __ stxa(O4, to, -8);
1236 __ stx(O3, to, -16);
1237 __ stx(O4, to, -8);
1247 // to - destination array aligned to 8 bytes
1248 // count - number of elements to copy, >= the equivalent of 16 bytes
1249 // count_dec - decrement of the element count equivalent to 16 bytes
1252 void copy_16_bytes_forward_with_shift(Register from, Register to,
1272 // to form 2 aligned 8-byte chunks to store.
1280 disjoint_copy_core(from, to, count, log2_elem_size, 16, copy_16_bytes_shift_loop);
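
When source and destination disagree in alignment mod 8, this shift loop loads aligned 8-byte words from the source and merges adjacent pairs into aligned stores. A hedged C++ sketch of one merge step, assuming a big-endian machine (as on SPARC):

    #include <cstdint>

    // 'w0' and 'w1' are two consecutive aligned 8-byte loads; 'misalign'
    // (1..7) is the source offset within an 8-byte word. Big-endian: the
    // wanted bytes are the tail of w0 followed by the head of w1.
    static uint64_t merge_step(uint64_t w0, uint64_t w1, unsigned misalign) {
      unsigned lshift = misalign * 8;
      return (w0 << lshift) | (w1 >> (64 - lshift));
    }
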
1288 __ inc(to, 8);
1292 __ stx(G3, to, -8);
1306 // end_to - destination array end address aligned to 8 bytes
1307 // count - number of elements to copy, >= the equivalent of 16 bytes
1308 // count_dec - decrement of the element count equivalent to 16 bytes
1331 // to form 2 aligned 8-byte chunks to store.
1374 // "from" and "to" addresses are assumed to be heapword aligned.
1378 // to: O1
1390 const Register to = O1; // destination array address
1410 // of this arraycopy call site that both 'from' and 'to' addresses
1417 // copy a 4-byte word if necessary to align 'to' to 8 bytes
1418 __ andcc(to, 7, G0);
1422 __ inc(to, 4);
1424 __ st(O3, to, -4);
1428 // copy bytes to align 'to' on 8 byte boundary
1429 __ andcc(to, 7, G1); // misaligned bytes
1432 __ inc(G1, 8); // bytes needed to reach the next 8-byte alignment
1438 __ stb(O3, to, 0);
1440 __ delayed()->inc(to);
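
The loop above peels off single bytes until the destination reaches an 8-byte boundary, after which the bulk 16-bytes-per-iteration loop takes over. A minimal sketch of the peeling logic (hypothetical helper, not the stub):

    #include <cstdint>
    #include <cstddef>

    static void align_dst_to_8(const char*& from, char*& to, size_t& count) {
      while ((((uintptr_t)to) & 7) != 0 && count > 0) {
        *to++ = *from++;  // copy one byte, advancing both pointers
        --count;
      }
    }
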
1448 // the same alignment mod 8, otherwise fall through to the next
1453 copy_16_bytes_forward_with_shift(from, to, count, 0, L_copy_byte);
1469 __ stb(O3, to, offset);
1483 // "from" and "to" addresses are assumed to be heapword aligned.
1487 // to: O1
1502 const Register to = O1; // destination array address
1505 const Register end_to = to; // destination array end address
1517 __ add(to, count, end_to); // offset after last copied element
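
Conjoint copies with a possibly overlapping, higher destination run backward from the end addresses computed here. The scalar equivalent, as a hedged sketch:

    #include <cstddef>

    // Copy backward so an overlapping 'to' above 'from' never reads
    // bytes it has already overwritten.
    static void copy_bytes_backward(const char* from, char* to, size_t count) {
      const char* end_from = from + count;
      char*       end_to   = to   + count;
      while (count-- > 0) {
        *--end_to = *--end_from;
      }
    }
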
1528 // copy bytes to align 'end_to' on 8 byte boundary
1544 // Both arrays are aligned to 8 bytes in the 64-bit VM.
1552 // the same alignment mod 8, otherwise jump to the next
1595 // "from" and "to" addresses are assumed to be heapword aligned.
1599 // to: O1
1611 const Register to = O1; // destination array address
1631 // of this arraycopy call site that both 'from' and 'to' addresses
1638 // copy a 2-element word if necessary to align 'to' to 8 bytes
1639 __ andcc(to, 7, G0);
1643 __ inc(to, 4);
1645 __ st(O3, to, -4);
1649 // copy 1 element if necessary to align 'to' on a 4-byte boundary
1650 __ andcc(to, 3, G0);
1654 __ inc(to, 2);
1656 __ sth(O3, to, -2);
1659 // copy 2 elements to align 'to' on an 8-byte boundary
1660 __ andcc(to, 7, G0);
1666 __ inc(to, 4);
1667 __ sth(O3, to, -4);
1668 __ sth(O4, to, -2);
1676 // the same alignment mod 8, otherwise fall through to the next
1681 copy_16_bytes_forward_with_shift(from, to, count, 1, L_copy_2_bytes);
1697 __ sth(O3, to, offset);
1711 // "to" address is assumed to be heapword aligned.
1714 // to: O0
1723 const Register to = O0; // destination array address (array to fill)
1773 __ andcc(to, 1, G0);
1776 __ stb(value, to, 0);
1777 __ inc(to, 1);
1782 __ andcc(to, 2, G0);
1785 __ sth(value, to, 0);
1786 __ inc(to, 2);
1793 // align to 8 bytes; we know we are 4-byte aligned to start
1794 __ andcc(to, 7, G0);
1797 __ stw(value, to, 0);
1798 __ inc(to, 4);
1824 __ stx(value, to, 0);
1825 __ stx(value, to, 8);
1826 __ stx(value, to, 16);
1827 __ stx(value, to, 24);
1831 __ delayed()->add(to, 32, to);
1845 __ stx(value, to, 0);
1848 __ delayed()->add(to, 8, to);
1862 __ stw(value, to, 0);
1864 __ inc(to, 4);
1870 __ sth(value, to, 0);
1872 __ inc(to, 2);
1878 __ stb(value, to, 0);
1896 __ stb(value, to, 0);
1897 __ inc(to, 1);
1901 __ stb(value, to, 0);
1902 __ stb(value, to, 1);
1903 __ inc(to, 2);
1907 __ stb(value, to, 0);
1908 __ stb(value, to, 1);
1909 __ stb(value, to, 2);
1911 __ delayed()->stb(value, to, 3);
1920 __ sth(value, to, 0);
1921 __ inc(to, 2);
1925 __ sth(value, to, 0);
1927 __ delayed()->sth(value, to, 2);
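
The wide stores above only work because the fill value has first been replicated across the word; a hedged sketch of that widening for a byte fill:

    #include <cstdint>

    // Replicate an 8-bit value into all eight lanes of a 64-bit word so a
    // single stx-style store covers eight elements (idea only, not the stub).
    static uint64_t splat_byte(uint8_t v) {
      uint64_t x = v;
      x |= x << 8;   // 16 bits
      x |= x << 16;  // 32 bits
      x |= x << 32;  // 64 bits
      return x;
    }
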
1934 // "from" and "to" addresses are assumed to be heapword aligned.
1938 // to: O1
1953 const Register to = O1; // destination array address
1956 const Register end_to = to; // destination array end address
1958 const Register byte_count = O3; // byte count to copy
1971 __ add(to, byte_count, end_to); // offset after last copied element
1982 // copy 1 element if necessary to align 'end_to' on a 4-byte boundary
1992 // copy 2 elements to align 'end_to' on an 8-byte boundary
2006 // Both arrays are aligned to 8 bytes in the 64-bit VM.
2014 // the same alignment mod 8, otherwise jump to the next
2057 void copy_16_bytes_loop(Register from, Register to, Register count, int count_dec,
2067 __ prefetch(to, ArraycopyDstPrefetchDistance, Assembler::severalWritesAndPossiblyReads);
2072 __ inc(to, 16);
2082 __ stxa(O3, to, -16);
2083 __ stxa(O4, to, -8);
2085 __ stx(O3, to, -16);
2086 __ stx(O4, to, -8);
2095 // If "aligned" is true, the "from" and "to" addresses are assumed
2096 // to be heapword aligned.
2100 // to: O1
2109 const Register to = O1; // destination array address
2115 // of this arraycopy call site that both 'from' and 'to' addresses
2133 // copy 1 element to align 'to' on an 8 byte boundary
2134 __ andcc(to, 7, G0);
2138 __ inc(to, 4);
2140 __ st(O3, to, -4);
2150 // to form 2 aligned 8-byte chunks to store.
2159 disjoint_copy_core(from, to, count, 2, 16, copy_16_bytes_loop);
2179 __ st(O3, to, offset);
2187 // "from" and "to" addresses are assumed to be heapword aligned.
2191 // to: O1
2219 // If "aligned" is true, the "from" and "to" addresses are assumed
2220 // to be heapword aligned.
2224 // to: O1
2234 const Register to = O1; // destination array address
2237 const Register end_to = to; // destination array end address
2240 const Register byte_count = O3; // byte count to copy
2243 __ add(to, byte_count, end_to); // offset after last copied element
2249 // copy 1 element to align 'to' on an 8 byte boundary
2268 // to form 2 aligned 8-byte chunks to store.
2320 // "from" and "to" addresses are assumed to be heapword aligned.
2324 // to: O1
2355 void copy_64_bytes_loop(Register from, Register to, Register count, int count_dec,
2365 __ prefetch(to, ArraycopyDstPrefetchDistance+off, Assembler::severalWritesAndPossiblyReads);
2371 __ stxa(O4, to, off+0);
2372 __ stxa(O5, to, off+8);
2374 __ stx(O4, to, off+0);
2375 __ stx(O5, to, off+8);
2381 __ delayed()->inc(to, 64);
2391 // to: O1
2418 const Register to = O1; // destination array address
2434 __ delayed()->mov(to, to64);
2454 __ stx(O3, to, offset0);
2456 __ stx(G3, to, offset8);
2466 __ stx(O3, to, offset0);
2477 // to: O1
2509 // to: O1
2516 const Register to = O1; // destination array address
2529 __ stx(O2, to, offset8);
2531 __ stx(O3, to, offset0);
2539 __ stx(O3, to, 0);
2549 // to: O1
2580 // "from" and "to" addresses are assumed to be heapword aligned.
2584 // to: O1
2591 const Register to = O1; // destination array address
2607 __ mov(to, G1);
2631 // "from" and "to" addresses are assumed to be heapword aligned.
2635 // to: O1
2643 const Register to = O1; // destination array address
2661 __ mov(to, G1);
2726 // to: O1
2761 // otherwise, we would have to make conjoint checks
2785 // Empty array: Nothing to do.
2801 __ inc(O5_offset, heapOopSize); // step to next offset
2813 // branch to this on success:
2817 // It was a real error; we must depend on the caller to finish the job.
2820 // and report their number to the caller.
2824 __ delayed()->not1(O2_count, O0); // report (-1^K) to caller
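
The (-1 ^ K) convention packs both outcomes into one register: zero means every element was copied, and a negative value is the bitwise NOT of the number copied before a type check failed. A hedged sketch of the decoding a caller could do:

    #include <cstddef>

    // ret == 0  : all 'total' elements were copied.
    // ret == ~K : K elements were copied, then an element failed the check.
    static ptrdiff_t elements_copied(ptrdiff_t ret, ptrdiff_t total) {
      return (ret == 0) ? total : ~ret;
    }
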
2844 // to: O1
2848 // to a long, int, short, or byte copy loop.
2927 // Have to clean up the high 32 bits of 'src_pos' and 'dst_pos'.
2948 // O0 == -1 - need to call System.arraycopy
2985 // Assembler stubs will be used for this call to arraycopy
3051 // Load a 32-bit signed value. Use the br() instruction with it to check icc.
3075 // At this point, it is known to be a typeArray (array_tag 0x3).
3088 __ delayed(); // match next insn to prev branch
3109 // the following registers should be set before the jump to the corresponding stub
3111 const Register to = O1; // destination array address
3114 // 'from', 'to', 'count' registers should be set in this order
3117 BLOCK_COMMENT("scale indexes to element size");
3121 __ add(dst, dst_pos, to); // dst_addr
3164 __ add(dst, dst_pos, to); // dst_addr
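
Both address computations follow the same pattern: array base plus the element index shifted by log2 of the element size (the header offset has already been folded into the base at this point). As a one-line sketch:

    #include <cstddef>

    // addr = base + (pos << log2_elem_size); e.g. log2_elem_size == 2 for jint.
    static char* element_addr(char* base, size_t pos, int log2_elem_size) {
      return base + (pos << log2_elem_size);
    }
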
3173 // lduw(G4_dst_klass, lh_offset, O5_temp); // hoisted to delay slot
3177 // It is safe to examine both src.length and dst.length.
3178 __ delayed(); // match next insn to prev branch
3188 __ add(dst, dst_pos, to); // dst_addr
3192 assert_different_registers(from, to, count, sco_temp,
3220 // "to" address is aligned to jlong (8 bytes).
3223 // to: O0
3232 const Register to = O0; // destination address (memory to zero)
3237 __ sllx(count, LogHeapWordSize, count); // to byte count
3239 __ bis_zeroing(to, count, temp, Ldone);
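
The shift above converts a heapword count into a byte count before handing off to the BIS zeroing primitive; assuming 8-byte heap words (LogHeapWordSize == 3 on a 64-bit VM), that is simply:

    #include <cstddef>

    static size_t heapwords_to_bytes(size_t word_count) {
      const int kLogHeapWordSize = 3;  // assumed: 8-byte heap words
      return word_count << kLogHeapWordSize;
    }
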
3287 // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it).
3383 // Note: This is code that could be shared among different platforms - however the benefit seems to be smaller than
3418 // These entry points require SharedInfo::stack0 to be set up in non-core builds
3461 // put extra information in the stub code, to make it more readable