Lines Matching refs:aligned

893     __ add(SP,-4*wordSize,SP);  // Make space for 4 temps (stack must be 2 words aligned)
1247 // to - destination array aligned to 8 bytes
1258 // if both arrays have the same alignment mod 8, do an 8-byte aligned copy
1271 // Load 2 aligned 8-byte chunks and use one from the previous iteration
1272 // to form 2 aligned 8-byte chunks to store.
1306 // end_to - destination array end address aligned to 8 bytes
1309 // L_aligned_copy - aligned copy exit label
1317 // if both arrays have the same alignment mod 8, do an 8-byte aligned copy
1330 // Load 2 aligned 8-byte chunks and use one from the previous iteration
1331 // to form 2 aligned 8-byte chunks to store.
1373 // Generate stub for disjoint byte copy. If "aligned" is true, the
1374 // "from" and "to" addresses are assumed to be heapword aligned.
1381 address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) {
1408 if (aligned) {
1409 // 'aligned' == true when it is known statically during compilation
1411 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1444 if (!aligned)
1449 // code for aligned copy.
1451 // Also jump over aligned copy after the copy with shift completed.
1456 // Both arrays are 8-byte aligned, copy 16 bytes at a time
1459 generate_disjoint_long_copy_core(aligned);
1482 // Generate stub for conjoint byte copy. If "aligned" is true, the
1483 // "from" and "to" addresses are assumed to be heapword aligned.
1490 address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
1525 // Align end of arrays since they could be unaligned even
1526 // when the arrays themselves are aligned.
1543 if (aligned) {
1544 // Both arrays are aligned to 8 bytes in the 64-bit VM.
1553 // code for aligned copy (and subtracting 16 from 'count' before jump).
1555 // Also jump over aligned copy after the copy with shift completed.
1594 // Generate stub for disjoint short copy. If "aligned" is true, the
1595 // "from" and "to" addresses are assumed to be heapword aligned.
1602 address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) {
1629 if (aligned) {
1630 // 'aligned' == true when it is known statically during compilation
1632 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
1672 if (!aligned)
1677 // code for aligned copy.
1679 // Also jump over aligned copy after the copy with shift completed.
1684 // Both arrays are 8-byte aligned, copy 16 bytes at a time
1687 generate_disjoint_long_copy_core(aligned);
1710 // Generate stub for disjoint short fill. If "aligned" is true, the
1711 // "to" address is assumed to be heapword aligned.
1718 address generate_fill(BasicType t, bool aligned, const char* name) {
1769 if (!aligned && (t == T_BYTE || t == T_SHORT)) {
1791 if (!aligned) {
1793 // align to 8 bytes; we know we are 4-byte aligned to start
1933 // Generate stub for conjoint short copy. If "aligned" is true, the
1934 // "from" and "to" addresses are assumed to be heapword aligned.
1941 address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
1979 // Align end of arrays since they could be unaligned even
1980 // when the arrays themselves are aligned.
2005 if (aligned) {
2006 // Both arrays are aligned to 8 bytes in the 64-bit VM.
2015 // code for aligned copy (and subtracting 8 from 'count' before jump).
2017 // Also jump over aligned copy after the copy with shift completed.
2095 // If "aligned" is true, the "from" and "to" addresses are assumed
2096 // to be heapword aligned.
2103 void generate_disjoint_int_copy_core(bool aligned) {
2114 // 'aligned' == true when it is known statically during compilation
2116 // are HeapWordSize aligned (see LibraryCallKit::basictype2arraycopy()).
2122 if (!aligned)
2149 // Load 2 aligned 8-byte chunks and use one from the previous iteration
2150 // to form 2 aligned 8-byte chunks to store.
2165 } // !aligned
2170 generate_disjoint_long_copy_core(aligned);
2186 // Generate stub for disjoint int copy. If "aligned" is true, the
2187 // "from" and "to" addresses are assumed to be heapword aligned.
2194 address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) {
2208 generate_disjoint_int_copy_core(aligned);
2219 // If "aligned" is true, the "from" and "to" addresses are assumed
2220 // to be heapword aligned.
2227 void generate_conjoint_int_copy_core(bool aligned) {
2267 // Load 2 aligned 8-byte chunks and use one from the previous iteration
2268 // to form 2 aligned 8-byte chunks to store.
2319 // Generate stub for conjoint int copy. If "aligned" is true, the
2320 // "from" and "to" addresses are assumed to be heapword aligned.
2327 address generate_conjoint_int_copy(bool aligned, address nooverlap_target,
2343 generate_conjoint_int_copy_core(aligned);
2386 // "aligned" is ignored, because we must make the stronger
2387 // assumption that both addresses are always 64-bit aligned.
2415 void generate_disjoint_long_copy_core(bool aligned) {
2472 // "aligned" is ignored, because we must make the stronger
2473 // assumption that both addresses are always 64-bit aligned.
2480 address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) {
2493 generate_disjoint_long_copy_core(aligned);
2504 // "aligned" is ignored, because we must make the stronger
2505 // assumption that both addresses are always 64-bit aligned.
2512 void generate_conjoint_long_copy_core(bool aligned) {
2544 // "aligned" is ignored, because we must make the stronger
2545 // assumption that both addresses are always 64-bit aligned.
2552 address generate_conjoint_long_copy(bool aligned, address nooverlap_target,
2558 assert(aligned, "Should always be aligned");
2570 generate_conjoint_long_copy_core(aligned);
2579 // Generate stub for disjoint oop copy. If "aligned" is true, the
2580 // "from" and "to" addresses are assumed to be heapword aligned.
2587 address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name,
2613 generate_disjoint_int_copy_core(aligned);
2615 generate_disjoint_long_copy_core(aligned);
2618 generate_disjoint_int_copy_core(aligned);
2630 // Generate stub for conjoint oop copy. If "aligned" is true, the
2631 // "from" and "to" addresses are assumed to be heapword aligned.
2638 address generate_conjoint_oop_copy(bool aligned, address nooverlap_target,
2667 generate_conjoint_int_copy_core(aligned);
2669 generate_conjoint_long_copy_core(aligned);
2672 generate_conjoint_int_copy_core(aligned);
3220 // "to" address is aligned to jlong (8 bytes).
3256 // Always need aligned and unaligned versions
3268 // Always need aligned and unaligned versions
3286 // In 64-bit we need both aligned and unaligned versions of jint arraycopy.
3294 // In 32-bit, jints are always HeapWordSize aligned, so always use the aligned version
3295 // (in fact in 32-bit we always have a pre-loop part even in the aligned version,
3296 // because it uses 64-bit loads/stores, so the aligned flag is actually ignored).
3303 // It is always aligned
3342 // oop arraycopy is always aligned on 32-bit and 64-bit without compressed oops
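
The comment repeated at 1271-1272, 1330-1331, 2149-2150 and 2267-2268 is the heart of the mis-aligned path: once the destination has been brought to an 8-byte boundary by the pre-loop, every aligned 8-byte store is assembled from two aligned 8-byte source loads, one of which is carried over from the previous iteration. Below is a minimal little-endian C++ sketch of that idea, not the SPARC code the stub actually emits (the stub works with hardware loads, shifts and stores, and an aligned 8-byte load never crosses a page boundary; the portable sketch instead trims the first and last loads so it stays strictly inside the buffer). The helper name and its preconditions are illustrative only.

    #include <cstdint>
    #include <cstring>
    #include <cstddef>

    // Sketch only: 'to' is assumed 8-byte aligned (the stub's pre-loop has
    // already run), count > 0 and count is a multiple of 8; head and tail
    // elements are assumed to be handled separately, as in the stub.
    static void copy_with_shift(const uint8_t* from, uint8_t* to, size_t count) {
      size_t ofs = reinterpret_cast<uintptr_t>(from) & 7;
      if (ofs == 0) {
        // Same alignment mod 8: plain aligned 8-byte copy.
        std::memcpy(to, from, count);
        return;
      }
      const uint8_t* aligned_from = from - ofs;   // aligned-down source
      unsigned drop = 8u * (unsigned)ofs;         // bits consumed from 'prev'
      unsigned keep = 64u - drop;                 // bits taken from 'next'

      uint64_t prev = 0;                          // first aligned chunk, head trimmed
      std::memcpy(reinterpret_cast<uint8_t*>(&prev) + ofs, from, 8 - ofs);

      for (size_t i = 0; i < count; i += 8) {
        uint64_t next = 0;                        // next aligned chunk, tail trimmed
        size_t avail = count + ofs - (i + 8);     // source bytes still in bounds
        std::memcpy(&next, aligned_from + 8 + i, avail < 8 ? avail : 8);
        uint64_t merged = (prev >> drop) | (next << keep);  // little-endian combine
        std::memcpy(to + i, &merged, 8);          // one aligned 8-byte store
        prev = next;                              // reuse this chunk next iteration
      }
    }

The conjoint (overlapping) variants around 1306-1331 and 2267-2268 apply the same combining step while walking backwards from the aligned end of the arrays, which is why their headers first "align end of arrays" before entering the shifted loop.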
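The fill stub at 1718 relies on a related alignment pre-phase (1769, 1791-1793): fill element-wise until the destination reaches an 8-byte boundary, then store wide. One plausible shape of that logic for a byte fill, assuming the value is replicated into a 64-bit pattern; the names and structure here are hypothetical, not the stub's:

    #include <cstdint>
    #include <cstring>
    #include <cstddef>

    static void fill_bytes(uint8_t* to, uint8_t value, size_t count) {
      // Replicate the value into an 8-byte pattern, e.g. 0xAB -> 0xABABABABABABABAB.
      uint64_t pattern = value;
      pattern |= pattern << 8;
      pattern |= pattern << 16;
      pattern |= pattern << 32;

      // Head: single-byte stores until 'to' is 8-byte aligned (or count runs out).
      while (count > 0 && (reinterpret_cast<uintptr_t>(to) & 7) != 0) {
        *to++ = value;
        --count;
      }
      // Body: aligned 8-byte stores of the replicated pattern.
      while (count >= 8) {
        std::memcpy(to, &pattern, 8);
        to += 8;
        count -= 8;
      }
      // Tail: remaining bytes one at a time.
      while (count > 0) {
        *to++ = value;
        --count;
      }
    }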