/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
 */

	.file "memcpy.s"

/*
 * memcpy(s1, s2, len)
 *
 * Copy s2 to s1, always copy n bytes.
 * Note: this C code does not work for overlapped copies.
 * Memmove() and bcopy() do.
 *
 * Fast assembler language version of the following C-program for memcpy
 * which represents the `standard' for the C-library.
 *
 *	void *
 *	memcpy(void *s, const void *s0, size_t n)
 *	{
 *		if (n != 0) {
 *			char *s1 = s;
 *			const char *s2 = s0;
 *			do {
 *				*s1++ = *s2++;
 *			} while (--n != 0);
 *		}
 *		return (s);
 *	}
 */
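
/*
 * This file also implements memmove(s1, s2, len), which must handle
 * overlapping copies: it copies forward when that cannot clobber unread
 * source bytes, and backward (from the high addresses down) otherwise.
 * For orientation only, a minimal C sketch of those semantics (not the
 * tuned implementation below) is:
 *
 *	void *
 *	memmove(void *s, const void *s0, size_t n)
 *	{
 *		char *s1 = s;
 *		const char *s2 = s0;
 *		if (s2 >= s1 || s2 + n <= s1) {
 *			while (n != 0) {
 *				*s1++ = *s2++;
 *				n--;
 *			}
 *		} else {
 *			s1 += n;
 *			s2 += n;
 *			while (n != 0) {
 *				*--s1 = *--s2;
 *				n--;
 *			}
 *		}
 *		return (s);
 *	}
 */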

#include <sys/asm_linkage.h>
#include <sys/sun4asi.h>
#include <sys/trap.h>

#define ICACHE_LINE_SIZE 64
#define BLOCK_SIZE 64
#define FPRS_FEF 0x4

#define ALIGNED8_FPCOPY_THRESHOLD 1024
#define ALIGNED4_FPCOPY_THRESHOLD 1024
#define BST_THRESHOLD 65536

#define SHORTCOPY 3
#define SMALL_MAX 111
#define MEDIUM_MAX 255

	ANSI_PRAGMA_WEAK(memmove,function)
	ANSI_PRAGMA_WEAK(memcpy,function)

	ENTRY(memmove)
	prefetch [%o1], #n_reads
	prefetch [%o0], #n_writes
	cmp %o1, %o0 ! if from address is >= to address, copy forward
	bgeu,pt %ncc, .forcpy ! otherwise check whether the regions overlap
	sub %o0, %o1, %o4 ! get difference of the two addresses
	cmp %o2, %o4 ! compare size with difference of addresses
	bleu,pt %ncc, .forcpy ! if size <= difference, no overlap; copy forward
	nop

	!
	! an overlapped copy that must be done "backwards"
	!
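	!
	! In C terms, the decision above and the backward copy below amount
	! to (a hedged sketch only; the code below also unrolls and aligns):
	!
	!	if (src >= dst || (uintptr_t)(dst - src) >= len)
	!		goto forward_copy;	/* .forcpy */
	!	src += len;
	!	dst += len;
	!	while (len-- != 0)
	!		*--dst = *--src;	/* copy high addresses first */
	!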
.ovbc:
	mov %o0, %g1 ! save dest address for return val
	add %o1, %o2, %o1 ! get to end of source space
	add %o0, %o2, %o0 ! get to end of destination space

	cmp %o2, 64
	bgeu,pn %ncc, .dbalign
	nop
	cmp %o2, 4
	blt,pn %ncc, .byte
	sub %o2, 3, %o2
.byte4loop:
	ldub [%o1-1], %o3 ! load last byte
	stb %o3, [%o0-1] ! store last byte
	sub %o1, 4, %o1
	ldub [%o1+2], %o3 ! load 2nd from last byte
	stb %o3, [%o0-2] ! store 2nd from last byte
	sub %o0, 4, %o0
	ldub [%o1+1], %o3 ! load 3rd from last byte
	stb %o3, [%o0+1] ! store 3rd from last byte
	subcc %o2, 4, %o2
	ldub [%o1], %o3 ! load 4th from last byte
	bgu,pt %ncc, .byte4loop
	stb %o3, [%o0] ! store 4th from last byte
.byte:
	addcc %o2, 3, %o2
	bz,pt %ncc, .exit
.byteloop:
	dec %o1 ! decrement src address
	ldub [%o1], %o3 ! read a byte
	dec %o0 ! decrement dst address
	deccc %o2 ! decrement count
	bgu,pt %ncc, .byteloop ! loop until done
	stb %o3, [%o0] ! write byte
.exit:
	retl
	mov %g1, %o0

	.align 16
.dbalign:
	prefetch [%o1 - (4 * BLOCK_SIZE)], #one_read
	andcc %o0, 7, %o5 ! bytes till DST 8 byte aligned
	bz,pt %ncc, .dbmed
	sub %o2, %o5, %o2 ! update count
.dbalign1:
	dec %o1 ! decrement src address
	ldub [%o1], %o3 ! read a byte
	dec %o0 ! decrement dst address
	deccc %o5 ! decrement count
	bgu,pt %ncc, .dbalign1 ! loop until done
	stb %o3, [%o0] ! store a byte

! check for src long word alignment
.dbmed:
	andcc %o1, 7, %g0 ! chk src long word alignment
	bnz,pn %ncc, .dbbck
	nop
!
! Following code is for overlapping copies where src and dest
! are long word aligned
!
!
! For SPARC64-VI, prefetch is effective for both integer and fp register
! operations. There are no benefits in using the fp registers for
! aligned data copying.

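! In effect, the 32-byte backward loop below is (a hedged sketch assuming
! 8-byte aligned src and dst; names are illustrative only):
!
!	while (remaining >= 32) {
!		src -= 32; dst -= 32;
!		((uint64_t *)dst)[3] = ((const uint64_t *)src)[3];
!		((uint64_t *)dst)[2] = ((const uint64_t *)src)[2];
!		((uint64_t *)dst)[1] = ((const uint64_t *)src)[1];
!		((uint64_t *)dst)[0] = ((const uint64_t *)src)[0];
!		remaining -= 32;
!	}
!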
.dbmedl32enter:
	subcc %o2, 31, %o2 ! adjust length to allow cc test
	! for end of loop
	ble,pt %ncc, .dbmedl31 ! skip big loop if less than 32
	nop
.dbmedl32:
	ldx [%o1-8], %o4 ! load
	prefetch [%o1 - (8 * BLOCK_SIZE)], #one_read
	subcc %o2, 32, %o2 ! decrement length count
	stx %o4, [%o0-8] ! and store
	ldx [%o1-16], %o3 ! a block of 32 bytes
	sub %o1, 32, %o1 ! decrease src ptr by 32
	stx %o3, [%o0-16]
	ldx [%o1+8], %o4
	sub %o0, 32, %o0 ! decrease dst ptr by 32
	stx %o4, [%o0+8]
	ldx [%o1], %o3
	bgu,pt %ncc, .dbmedl32 ! repeat if at least 32 bytes left
	stx %o3, [%o0]
.dbmedl31:
	addcc %o2, 16, %o2 ! adjust remaining count
	ble,pt %ncc, .dbmedl15 ! skip if 15 or fewer bytes left
	nop !
	ldx [%o1-8], %o4 ! load and store 16 bytes
	sub %o1, 16, %o1 ! decrease src ptr by 16
	stx %o4, [%o0-8] !
	sub %o2, 16, %o2 ! decrease count by 16
	ldx [%o1], %o3 !
	sub %o0, 16, %o0 ! decrease dst ptr by 16
	stx %o3, [%o0]
.dbmedl15:
	addcc %o2, 15, %o2 ! restore count
	bz,pt %ncc, .dbexit ! exit if finished
	nop
	cmp %o2, 8
	blt,pt %ncc, .dbremain ! skip if 7 or fewer bytes left
	nop
	ldx [%o1-8], %o4 ! load 8 bytes
	sub %o1, 8, %o1 ! decrease src ptr by 8
	stx %o4, [%o0-8] ! and store 8 bytes
	subcc %o2, 8, %o2 ! decrease count by 8
	bnz,pt %ncc, .dbremain ! branch if more bytes remain
	sub %o0, 8, %o0 ! decrease dst ptr by 8
	retl
	mov %g1, %o0

!
! Following code is for overlapping copies where src and dest
! are not long word aligned
!
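! The loop below repairs the misalignment with faligndata. On big-endian
! SPARC, each output doubleword is effectively built from two adjacent
! aligned source doublewords; a hedged C sketch of one step, with
! off = src & 7 assumed nonzero and illustrative names only:
!
!	uint64_t hi = asrc[i];		/* doubleword at the lower address  */
!	uint64_t lo = asrc[i + 1];	/* doubleword at the higher address */
!	out[i] = (hi << (8 * off)) | (lo >> (8 * (8 - off)));
!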
	.align 16
.dbbck:
	rd %fprs, %o3 ! o3 = fprs

	! If fprs.fef == 0, set it. Checking it requires 2 instructions,
	! so set it unconditionally.
	wr %g0, 0x4, %fprs ! fprs.fef = 1

	alignaddr %o1, %g0, %o5 ! align src
	ldd [%o5], %d0 ! get first 8 byte block
	andn %o2, 7, %o4 ! prepare src ptr for finishup code
	cmp %o2, 32
	blt,pn %ncc, .dbmv8
	sub %o1, %o4, %o1 !
	cmp %o2, 4095 ! check for short memmoves
	blt,pn %ncc, .dbmv32enter ! go to no prefetch code
.dbmv64:
	ldd [%o5-8], %d2 ! load 8 bytes
	ldd [%o5-16], %d4 ! load 8 bytes
	sub %o5, 64, %o5 !
	ldd [%o5+40], %d6 ! load 8 bytes
	sub %o0, 64, %o0 !
	ldd [%o5+32], %d8 ! load 8 bytes
	sub %o2, 64, %o2 ! 64 less bytes to copy
	ldd [%o5+24], %d18 ! load 8 bytes
	cmp %o2, 64 ! do we have < 64 bytes remaining
	ldd [%o5+16], %d28 ! load 8 bytes
	ldd [%o5+8], %d30 ! load 8 bytes
	faligndata %d2, %d0, %d10 ! extract 8 bytes out
	prefetch [%o5 - (8 * BLOCK_SIZE)], #n_reads
	ldd [%o5], %d0 ! load 8 bytes
	std %d10, [%o0+56] ! store the current 8 bytes
	faligndata %d4, %d2, %d12 ! extract 8 bytes out
	prefetch [%o0 - (8 * BLOCK_SIZE)], #one_write
	std %d12, [%o0+48] ! store the current 8 bytes
	faligndata %d6, %d4, %d14 ! extract 8 bytes out
	std %d14, [%o0+40] ! store the current 8 bytes
	faligndata %d8, %d6, %d16 ! extract 8 bytes out
	std %d16, [%o0+32] ! store the current 8 bytes
	faligndata %d18, %d8, %d20 ! extract 8 bytes out
	std %d20, [%o0+24] ! store the current 8 bytes
	faligndata %d28, %d18, %d22 ! extract 8 bytes out
	std %d22, [%o0+16] ! store the current 8 bytes
	faligndata %d30, %d28, %d24 ! extract 8 bytes out
	std %d24, [%o0+8] ! store the current 8 bytes
	faligndata %d0, %d30, %d26 ! extract 8 bytes out
	bgeu,pt %ncc, .dbmv64
	std %d26, [%o0] ! store the current 8 bytes

	cmp %o2, 32
	blt,pn %ncc, .dbmvx
	nop
.dbmv32:
	ldd [%o5-8], %d2 ! load 8 bytes
.dbmv32enter:
	ldd [%o5-16], %d4 ! load 8 bytes
	sub %o5, 32, %o5 !
	ldd [%o5+8], %d6 ! load 8 bytes
	sub %o0, 32, %o0 !
	faligndata %d2, %d0, %d10 ! extract 8 bytes out
	ldd [%o5], %d0 ! load 8 bytes
	sub %o2, 32, %o2 ! 32 less bytes to copy
	std %d10, [%o0+24] ! store the current 8 bytes
	cmp %o2, 32 ! do we have < 32 bytes remaining
	faligndata %d4, %d2, %d12 ! extract 8 bytes out
	std %d12, [%o0+16] ! store the current 8 bytes
	faligndata %d6, %d4, %d14 ! extract 8 bytes out
	std %d14, [%o0+8] ! store the current 8 bytes
	faligndata %d0, %d6, %d16 ! extract 8 bytes out
	bgeu,pt %ncc, .dbmv32
	std %d16, [%o0] ! store the current 8 bytes
.dbmvx:
	cmp %o2, 8 ! do we have < 8 bytes remaining
	blt,pt %ncc, .dbmvfinish ! if yes, skip to finish up code
	nop
.dbmv8:
	ldd [%o5-8], %d2
	sub %o0, 8, %o0 ! since we are at the end
	! when we first enter the loop
	sub %o2, 8, %o2 ! 8 less bytes to copy
	sub %o5, 8, %o5
	cmp %o2, 8 ! do we have < 8 bytes remaining
	faligndata %d2, %d0, %d8 ! extract 8 bytes out
	std %d8, [%o0] ! store the current 8 bytes
	bgeu,pt %ncc, .dbmv8
	fmovd %d2, %d0
.dbmvfinish:
	and %o3, 0x4, %o3 ! fprs.du = fprs.dl = 0
	tst %o2
	bz,pt %ncc, .dbexit
	wr %o3, %g0, %fprs ! fprs = o3 restore fprs

.dbremain:
	cmp %o2, 4
	blt,pn %ncc, .dbbyte
	nop
	ldub [%o1-1], %o3 ! load last byte
	stb %o3, [%o0-1] ! store last byte
	sub %o1, 4, %o1
	ldub [%o1+2], %o3 ! load 2nd from last byte
	stb %o3, [%o0-2] ! store 2nd from last byte
	sub %o0, 4, %o0
	ldub [%o1+1], %o3 ! load 3rd from last byte
	stb %o3, [%o0+1] ! store 3rd from last byte
	subcc %o2, 4, %o2
	ldub [%o1], %o3 ! load 4th from last byte
	stb %o3, [%o0] ! store 4th from last byte
	bz,pt %ncc, .dbexit
.dbbyte:
	dec %o1 ! decrement src address
	ldub [%o1], %o3 ! read a byte
	dec %o0 ! decrement dst address
	deccc %o2 ! decrement count
	bgu,pt %ncc, .dbbyte ! loop until done
	stb %o3, [%o0] ! write byte
.dbexit:
	retl
	mov %g1, %o0
	SET_SIZE(memmove)


	ENTRY(memcpy)
	prefetch [%o1 + (0 * BLOCK_SIZE)], #n_reads
	prefetch [%o0 + (0 * BLOCK_SIZE)], #n_writes
.forcpy:
	cmp %o2, SMALL_MAX ! check for not small case
	bgu,pn %ncc, .medium ! go to larger cases
	mov %o0, %g1 ! save %o0
	cmp %o2, SHORTCOPY ! check for really short case
	ble,pn %ncc, .smallleft !
	or %o0, %o1, %o3 ! prepare alignment check
	andcc %o3, 0x3, %g0 ! test for alignment
	bz,pt %ncc, .smallword ! branch to word aligned case
	prefetch [%o1 + (1 * BLOCK_SIZE)], #n_reads
	! force dest align on 2 bytes
	andcc %o0, 1, %g0 ! test byte alignment
	bz,pt %ncc, .small_half
	sub %o2, 3, %o2 ! adjust count to allow cc zero test
	ldub [%o1], %o3
	add %o1, 1, %o1
	subcc %o2, 1, %o2
	add %o0, 1, %o0
	bz,pt %ncc, .smallfin
	stb %o3, [%o0-1]
	! force dest align on 4 bytes
.small_half:
	andcc %o0, 2, %g0 ! test half word alignment
	bz,pt %ncc, .small_4
	nop
	ldub [%o1], %o3
	sllx %o3, 8, %o3
	add %o0, 2, %o0
	ldub [%o1+1], %o4
	or %o3, %o4, %o3
	add %o1, 2, %o1
	subcc %o2, 2, %o2
	ble,pt %ncc, .smallfin
	sth %o3, [%o0-2]
	! dest is now aligned on 4 byte boundary
.small_4:
	andcc %o1, 1, %g0
	bnz,pt %ncc, .smallnotalign4
	nop
.small_half4:
	lduh [%o1], %o3 ! read byte
	add %o1, 4, %o1 ! advance SRC by 4
	sllx %o3, 16, %o3
	subcc %o2, 4, %o2 ! reduce count by 4
	lduh [%o1-2], %o4
	add %o0, 4, %o0 ! advance DST by 4
	or %o3, %o4, %o3
	bgu,pt %ncc, .small_half4 ! loop til 3 or fewer bytes remain
	stw %o3, [%o0-4]

	ba .smallfin
	nop

	.align 16
.smallnotalign4:
	subcc %o2, 4, %o2 ! reduce count by 4
	ldub [%o1], %o3 ! read byte
	sllx %o3, 16, %o3
	add %o1, 4, %o1 ! advance SRC by 4
	lduh [%o1-3], %o4 ! repeat for a total of 4 bytes
	or %o3, %o4, %o3
	sllx %o3, 8, %o3
	ldub [%o1-1], %o4
	add %o0, 4, %o0 ! advance DST by 4
	or %o3, %o4, %o3
	bgu,pt %ncc, .smallnotalign4 ! loop til 3 or fewer bytes remain
	stw %o3, [%o0-4]
.smallfin:
	add %o2, 3, %o2 ! restore count
.smallleft:
	tst %o2
	bz,pt %ncc, .smalldone
	nop
.smallleft3: ! 1, 2, or 3 bytes remain
	deccc %o2 ! reduce count for cc test
	ldub [%o1], %o3 ! load one byte
	bz,pt %ncc, .smalldone
	stb %o3, [%o0] ! store one byte
	ldub [%o1+1], %o3 ! load second byte
	deccc %o2
	bz,pt %ncc, .smalldone
	stb %o3, [%o0+1] ! store second byte
	ldub [%o1+2], %o3 ! load third byte
	stb %o3, [%o0+2] ! store third byte
.smalldone:
	retl
	mov %g1, %o0 ! restore %o0

	.align 16
.smallword: ! src and dst are word aligned; count > SHORTCOPY
	subcc %o2, 7, %o2 ! adjust count to allow zero tst
	ble,pt %ncc, .smallwordy
	nop
	andcc %o3, 0x7, %g0 ! test for alignment
	bz,pt %ncc, .smlong ! branch to long word aligned case
	nop
	andcc %o0, 4, %g0 ! test dest alignment
	bz,pt %ncc, .smallwords
	nop
	lduw [%o1], %o3 ! read word
	add %o1, 4, %o1
	add %o0, 4, %o0
	subcc %o2, 4, %o2
	ble,pt %ncc, .smallwordy
	stw %o3, [%o0-4]
.smallwords:
	subcc %o2, 8, %o2 ! update count
	lduw [%o1], %o3 ! read word
	sllx %o3, 32, %o3
	lduw [%o1+4], %o4 ! read word
	or %o3, %o4, %o3
	add %o1, 8, %o1 ! update SRC
	stx %o3, [%o0] ! write word
	bgu,pt %ncc, .smallwords ! loop until done
	add %o0, 8, %o0 ! update DST
	addcc %o2, 7, %o2 ! restore count
	bz,pt %ncc, .smallexit ! check for completion
	nop
.smleft7:
	cmp %o2, 4 ! check for 4 or more bytes left
	blt,pn %ncc, .smallleft3 ! if not, go to finish up
	nop
	subcc %o2, 4, %o2
	lduw [%o1], %o3
	add %o1, 4, %o1
	add %o0, 4, %o0
	bnz,pn %ncc, .smallleft3
	stw %o3, [%o0-4]
	retl
	mov %g1, %o0 ! restore %o0

	.align 16
.smallwordy:
	lduw [%o1], %o3 ! read word
	addcc %o2, 3, %o2 ! restore count
	bz,pt %ncc, .smallexit
	stw %o3, [%o0] ! write word
	deccc %o2 ! reduce count for cc test
	ldub [%o1+4], %o3 ! load one byte
	bz,pt %ncc, .smallexit
	stb %o3, [%o0+4] ! store one byte
	ldub [%o1+5], %o3 ! load second byte
	deccc %o2
	bz,pt %ncc, .smallexit
	stb %o3, [%o0+5] ! store second byte
	ldub [%o1+6], %o3 ! load third byte
	stb %o3, [%o0+6] ! store third byte
.smallexit:
	retl
	mov %g1, %o0 ! restore %o0

	.align 16
.smlong: ! src & dest long word aligned
	cmp %o2, 8
	ble,pn %ncc, .smlongx ! Make sure 16+ bytes remain
	sub %o2, 8, %o2
.smby16:
	ldx [%o1], %o3
	subcc %o2, 16, %o2
	add %o1, 16, %o1
	ldx [%o1-8], %o4
	add %o0, 16, %o0
	stx %o3, [%o0-16]
	bgu,pt %ncc, .smby16
	stx %o4, [%o0-8]
.smlongx: !
	addcc %o2, 15, %o2
	bz,pt %ncc, .smallexit
	cmp %o2, 7
	ble,pn %ncc, .smleft7
	nop
	subcc %o2, 8, %o2
	ldx [%o1], %o3
	add %o1, 8, %o1
	add %o0, 8, %o0
	bnz,pn %ncc, .smleft7
	stx %o3, [%o0-8]
	retl
	mov %g1, %o0 ! restore %o0

	.align 16
.medium:
	prefetch [%o1 + (1 * BLOCK_SIZE)], #n_reads
	or %o0, %o1, %o3 ! prepare alignment check
	andcc %o3, 0x7, %g0 ! test for alignment
	bz,pt %ncc, .medlwordq ! branch to long word aligned case
	prefetch [%o1 + (2 * BLOCK_SIZE)], #n_reads
	neg %o0, %o5
	andcc %o5, 7, %o5 ! bytes till DST 8 byte aligned
	bz,pt %ncc, .med_align
	prefetch [%o1 + (3 * BLOCK_SIZE)], #n_reads
	sub %o2, %o5, %o2 ! update count

	andcc %o5, 1, %g0
	bz,pt %icc, .med_half
	nop
	ldub [%o1], %o3
	add %o1, 1, %o1
	add %o0, 1, %o0
	stb %o3, [%o0-1]

.med_half:
	andcc %o5, 2, %g0
	bz,pt %icc, .med_word
	nop
	ldub [%o1], %o3
	sllx %o3, 8, %o3
	add %o0, 2, %o0
	ldub [%o1+1], %o4
	or %o3, %o4, %o3
	add %o1, 2, %o1
	sth %o3, [%o0-2]

.med_word:
	andcc %o5, 4, %g0
	bz,pt %ncc, .med_align
	nop
	ldub [%o1], %o3
	sllx %o3, 8, %o3
	add %o0, 4, %o0
	ldub [%o1+1], %o4
	or %o3, %o4, %o3
	sllx %o3, 8, %o3
	ldub [%o1+2], %o4
	or %o3, %o4, %o3
	sllx %o3, 8, %o3
	ldub [%o1+3], %o4
	add %o1, 4, %o1
	or %o3, %o4, %o3
	stw %o3, [%o0-4]

	! Now DST is 8-byte aligned. o0, o1, o2 are current.

.med_align:
	andcc %o1, 0x3, %g0 ! test alignment
	prefetch [%o1 + (4 * BLOCK_SIZE)], #n_reads
	bnz,pt %ncc, .mediumsetup ! branch to skip aligned cases
	! if src, dst not aligned
	prefetch [%o0 + (4 * BLOCK_SIZE)], #n_writes
/*
 * Handle all cases where src and dest are aligned on word
 * or long word boundaries. Use unrolled loops for better
 * performance. This option wins over the standard large-data
 * move when source and destination are in cache for medium
 * to short data moves.
 */
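/*
 * A hedged C sketch of one 16-byte step of the word-aligned loop below
 * (.medw16): on big-endian SPARC two 32-bit loads are merged and written
 * with a single 64-bit store (illustrative names only):
 *
 *	uint64_t w = ((uint64_t)((const uint32_t *)src)[0] << 32) |
 *	    ((const uint32_t *)src)[1];
 *	*(uint64_t *)dst = w;
 *
 * The loop does this twice per iteration, then advances src and dst by 16.
 */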
	andcc %o1, 0x7, %g0 ! test word alignment
	bz,pt %ncc, .medlword ! branch to long word aligned case
	prefetch [%o1 + (8 * BLOCK_SIZE)], #n_reads
	cmp %o2, ALIGNED4_FPCOPY_THRESHOLD ! limit to store buffer size
	bgu,pt %ncc, .mediumrejoin ! otherwise rejoin main loop
	prefetch [%o0 + (8 * BLOCK_SIZE)], #n_writes
	subcc %o2, 15, %o2 ! adjust length to allow cc test
	! for end of loop
	prefetch [%o1 + (12 * BLOCK_SIZE)], #n_reads
	prefetch [%o0 + (12 * BLOCK_SIZE)], #n_writes
	prefetch [%o1 + (16 * BLOCK_SIZE)], #n_reads
.medw16:
	lduw [%o1], %o4 ! load
	subcc %o2, 16, %o2 ! decrement length count
	sllx %o4, 32, %o4
	lduw [%o1+4], %o3 ! a block of 16 bytes
	or %o3, %o4, %o3
	stx %o3, [%o0]
	add %o1, 16, %o1 ! increase src ptr by 16
	lduw [%o1-8], %o4
	sllx %o4, 32, %o4
	lduw [%o1-4], %o3
	add %o0, 16, %o0 ! increase dst ptr by 16
	or %o3, %o4, %o3
	bgu,pt %ncc, .medw16 ! repeat if at least 16 bytes left
	stx %o3, [%o0-8]
.medw15:
	addcc %o2, 15, %o2 ! restore count
	bz,pt %ncc, .medwexit ! exit if finished
	nop
	cmp %o2, 8
	blt,pn %ncc, .medw7 ! skip if 7 or fewer bytes left
	nop !
	lduw [%o1], %o4 ! load 4 bytes
	subcc %o2, 8, %o2 ! decrease count by 8
	sllx %o4, 32, %o4
	add %o1, 8, %o1 ! increase src ptr by 8
	lduw [%o1-4], %o3 ! load 4 bytes
	or %o3, %o4, %o3
	add %o0, 8, %o0 ! increase dst ptr by 8
	bz,pt %ncc, .medwexit ! exit if finished
	stx %o3, [%o0-8] ! and store 8 bytes
.medw7: ! count is ge 1, less than 8
	cmp %o2, 3 ! check for 4 bytes left
	ble,pn %ncc, .medw3 ! skip if 3 or fewer bytes left
	nop !
	ld [%o1], %o4 ! load 4 bytes
	sub %o2, 4, %o2 ! decrease count by 4
	add %o1, 4, %o1 ! increase src ptr by 4
	stw %o4, [%o0] ! and store 4 bytes
	add %o0, 4, %o0 ! increase dst ptr by 4
	tst %o2 ! check for zero bytes left
	bz,pt %ncc, .medwexit ! exit if finished
	nop
.medw3: ! count is known to be 1, 2, or 3
	deccc %o2 ! reduce count by one
	ldub [%o1], %o3 ! load one byte
	bz,pt %ncc, .medwexit ! exit if last byte
	stb %o3, [%o0] ! store one byte
	ldub [%o1+1], %o3 ! load second byte
	deccc %o2 ! reduce count by one
	bz,pt %ncc, .medwexit ! exit if last byte
	stb %o3, [%o0+1] ! store second byte
	ldub [%o1+2], %o3 ! load third byte
	stb %o3, [%o0+2] ! store third byte
.medwexit:
	retl
	mov %g1, %o0 ! restore %o0

/*
 * Special case for handling when src and dest are both long word aligned
 * and total data to move is between SMALL_MAX and ALIGNED8_FPCOPY_THRESHOLD
 * bytes.
 */

	.align 16
.medlwordq:
	prefetch [%o1 + (3 * BLOCK_SIZE)], #n_reads
	prefetch [%o1 + (4 * BLOCK_SIZE)], #n_reads
	prefetch [%o1 + (8 * BLOCK_SIZE)], #n_reads
	prefetch [%o0 + (4 * BLOCK_SIZE)], #n_writes
.medlword: ! long word aligned
	! length > SMALL_MAX
	prefetch [%o0 + (8 * BLOCK_SIZE)], #n_writes
	prefetch [%o1 + (12 * BLOCK_SIZE)], #n_reads
	cmp %o2, ALIGNED8_FPCOPY_THRESHOLD
	bgu,pt %ncc, .large_long ! else go to large long align case
	prefetch [%o0 + (12 * BLOCK_SIZE)], #n_writes
	subcc %o2, 31, %o2 ! adjust length to allow cc test
	! for end of loop
	ble,pt %ncc, .medl31 ! skip big loop if less than 32
	prefetch [%o1 + (16 * BLOCK_SIZE)], #n_reads
	prefetch [%o0 + (16 * BLOCK_SIZE)], #n_writes
.medl32:
	ldx [%o1], %o4 ! load
	subcc %o2, 32, %o2 ! decrement length count
	stx %o4, [%o0] ! and store
	ldx [%o1+8], %o3 ! a block of 32 bytes
	add %o1, 32, %o1 ! increase src ptr by 32
	stx %o3, [%o0+8]
	ldx [%o1-16], %o4
	add %o0, 32, %o0 ! increase dst ptr by 32
	stx %o4, [%o0-16]
	ldx [%o1-8], %o3
	bgu,pt %ncc, .medl32 ! repeat if at least 32 bytes left
	stx %o3, [%o0-8]
.medl31:
	addcc %o2, 16, %o2 ! adjust remaining count
	ble,pt %ncc, .medl15 ! skip if 15 or fewer bytes left
	nop !
	ldx [%o1], %o4 ! load and store 16 bytes
	add %o1, 16, %o1 ! increase src ptr by 16
	stx %o4, [%o0] !
	sub %o2, 16, %o2 ! decrease count by 16
	ldx [%o1-8], %o3 !
	add %o0, 16, %o0 ! increase dst ptr by 16
	stx %o3, [%o0-8]
.medl15:
	addcc %o2, 15, %o2 ! restore count
	bz,pt %ncc, .medwexit ! exit if finished
	nop
	cmp %o2, 8
	blt,pt %ncc, .medw7 ! skip if 7 or fewer bytes left
	nop
	ldx [%o1], %o4 ! load 8 bytes
	add %o1, 8, %o1 ! increase src ptr by 8
	stx %o4, [%o0] ! and store 8 bytes
	subcc %o2, 8, %o2 ! decrease count by 8
	bz,pt %ncc, .medwexit ! exit if finished
	add %o0, 8, %o0 ! increase dst ptr by 8
	ba .medw7
	nop

	.align 16
.mediumsetup:
	prefetch [%o1 + (8 * BLOCK_SIZE)], #n_reads
	prefetch [%o0 + (8 * BLOCK_SIZE)], #n_writes
.mediumrejoin:
	rd %fprs, %o4 ! check for unused FPU

	neg %o1, %o3
	and %o3, 7, %o3 ! bytes till SRC 8 byte aligned
	prefetch [%o1 + (12 * BLOCK_SIZE)], #n_reads
	sub %g0, %o3, %o3 ! -(bytes till SRC aligned after DST aligned)
	! o3={-7, -6, ... 7} o3>0 => SRC overaligned
	add %o1, 8, %o1 ! prepare to round SRC upward
	prefetch [%o0 + (12 * BLOCK_SIZE)], #n_writes
	sethi %hi(0x1234567f), %o5 ! For GSR.MASK

	or %o5, 0x67f, %o5

	andcc %o4, FPRS_FEF, %o4 ! test FEF, fprs.du = fprs.dl = 0
	bz,a %ncc, 3f
	wr %g0, FPRS_FEF, %fprs ! fprs.fef = 1
3:
	prefetch [%o1 + (16 * BLOCK_SIZE)], #n_reads
	cmp %o2, MEDIUM_MAX
	bmask %o5, %g0, %g0

	! Compute o5 (number of bytes that need copying using the main loop).
	! First, compute for the medium case.
	! Then, if large case, o5 is replaced by count for block alignment.
	! Be careful not to read past end of SRC
	! Currently, o2 is the actual count remaining
	! o3 is how much sooner we'll cross the alignment boundary
	! in SRC compared to in DST
	!
	! Examples: Let # denote bytes that should not be accessed
	! Let x denote a byte already copied to align DST
	! Let . and - denote bytes not yet copied
	! Let | denote double alignment boundaries
	!
	! DST: ######xx|........|--------|..###### o2 = 18
	! o0
	!
	! o3 = -3: SRC: ###xx...|.....---|-----..#|######## o5 = 8
	! o1
	!
	! o3 = 0: SRC: ######xx|........|--------|..###### o5 = 16-8 = 8
	! o1
	!
	! o3 = +1: SRC: #######x|x.......|.-------|-..##### o5 = 16-8 = 8
	! o1

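	! In C terms, for the medium case the next few instructions compute
	! (a hedged sketch; o2 = count remaining, o3 as described above):
	!
	!	o5 = (o2 + o3 + (o3 >= 0 ? -8 : 0)) & ~7;
	!
	! i.e. the number of bytes the 8-byte main loop may copy without
	! ever reading past the end of SRC.
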
	or %g0, -8, %o5
	alignaddr %o1, %g0, %o1 ! set GSR.ALIGN and align o1

	movrlz %o3, %g0, %o5 ! subtract 8 from o2+o3 only if o3>=0
	add %o5, %o2, %o5
	add %o5, %o3, %o5

	bleu,pt %ncc, 4f
	andn %o5, 7, %o5 ! 8 byte aligned count
	neg %o0, %o5 ! 'large' case
	and %o5, BLOCK_SIZE-1, %o5 ! bytes till DST block aligned
4:
	brgez,a %o3, .beginmedloop
	ldd [%o1-8], %d0

	add %o1, %o3, %o1 ! back up o1
5:
	ldda [%o1]ASI_FL8_P, %d2
	inc %o1
	andcc %o1, 7, %g0
	bnz,pt %ncc, 5b
	bshuffle %d0, %d2, %d0 ! shifts d0 left 1 byte and or's in d2

.beginmedloop:
	tst %o5
	bz,pn %ncc, .endmedloop
	sub %o2, %o5, %o2 ! update count for later

	! Main loop to write out doubles. Note: o5 & 7 == 0

	ldd [%o1], %d2
	subcc %o5, 8, %o5 ! update local count
	bz,pn %ncc, 1f
	add %o1, 8, %o1 ! update SRC

	subcc %o5, 8, %o5
	ble,pn %ncc, 2f
.medloop:
	faligndata %d0, %d2, %d4
	ldd [%o1], %d0
	add %o1, 16, %o1 ! update SRC
	std %d4, [%o0]
	faligndata %d2, %d0, %d6
	subcc %o5, 16, %o5 ! update local count
	ldd [%o1 - 8], %d2
	add %o0, 16, %o0 ! update DST
	bgt,pt %ncc, .medloop
	std %d6, [%o0 - 8]
	addcc %o5, 8, %o5
	bnz,pt %ncc, 2f
1:
	faligndata %d0, %d2, %d4
	fmovd %d2, %d0
	std %d4, [%o0]
	ba .endmedloop
	add %o0, 8, %o0
2:
	ldd [%o1], %d0
	add %o1, 8, %o1
	std %d4, [%o0]
	faligndata %d2, %d0, %d6
	std %d6, [%o0 + 8]
	add %o0, 16, %o0

.endmedloop:
	! Currently, o1 is pointing to the next double-aligned byte in SRC
	! The 8 bytes starting at [o1-8] are available in d0
	! At least one, and possibly all, of these need to be written.

	cmp %o2, BLOCK_SIZE
	bgu,pt %ncc, .large ! otherwise less than 16 bytes left

	andcc %o3, 7, %o5 ! Number of bytes needed to completely
	! fill %d0 with good (unwritten) data.
	bz,pn %ncc, 2f
	sub %o5, 8, %o3 ! -(number of good bytes in %d0)
	cmp %o2, 8
	bl,a,pt %ncc, 3f ! Not enough bytes to fill %d0
	add %o1, %o3, %o1 ! Back up %o1

1:
	deccc %o5
	ldda [%o1]ASI_FL8_P, %d2
	inc %o1
	bgu,pt %ncc, 1b
	bshuffle %d0, %d2, %d0 ! shifts d0 left 1 byte and or's in d2

2:
	subcc %o2, 8, %o2
	std %d0, [%o0]
	bz,pt %ncc, .mediumexit
	add %o0, 8, %o0
	tst %o2
	bz,pt %ncc, .mediumexit
	nop
3:
	ldub [%o1], %o3
	deccc %o2
	inc %o1
	stb %o3, [%o0]
	bgu,pt %ncc, 3b
	inc %o0

.mediumexit:
	wr %o4, %g0, %fprs ! fprs = o4 restore fprs
	retl
	mov %g1, %o0

	.align ICACHE_LINE_SIZE
.large_long:
	! %o0 DST, 8 byte aligned
	! %o1 SRC, 8 byte aligned
	! %o2 count (number of bytes to be moved)
	! %o3, %o4, %o5 available as temps
	set BST_THRESHOLD, %o5
	cmp %o2, %o5
	bgu,pn %ncc, .xlarge_long
	prefetch [%o1 + (16 * BLOCK_SIZE)], #n_reads
	subcc %o2, (16 * BLOCK_SIZE) + 63, %o2 ! adjust length to allow
	! cc test for end of loop
	ble,pn %ncc, .largel_no ! skip big loop if no more prefetches
	prefetch [%o0 + (16 * BLOCK_SIZE)], #n_writes
.largel64p:
	prefetch [%o1 + (20 * BLOCK_SIZE)], #n_reads
	prefetch [%o0 + (20 * BLOCK_SIZE)], #n_writes
	ldx [%o1], %o4 ! load
	subcc %o2, 64, %o2 ! decrement length count
	stx %o4, [%o0] ! and store
	ldx [%o1+8], %o3 ! a block of 64 bytes
	stx %o3, [%o0+8]
	ldx [%o1+16], %o4 ! a block of 64 bytes
	stx %o4, [%o0+16]
	ldx [%o1+24], %o3 ! a block of 64 bytes
	stx %o3, [%o0+24]
	ldx [%o1+32], %o4 ! a block of 64 bytes
	stx %o4, [%o0+32]
	ldx [%o1+40], %o3 ! a block of 64 bytes
	add %o1, 64, %o1 ! increase src ptr by 64
	stx %o3, [%o0+40]
	ldx [%o1-16], %o4
	add %o0, 64, %o0 ! increase dst ptr by 64
	stx %o4, [%o0-16]
	ldx [%o1-8], %o3
	bgu,pt %ncc, .largel64p ! repeat if at least 64 bytes left
	stx %o3, [%o0-8]
.largel_no:
	add %o2, (16 * BLOCK_SIZE), %o2
.largel64: ! finish with no more prefetches
	ldx [%o1], %o4 ! load
	subcc %o2, 64, %o2 ! decrement length count
	stx %o4, [%o0] ! and store
	ldx [%o1+8], %o3 ! a block of 64 bytes
	stx %o3, [%o0+8]
	ldx [%o1+16], %o4 ! a block of 64 bytes
	stx %o4, [%o0+16]
	ldx [%o1+24], %o3 ! a block of 64 bytes
	stx %o3, [%o0+24]
	ldx [%o1+32], %o4 ! a block of 64 bytes
	stx %o4, [%o0+32]
	ldx [%o1+40], %o3 ! a block of 64 bytes
	add %o1, 64, %o1 ! increase src ptr by 64
	stx %o3, [%o0+40]
	ldx [%o1-16], %o4
	add %o0, 64, %o0 ! increase dst ptr by 64
	stx %o4, [%o0-16]
	ldx [%o1-8], %o3
	bgu,pt %ncc, .largel64 ! repeat if at least 64 bytes left
	stx %o3, [%o0-8]
.largel32:
	addcc %o2, 32, %o2 ! adjust finish count
	ble,pt %ncc, .largel31
	nop
	ldx [%o1], %o4 ! load
	sub %o2, 32, %o2 ! decrement length count
	stx %o4, [%o0] ! and store
	ldx [%o1+8], %o3 ! a block of 32 bytes
	add %o1, 32, %o1 ! increase src ptr by 32
	stx %o3, [%o0+8]
	ldx [%o1-16], %o4
	add %o0, 32, %o0 ! increase dst ptr by 32
	stx %o4, [%o0-16]
	ldx [%o1-8], %o3
	stx %o3, [%o0-8]
.largel31:
	addcc %o2, 16, %o2 ! adjust remaining count
	ble,pt %ncc, .largel15 ! skip if 15 or fewer bytes left
	nop !
	ldx [%o1], %o4 ! load and store 16 bytes
	add %o1, 16, %o1 ! increase src ptr by 16
	stx %o4, [%o0] !
	sub %o2, 16, %o2 ! decrease count by 16
	ldx [%o1-8], %o3 !
	add %o0, 16, %o0 ! increase dst ptr by 16
	stx %o3, [%o0-8]
.largel15:
	addcc %o2, 15, %o2 ! restore count
	bz,pt %ncc, .medwexit ! exit if finished
	nop
	cmp %o2, 8
	blt,pt %ncc, .medw7 ! skip if 7 or fewer bytes left
	nop
	ldx [%o1], %o4 ! load 8 bytes
	add %o1, 8, %o1 ! increase src ptr by 8
	subcc %o2, 8, %o2 ! decrease count by 8
	bz,pt %ncc, .medwexit ! exit if finished
	stx %o4, [%o0] ! and store 8 bytes
	ba .medw7
	add %o0, 8, %o0 ! increase dst ptr by 8


	.align ICACHE_LINE_SIZE
.large:
	! %o0 I/O DST is 64-byte aligned
	! %o1 I/O 8-byte aligned (and we've set GSR.ALIGN)
	! %d0 I/O already loaded with SRC data from [%o1-8]
	! %o2 I/O count (number of bytes that need to be written)
	! %o3 I Not written. If zero, then SRC is double aligned.
	! %o4 I Not written. Holds fprs.
	! %o5 O The number of doubles that remain to be written.

	! Load the rest of the current block
	! Recall that %o1 is further into SRC than %o0 is into DST

	set BST_THRESHOLD, %o5
	cmp %o2, %o5
	bgu,pn %ncc, .xlarge
	prefetch [%o1 + (20 * BLOCK_SIZE)], #n_reads

	ldd [%o1], %f2
	ldd [%o1 + 8], %f4
	faligndata %f0, %f2, %f32
	ldd [%o1 + 16], %f6
	faligndata %f2, %f4, %f34
	ldd [%o1 + 24], %f8
	faligndata %f4, %f6, %f36
	ldd [%o1 + 32], %f10
	or %g0, -8, %o5 ! if %o3 >= 0, %o5 = -8
	faligndata %f6, %f8, %f38
	prefetch [%o0 + (16 * BLOCK_SIZE)], #n_writes
	ldd [%o1 + 40], %f12
	movrlz %o3, %g0, %o5 ! if %o3 < 0, %o5 = 0 (needed later)
	faligndata %f8, %f10, %f40
	prefetch [%o0 + (20 * BLOCK_SIZE)], #n_writes
	ldd [%o1 + 48], %f14
	faligndata %f10, %f12, %f42
	ldd [%o1 + 56], %f0
	sub %o2, BLOCK_SIZE, %o2 ! update count
	add %o1, BLOCK_SIZE, %o1 ! update SRC

	! Main loop. Write previous block. Load rest of current block.
	! Some bytes will be loaded that won't yet be written.
1:
	prefetch [%o1 + (20 * BLOCK_SIZE)], #n_reads
	prefetch [%o0 + (20 * BLOCK_SIZE)], #n_writes
	faligndata %f12, %f14, %f44
	ldd [%o1], %f2
	faligndata %f14, %f0, %f46
	std %f32, [%o0]
	ldd [%o1 + 8], %f4
	faligndata %f0, %f2, %f32
	std %f34, [%o0 + 8]
	ldd [%o1 + 16], %f6
	faligndata %f2, %f4, %f34
	std %f36, [%o0 + 16]
	ldd [%o1 + 24], %f8
	faligndata %f4, %f6, %f36
	std %f38, [%o0 + 24]
	ldd [%o1 + 32], %f10
	faligndata %f6, %f8, %f38
	std %f40, [%o0 + 32]
	ldd [%o1 + 40], %f12
	faligndata %f8, %f10, %f40
	std %f42, [%o0 + 40]
	ldd [%o1 + 48], %f14
	sub %o2, BLOCK_SIZE, %o2 ! update count
	faligndata %f10, %f12, %f42
	std %f44, [%o0 + 48]
	add %o0, BLOCK_SIZE, %o0 ! update DST
	cmp %o2, BLOCK_SIZE + 8
	ldd [%o1 + 56], %f0
	add %o1, BLOCK_SIZE, %o1 ! update SRC
	bgu,pt %ncc, 1b
	std %f46, [%o0 - 8]
	faligndata %f12, %f14, %f44
	faligndata %f14, %f0, %f46
	std %f32, [%o0]
	std %f34, [%o0 + 8]
	std %f36, [%o0 + 16]
	std %f38, [%o0 + 24]
	std %f40, [%o0 + 32]
	std %f42, [%o0 + 40]
	std %f44, [%o0 + 48]
	std %f46, [%o0 + 56] ! store 64 bytes
	cmp %o2, BLOCK_SIZE
	bne %ncc, 2f ! exactly 1 block remaining?
	add %o0, BLOCK_SIZE, %o0 ! update DST
	brz,a %o3, 3f ! is SRC double aligned?
	ldd [%o1], %f2

2:
	add %o5, %o2, %o5 ! %o5 was already set to 0 or -8
	add %o5, %o3, %o5
	ba .beginmedloop
	andn %o5, 7, %o5 ! 8 byte aligned count


	! This is when there is exactly 1 block remaining and SRC is aligned
3:
	ldd [%o1 + 0x8], %f4
	ldd [%o1 + 0x10], %f6
	fsrc1 %f0, %f32
	ldd [%o1 + 0x18], %f8
	fsrc1 %f2, %f34
	ldd [%o1 + 0x20], %f10
	fsrc1 %f4, %f36
	ldd [%o1 + 0x28], %f12
	fsrc1 %f6, %f38
	ldd [%o1 + 0x30], %f14
	fsrc1 %f8, %f40
	fsrc1 %f10, %f42
	fsrc1 %f12, %f44
	fsrc1 %f14, %f46
	std %f32, [%o0]
	std %f34, [%o0 + 8]
	std %f36, [%o0 + 16]
	std %f38, [%o0 + 24]
	std %f40, [%o0 + 32]
	std %f42, [%o0 + 40]
	std %f44, [%o0 + 48]
	std %f46, [%o0 + 56] ! store 64 bytes
	wr %o4, 0, %fprs
	retl
	mov %g1, %o0

	.align 16
.xlarge_long:
	! long word aligned, larger than Block store threshold
	! %o0 DST, 8 byte aligned
	! %o1 SRC, 8 byte aligned
	! %o2 count (number of bytes to be moved)
	! %o3, %o4, %o5 available as temps
	! prefetch through %o1 + (12 * BLOCK_SIZE) has been done
	! Need to align DST to 64 byte boundary for block stores
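	!
	! A hedged C sketch of the alignment pre-loop below (DST is already
	! 8-byte aligned here; names are illustrative only):
	!
	!	pad = (64 - (dst & 63)) & 63;	/* bytes to the next 64-byte line */
	!	count -= pad;
	!	while (pad != 0) {
	!		*(uint64_t *)dst = *(const uint64_t *)src;
	!		src += 8; dst += 8; pad -= 8;
	!	}
	!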
	andcc %o0, 63, %o5
	bz,pt %ncc, .xlarge_aligned
	sub %o5, 64, %o5
	add %o2, %o5, %o2
.xlarge_a:
	addcc %o5, 8, %o5
	ldx [%o1], %o4
	add %o1, 8, %o1
	add %o0, 8, %o0
	bnz,pt %ncc, .xlarge_a
	stx %o4, [%o0-8]
	! DST is now on 64 byte boundary
.xlarge_aligned:
	prefetch [%o1 + (20 * BLOCK_SIZE)], #one_read
	rd %fprs, %o4 ! check for unused FPU
	andcc %o4, FPRS_FEF, %o4 ! test FEF, fprs.du = fprs.dl = 0
	bz,a %ncc, .xlarge_loop
	wr %g0, FPRS_FEF, %fprs ! fprs.fef = 1
.xlarge_loop:
	prefetch [%o1 + (4 * BLOCK_SIZE)], #n_reads
	ldd [%o1], %d0
	sub %o2, 64, %o2
	ldd [%o1 + 8], %d2
	ldd [%o1 + 16], %d4
	ldd [%o1 + 24], %d6
	ldd [%o1 + 32], %d8
	ldd [%o1 + 40], %d10
	ldd [%o1 + 48], %d12
	add %o1, 64, %o1
	ldd [%o1 - 8], %d14
	stda %d0, [%o0]ASI_BLK_P ! store 64 bytes, bypass cache
	add %o0, 64, %o0
	cmp %o2, 64
	bgt,pt %ncc, .xlarge_loop
	prefetch [%o1 + (20 * BLOCK_SIZE)], #one_read
	membar #StoreLoad|#StoreStore ! needed after final blk store
	wr %o4, 0, %fprs
	subcc %o2, 63, %o2
	bgt,pt %ncc, .largel64
	nop
	ba .largel32
	nop

	.align 16
.xlarge:
	! %o0 I/O DST is 64-byte aligned
	! %o1 I/O 8-byte aligned (and we've set GSR.ALIGN)
	! %d0 I/O already loaded with SRC data from [%o1-8]
	! %o2 I/O count (number of bytes that need to be written)
	! %o3 I Not written. If zero, then SRC is double aligned.
	! %o4 I Not written. Holds fprs.
	! %o5 O The number of doubles that remain to be written.

	! Load the rest of the current block
	! Recall that %o1 is further into SRC than %o0 is into DST

	ldd [%o1], %f2
	ldd [%o1 + 0x8], %f4
	faligndata %f0, %f2, %f32
	ldd [%o1 + 0x10], %f6
	faligndata %f2, %f4, %f34
	ldd [%o1 + 0x18], %f8
	faligndata %f4, %f6, %f36
	ldd [%o1 + 0x20], %f10
	or %g0, -8, %o5 ! if %o3 >= 0, %o5 = -8
	faligndata %f6, %f8, %f38
	ldd [%o1 + 0x28], %f12
	movrlz %o3, %g0, %o5 ! if %o3 < 0, %o5 = 0 (needed later)
	prefetch [%o1 + (20 * BLOCK_SIZE)], #n_reads
	faligndata %f8, %f10, %f40
	ldd [%o1 + 0x30], %f14
	faligndata %f10, %f12, %f42
	ldd [%o1 + 0x38], %f0
	sub %o2, BLOCK_SIZE, %o2 ! update count
	add %o1, BLOCK_SIZE, %o1 ! update SRC

	! This point is 32-byte aligned since 24 instructions appear since
	! the previous alignment directive.

	! Main loop. Write previous block. Load rest of current block.
	! Some bytes will be loaded that won't yet be written.
1:
	ldd [%o1], %f2
	faligndata %f12, %f14, %f44
	ldd [%o1 + 0x8], %f4
	faligndata %f14, %f0, %f46
	stda %f32, [%o0]ASI_BLK_P
	sub %o2, BLOCK_SIZE, %o2 ! update count
	ldd [%o1 + 0x10], %f6
	faligndata %f0, %f2, %f32
	ldd [%o1 + 0x18], %f8
	faligndata %f2, %f4, %f34
	ldd [%o1 + 0x20], %f10
	faligndata %f4, %f6, %f36
	ldd [%o1 + 0x28], %f12
	faligndata %f6, %f8, %f38
	ldd [%o1 + 0x30], %f14
	prefetch [%o1 + (4 * BLOCK_SIZE)], #n_reads
	faligndata %f8, %f10, %f40
	ldd [%o1 + 0x38], %f0
	faligndata %f10, %f12, %f42
	prefetch [%o1 + (20 * BLOCK_SIZE)], #one_read
	add %o0, BLOCK_SIZE, %o0 ! update DST
	cmp %o2, BLOCK_SIZE + 8
	bgu,pt %ncc, 1b
	add %o1, BLOCK_SIZE, %o1 ! update SRC

	faligndata %f12, %f14, %f44
	faligndata %f14, %f0, %f46
	stda %f32, [%o0]ASI_BLK_P ! store 64 bytes, bypass cache
	cmp %o2, BLOCK_SIZE
	bne %ncc, 2f ! exactly 1 block remaining?
	add %o0, BLOCK_SIZE, %o0 ! update DST
	brz,a %o3, 3f ! is SRC double aligned?
	ldd [%o1], %f2

2:
	add %o5, %o2, %o5 ! %o5 was already set to 0 or -8
	add %o5, %o3, %o5

	membar #StoreLoad|#StoreStore ! needed after final blk store

	ba .beginmedloop
	andn %o5, 7, %o5 ! 8 byte aligned count

	! This is when there is exactly 1 block remaining and SRC is aligned
3:
	ldd [%o1 + 0x8], %f4
	ldd [%o1 + 0x10], %f6
	fsrc1 %f0, %f32
	ldd [%o1 + 0x18], %f8
	fsrc1 %f2, %f34
	ldd [%o1 + 0x20], %f10
	fsrc1 %f4, %f36
	ldd [%o1 + 0x28], %f12
	fsrc1 %f6, %f38
	ldd [%o1 + 0x30], %f14
	fsrc1 %f8, %f40
	fsrc1 %f10, %f42
	fsrc1 %f12, %f44
	fsrc1 %f14, %f46
	stda %f32, [%o0]ASI_BLK_P
	membar #StoreLoad|#StoreStore ! needed after final blk store
	wr %o4, 0, %fprs
	retl
	mov %g1, %o0

	SET_SIZE(memcpy)