2 * arch/xtensa/lib/hal/memcopy.S -- Core HAL library functions
3 * xthal_memcpy and xthal_bcopy
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
9 * Copyright (C) 2002 - 2005 Tensilica Inc.
12 #include <variant/core.h>
14 .macro src_b r, w0, w1
32 * void *memcpy(void *dst, const void *src, size_t len);
33 * void *memmove(void *dst, const void *src, size_t len);
34 * void *bcopy(const void *src, void *dst, size_t len);
36 * This function is intended to do the same thing as the standard
37 * library function memcpy() (or bcopy()) for most cases.
38 * However, where the source and/or destination references
39 * an instruction RAM or ROM or a data RAM or ROM, that
40 * source and/or destination will always be accessed with
41 * 32-bit load and store instructions (as required for these
45 * !!!!!!! Handling of IRAM/IROM has not yet
46 * !!!!!!! been implemented.
48 * The bcopy version is provided here to avoid the overhead
49 * of an extra call, for callers that require this convention.
51 * The (general case) algorithm is as follows:
52 * If destination is unaligned, align it by conditionally
53 * copying 1 and 2 bytes.
54 * If source is aligned,
55 * do 16 bytes with a loop, and then finish up with
56 * 8, 4, 2, and 1 byte copies conditional on the length;
57 * else (if source is unaligned),
58 * do the same, but use SRC to align the source data.
59 * This code tries to use fall-through branches for the common
60 * case of aligned source and destination and multiple
/*
 * bcopy entry path (label lies above this excerpt).  Arguments arrive
 * SWAPPED relative to memcpy (a2 = src, a3 = dst, a4 = len), so the
 * destination is staged in a5 before jumping into the code shared
 * with memcpy.  NOTE(review): only a fragment of this entry is
 * visible here -- confirm against the full file.
 */
83 entry sp, 16 # minimal stack frame
84 # a2=src, a3=dst, a4=len
85 mov a5, a3 # a5 = working dst pointer (args swapped vs. memcpy)
88 j .Lcommon # go to common code for memcpy+bcopy
/*
 * Byte-at-a-time copy loop, used for short and odd-sized copies.
 * Two variants, selected at build time:
 *  - with zero-overhead loops (XCHAL_HAVE_LOOPS): LOOPNEZ iterates
 *    a4 times, skipping the loop entirely when a4 == 0;
 *  - without: an explicit end pointer (a7) is computed and compared
 *    each iteration.
 * The padding byte below keeps the loop-begin address at the
 * alignment the LOOPNEZ/LBEG hardware expects (per the original
 * comments).  The actual load/store body (.Lnextbyte with its
 * l8ui/s8i pair) falls in lines omitted from this excerpt.
 */
95 .byte 0 # 1 mod 4 alignment for LOOPNEZ
96 # (0 mod 4 alignment for LBEG)
99 loopnez a4, .Lbytecopydone
100 #else /* !XCHAL_HAVE_LOOPS */
101 beqz a4, .Lbytecopydone # zero length: nothing to copy
102 add a7, a3, a4 # a7 = end address for source
103 #endif /* !XCHAL_HAVE_LOOPS */
/* (loop body .Lnextbyte not visible in this excerpt) */
109 #if !XCHAL_HAVE_LOOPS
110 blt a3, a7, .Lnextbyte # loop until src pointer reaches end
111 #endif /* !XCHAL_HAVE_LOOPS */
/*
 * Destination-alignment fixup.  Reached from the memcpy/bcopy entry
 * when dst is not word aligned: copy 1 byte (dst odd) and/or 2 bytes
 * (dst == 2 mod 4) -- those copies fall in lines omitted from this
 * excerpt -- then rejoin the main algorithm at .Ldstaligned.
 * Copies shorter than the 7-/6-byte thresholds are not worth the
 * fixup and are sent to the plain byte loop instead.
 */
116 * Destination is unaligned
120 .Ldst1mod2: # dst is only byte aligned
121 _bltui a4, 7, .Lbytecopy # do short copies byte by byte
/* (1-byte copy advancing a3/a5 not visible in this excerpt) */
129 _bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
130 # return to main algorithm
131 .Ldst2mod4: # dst 16-bit aligned
133 _bltui a4, 6, .Lbytecopy # do short copies byte by byte
/* (2-byte copy advancing a3/a5 not visible in this excerpt) */
141 j .Ldstaligned # dst is now aligned, return to main algorithm
/*
 * Shared memcpy/memmove entry (a2 = dst, a3 = src, a4 = len; windowed
 * ABI, minimal frame).  a2 is preserved untouched as the return value;
 * a5 becomes the working destination pointer.  After aligning dst via
 * .Ldst1mod2/.Ldst2mod4, the fast path copies 16 bytes per iteration
 * when src is also word aligned; otherwise it branches to the
 * funnel-shift path at .Lsrcunaligned.  The 16-byte loop body and the
 * 8/4/2/1-byte tail copies fall in lines omitted from this excerpt.
 * NOTE(review): memmove sharing this entry implies overlap is handled
 * elsewhere or accepted -- confirm against the full file.
 */
145 .type memcpy,@function
148 .type memmove,@function
151 entry sp, 16 # minimal stack frame
152 # a2/ dst, a3/ src, a4/ len
153 mov a5, a2 # copy dst so that a2 is return value
155 _bbsi.l a2, 0, .Ldst1mod2 # if dst is 1 mod 2
156 _bbsi.l a2, 1, .Ldst2mod4 # if dst is 2 mod 4
157 .Ldstaligned: # return here from .Ldst?mod? once dst is aligned
158 srli a7, a4, 4 # number of loop iterations with 16B
160 movi a8, 3 # if source is not aligned,
161 _bany a3, a8, .Lsrcunaligned # then use shifting copy
163 * Destination and source are word-aligned, use word copy.
165 # copy 16 bytes per iteration for word-aligned dst and word-aligned src
167 loopnez a7, .Loop1done # zero-overhead loop over 16B chunks
168 #else /* !XCHAL_HAVE_LOOPS */
/* (iteration-count test and a8 setup not visible in this excerpt) */
171 add a8, a8, a3 # a8 = end of last 16B source chunk
172 #endif /* !XCHAL_HAVE_LOOPS */
/* (16-byte l32i/s32i loop body not visible in this excerpt) */
184 #if !XCHAL_HAVE_LOOPS
186 #endif /* !XCHAL_HAVE_LOOPS */
/*
 * Funnel-shift copy: dst is word aligned, src is not.  SSA8 derives
 * the shift amount from the low bits of the src address; the omitted
 * loop body then combines word pairs with the SRC instruction (via
 * the src_b macro) to synthesize aligned stores.  On configurations
 * where unaligned loads trap (or under the ISS alignment checkers),
 * src is rounded down to a word boundary first (offset saved in a11)
 * and restored afterwards.
 * NOTE(review): a8 is presumably still the 3-mask set at .Ldstaligned
 * when the "and a11, a3, a8" below executes -- confirm, since the
 * intervening lines are not visible in this excerpt.
 */
225 * Destination is aligned, Source is unaligned
230 _beqz a4, .Ldone # avoid loading anything for zero-length copies
231 # copy 16 bytes per iteration for word-aligned dst and unaligned src
232 ssa8 a3 # set shift amount from byte offset
233 #define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS (simulator) with the
234 lint or ferret client, or 0 to save a few cycles */
235 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
236 and a11, a3, a8 # save unalignment offset for below
237 sub a3, a3, a11 # align a3
239 l32i a6, a3, 0 # load first word
241 loopnez a7, .Loop2done # zero-overhead loop over 16B chunks
242 #else /* !XCHAL_HAVE_LOOPS */
/* (iteration-count test and a10 setup not visible in this excerpt) */
245 add a10, a10, a3 # a10 = end of last 16B source chunk
246 #endif /* !XCHAL_HAVE_LOOPS */
/* (shifting l32i/src_b/s32i loop body not visible in this excerpt) */
262 #if !XCHAL_HAVE_LOOPS
264 #endif /* !XCHAL_HAVE_LOOPS */
287 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
288 add a3, a3, a11 # readjust a3 with correct misalignment
312 * comment-start: "# "
313 * comment-start-skip: "# *"