/* memcpy.S, revision fdc5c1f56f9d21034badb8e4b092c47098f19613 */
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/cpu-features.h>

#if __ARM_ARCH__ == 7 || defined(__ARM_NEON__)

        .text
        .fpu    neon

        .global memcpy
        .type memcpy, %function
        .align 4

/* a prefetch distance of 32*4 works best experimentally */
#define PREFETCH_DISTANCE   (32*4)

memcpy:
        .fnstart
        .save       {r0, lr}
        stmfd       sp!, {r0, lr}

        /* start preloading as early as possible */
        pld         [r1, #0]
        pld         [r1, #32]

        /* do we have at least 16 bytes to copy? (needed for alignment below) */
        cmp         r2, #16
        blo         5f

        /* align destination to a half cache-line for the write-buffer */
        rsb         r3, r0, #0
        ands        r3, r3, #0xF
        beq         0f

        /* copy up to 15 bytes (count in r3) */
        sub         r2, r2, r3
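        /* lsl #31 moves bit 0 of the count into N and bit 1 into the
         * carry, so the MI transfers copy 1 byte and the CS transfers
         * copy 2 bytes.
         */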
        movs        ip, r3, lsl #31
        ldrmib      lr, [r1], #1
        strmib      lr, [r0], #1
        ldrcsb      ip, [r1], #1
        ldrcsb      lr, [r1], #1
        strcsb      ip, [r0], #1
        strcsb      lr, [r0], #1
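        /* lsl #29 moves bit 2 of the count into N (copy 4 bytes below)
         * and bit 3 into the carry (copy 8 bytes).
         */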
        movs        ip, r3, lsl #29
        bge         1f
        // copies 4 bytes, destination 32-bit aligned
        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1:      bcc         2f
        // copies 8 bytes, destination 64-bit aligned
        vld1.8      {d0}, [r1]!
        vst1.8      {d0}, [r0, :64]!
2:

0:      /* immediately preload the next cache line, which we may need */
        pld         [r1, #(32*0)]
        pld         [r1, #(32*1)]
        pld         [r1, #(32*2)]
        pld         [r1, #(32*3)]

        /* make sure we have at least 128 bytes to copy */
        subs        r2, r2, #128
        blo         2f

        /* preload all the cache lines we need.
         * NOTE: the number of pld instructions below depends on
         * PREFETCH_DISTANCE; ideally we would increase the distance in
         * the main loop to avoid the goofy code below. In practice this
         * doesn't seem to make a big difference.
         */
        pld         [r1, #(PREFETCH_DISTANCE + 32*0)]
        pld         [r1, #(PREFETCH_DISTANCE + 32*1)]
        pld         [r1, #(PREFETCH_DISTANCE + 32*2)]
        pld         [r1, #(PREFETCH_DISTANCE + 32*3)]

1:      /* The main loop copies 128 bytes at a time */
        vld1.8      {d0  - d3},   [r1]!
        vld1.8      {d4  - d7},   [r1]!
        vld1.8      {d16 - d19},  [r1]!
        vld1.8      {d20 - d23},  [r1]!
        pld         [r1, #(PREFETCH_DISTANCE + 32*0)]
        pld         [r1, #(PREFETCH_DISTANCE + 32*1)]
        pld         [r1, #(PREFETCH_DISTANCE + 32*2)]
        pld         [r1, #(PREFETCH_DISTANCE + 32*3)]
        subs        r2, r2, #128
        vst1.8      {d0  - d3},   [r0, :128]!
        vst1.8      {d4  - d7},   [r0, :128]!
        vst1.8      {d16 - d19},  [r0, :128]!
        vst1.8      {d20 - d23},  [r0, :128]!
        bhs         1b

2:      /* fix up the remaining count and make sure we have >= 32 bytes left */
        add         r2, r2, #128
        subs        r2, r2, #32
        blo         4f

3:      /* 32 bytes at a time. These cache lines were already preloaded */
        vld1.8      {d0 - d3},  [r1]!
        subs        r2, r2, #32
        vst1.8      {d0 - d3},  [r0, :128]!
        bhs         3b

4:      /* less than 32 bytes left */
        add         r2, r2, #32
        tst         r2, #0x10
        beq         5f
        // copies 16 bytes, 128-bit aligned
        vld1.8      {d0, d1}, [r1]!
        vst1.8      {d0, d1}, [r0, :128]!

5:      /* copy up to 15 bytes (count in r2) */
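        /* same flag trick as above: lsl #29 puts bit 3 of the count in
         * the carry (8 bytes) and bit 2 in N (4 bytes); lsl #31 then
         * selects the final 2-byte and 1-byte copies.
         */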
        movs        ip, r2, lsl #29
        bcc         1f
        vld1.8      {d0}, [r1]!
        vst1.8      {d0}, [r0]!
1:      bge         2f
        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2:      movs        ip, r2, lsl #31
        ldrmib      r3, [r1], #1
        ldrcsb      ip, [r1], #1
        ldrcsb      lr, [r1], #1
        strmib      r3, [r0], #1
        strcsb      ip, [r0], #1
        strcsb      lr, [r0], #1

        ldmfd       sp!, {r0, lr}
        bx          lr
        .fnend

#else   /* __ARM_ARCH__ < 7 */


        .text

        .global memcpy
        .type memcpy, %function
        .align 4

        /*
         * Optimized memcpy() for ARM.
         *
         * Note that memcpy() always returns the destination pointer,
         * so we have to preserve R0.
         */

memcpy:
        /* The stack must always be 64-bit aligned to be compliant with the
         * ARM ABI. Since we have to save R0, we might as well save R4,
         * which we can use for better pipelining of the reads below.
         */
        .fnstart
        .save       {r0, r4, lr}
        stmfd       sp!, {r0, r4, lr}
        /* Make room for r5-r11, which will be spilled later */
        .pad        #28
        sub         sp, sp, #28

        // preload the destination because we'll align it to a cache line
        // with small writes. Also start the source "pump".
        PLD         (r0, #0)
        PLD         (r1, #0)
        PLD         (r1, #32)

        /* it simplifies things to take care of len < 4 early */
        cmp         r2, #4
        blo         copy_last_3_and_return

        /* compute the offset to align the source
         * offset = (4-(src&3))&3 = -src & 3
         */
        rsb         r3, r1, #0
        ands        r3, r3, #3
        beq         src_aligned

        /* align source to 32 bits. We need to insert 2 instructions between
         * a ldr[b|h] and str[b|h] because byte and half-word instructions
         * stall 2 cycles.
         */
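        /* as in the NEON path: lsl #31 maps bit 0 of the count to N
         * (1 byte) and bit 1 to the carry (2 bytes).
         */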
        movs        r12, r3, lsl #31
        sub         r2, r2, r3          /* we know that r3 <= r2 because r2 >= 4 */
        ldrmib      r3, [r1], #1
        ldrcsb      r4, [r1], #1
        ldrcsb      r12,[r1], #1
        strmib      r3, [r0], #1
        strcsb      r4, [r0], #1
        strcsb      r12,[r0], #1

src_aligned:

        /* see if src and dst are aligned together (congruent) */
        eor         r12, r0, r1
        tst         r12, #3
        bne         non_congruent

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea       sp, {r5-r11}

        /* align the destination to a cache-line */
        rsb         r3, r0, #0
        ands        r3, r3, #0x1C
        beq         congruent_aligned32
        cmp         r3, r2
        andhi       r3, r2, #0x1C

        /* conditionally copies 0 to 7 words (length in r3) */
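        /* lsl #28 moves bit 4 of the count into the carry (16 bytes)
         * and bit 3 into N (8 bytes); the last word is tested with tst.
         */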
        movs        r12, r3, lsl #28
        ldmcsia     r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmiia     r1!, {r8, r9}           /*  8 bytes */
        stmcsia     r0!, {r4, r5, r6, r7}
        stmmiia     r0!, {r8, r9}
        tst         r3, #0x4
        ldrne       r10,[r1], #4            /*  4 bytes */
        strne       r10,[r0], #4
        sub         r2, r2, r3

congruent_aligned32:
        /*
         * here the destination is aligned to a 32-byte cache line
         * (the source is only guaranteed to be word-aligned).
         */

cached_aligned32:
        subs        r2, r2, #32
        blo         less_than_32_left

        /*
         * We preload a cache-line up to 64 bytes ahead. On the 926, this will
         * stall only until the requested word is fetched, but the linefill
         * continues in the background.
         * While the linefill is going on, we write our previous cache-line
         * into the write-buffer (which should have some free space).
         * When the linefill is done, the write-buffer will
         * start dumping its content into memory.
         *
         * While all this is going on, we load a full cache line into
         * 8 registers; this cache line should be in the cache by now
         * (or partly in the cache).
         *
         * This code should work well regardless of the source/dest alignment.
         *
         */

        // Align the preload register to a cache-line because the cpu does
        // "critical word first" (the first word requested is loaded first).
        bic         r12, r1, #0x1F
        add         r12, r12, #64

1:      ldmia       r1!, { r4-r11 }
        PLD         (r12, #64)
        subs        r2, r2, #32

        // NOTE: if r12 is more than 64 bytes ahead of r1, the following ldrhi
        // (the cheap ARM9 preload) is not safely guarded by the preceding subs.
        // When it is safely guarded, the only way to get a SIGSEGV here is for
        // the caller to overstate the length.
        ldrhi       r3, [r12], #32      /* cheap ARM9 preload */
        stmia       r0!, { r4-r11 }
        bhs         1b

        add         r2, r2, #32

less_than_32_left:
        /*
         * less than 32 bytes left at this point (length in r2)
         */

        /* skip all this if there is nothing to do, which should
         * be a common case (if not executed the code below takes
         * about 16 cycles)
         */
        tst         r2, #0x1F
        beq         1f

        /* conditionally copies 0 to 31 bytes */
        movs        r12, r2, lsl #28
        ldmcsia     r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmiia     r1!, {r8, r9}           /*  8 bytes */
        stmcsia     r0!, {r4, r5, r6, r7}
        stmmiia     r0!, {r8, r9}
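        /* lsl #30 moves bit 2 of the count into the carry (one word)
         * and bit 1 into N (a halfword); the last byte is tested with tst.
         */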
        movs        r12, r2, lsl #30
        ldrcs       r3, [r1], #4            /*  4 bytes */
        ldrmih      r4, [r1], #2            /*  2 bytes */
        strcs       r3, [r0], #4
        strmih      r4, [r0], #2
        tst         r2, #0x1
        ldrneb      r3, [r1]                /*  last byte  */
        strneb      r3, [r0]

        /* we're done! restore everything and return */
1:      ldmfd       sp!, {r5-r11}
        ldmfd       sp!, {r0, r4, lr}
        bx          lr

        /********************************************************************/

non_congruent:
        /*
         * here source is aligned to 4 bytes
         * but destination is not.
         *
         * in the code below r2 is the number of bytes read
         * (the number of bytes written is always smaller, because we have
         * partial words in the shift queue)
         */
        cmp         r2, #4
        blo         copy_last_3_and_return

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea       sp, {r5-r11}

        /* compute shifts needed to align src to dest */
        rsb         r5, r0, #0
        and         r5, r5, #3          /* r5 = # bytes in partial words */
        mov         r12, r5, lsl #3     /* r12 = right shift */
        rsb         lr, r12, #32        /* lr = left shift  */
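        /* e.g. if dst & 3 == 1 then r5 = 3, r12 = 24 and lr = 8,
         * which selects the loop24 variant below.
         */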

        /* read the first word */
        ldr         r3, [r1], #4
        sub         r2, r2, #4

        /* write a partial word (0 to 3 bytes), such that the destination
         * becomes aligned to 32 bits (r5 = number of bytes to copy for
         * alignment)
         */
        movs        r5, r5, lsl #31
        strmib      r3, [r0], #1
        movmi       r3, r3, lsr #8
        strcsb      r3, [r0], #1
        movcs       r3, r3, lsr #8
        strcsb      r3, [r0], #1
        movcs       r3, r3, lsr #8

        cmp         r2, #4
        blo         partial_word_tail

        /* Align destination to 32 bytes (cache line boundary) */
1:      tst         r0, #0x1c
        beq         2f
        ldr         r5, [r1], #4
        sub         r2, r2, #4
        orr         r4, r3, r5,     lsl lr
        mov         r3, r5,         lsr r12
        str         r4, [r0], #4
        cmp         r2, #4
        bhs         1b
        blo         partial_word_tail

        /* copy 32 bytes at a time */
2:      subs        r2, r2, #32
        blo         less_than_thirtytwo

        /* Use immediate mode for the shifts, because there is an extra cycle
         * for register shifts, which could account for up to a 50%
         * performance hit.
         */

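        /* r12 is the right-shift (in bits) needed to realign the data:
         * 8, 16 or 24. loop16 below handles the fall-through case
         * r12 == 16.
         */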
        cmp         r12, #24
        beq         loop24
        cmp         r12, #8
        beq         loop8

loop16:
        ldr         r12, [r1], #4
1:      mov         r4, r12
        ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
        PLD         (r1, #64)
        subs        r2, r2, #32
        ldrhs       r12, [r1], #4
        orr         r3, r3, r4,     lsl #16
        mov         r4, r4,         lsr #16
        orr         r4, r4, r5,     lsl #16
        mov         r5, r5,         lsr #16
        orr         r5, r5, r6,     lsl #16
        mov         r6, r6,         lsr #16
        orr         r6, r6, r7,     lsl #16
        mov         r7, r7,         lsr #16
        orr         r7, r7, r8,     lsl #16
        mov         r8, r8,         lsr #16
        orr         r8, r8, r9,     lsl #16
        mov         r9, r9,         lsr #16
        orr         r9, r9, r10,    lsl #16
        mov         r10, r10,       lsr #16
        orr         r10, r10, r11,  lsl #16
        stmia       r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov         r3, r11,        lsr #16
        bhs         1b
        b           less_than_thirtytwo

loop8:
        ldr         r12, [r1], #4
1:      mov         r4, r12
        ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
        PLD         (r1, #64)
        subs        r2, r2, #32
        ldrhs       r12, [r1], #4
        orr         r3, r3, r4,     lsl #24
        mov         r4, r4,         lsr #8
        orr         r4, r4, r5,     lsl #24
        mov         r5, r5,         lsr #8
        orr         r5, r5, r6,     lsl #24
        mov         r6, r6,         lsr #8
        orr         r6, r6, r7,     lsl #24
        mov         r7, r7,         lsr #8
        orr         r7, r7, r8,     lsl #24
        mov         r8, r8,         lsr #8
        orr         r8, r8, r9,     lsl #24
        mov         r9, r9,         lsr #8
        orr         r9, r9, r10,    lsl #24
        mov         r10, r10,       lsr #8
        orr         r10, r10, r11,  lsl #24
        stmia       r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov         r3, r11,        lsr #8
        bhs         1b
        b           less_than_thirtytwo

loop24:
        ldr         r12, [r1], #4
1:      mov         r4, r12
        ldmia       r1!, {   r5,r6,r7,  r8,r9,r10,r11}
        PLD         (r1, #64)
        subs        r2, r2, #32
        ldrhs       r12, [r1], #4
        orr         r3, r3, r4,     lsl #8
        mov         r4, r4,         lsr #24
        orr         r4, r4, r5,     lsl #8
        mov         r5, r5,         lsr #24
        orr         r5, r5, r6,     lsl #8
        mov         r6, r6,         lsr #24
        orr         r6, r6, r7,     lsl #8
        mov         r7, r7,         lsr #24
        orr         r7, r7, r8,     lsl #8
        mov         r8, r8,         lsr #24
        orr         r8, r8, r9,     lsl #8
        mov         r9, r9,         lsr #24
        orr         r9, r9, r10,    lsl #8
        mov         r10, r10,       lsr #24
        orr         r10, r10, r11,  lsl #8
        stmia       r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov         r3, r11,        lsr #24
        bhs         1b

less_than_thirtytwo:
        /* copy the last 0 to 31 bytes of the source */
        rsb         r12, lr, #32        /* we corrupted r12, recompute it  */
        add         r2, r2, #32
        cmp         r2, #4
        blo         partial_word_tail

1:      ldr         r5, [r1], #4
        sub         r2, r2, #4
        orr         r4, r3, r5,     lsl lr
        mov         r3, r5,         lsr r12
        str         r4, [r0], #4
        cmp         r2, #4
        bhs         1b

partial_word_tail:
        /* we have a partial word in the input buffer */
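        /* lr/8 bytes of the last word are still queued in r3; lsl #(31-3)
         * maps that count to the flags exactly like the lsl #31 trick
         * above (N: store 1 byte, C: store 2 bytes).
         */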
        movs        r5, lr, lsl #(31-3)
        strmib      r3, [r0], #1
        movmi       r3, r3, lsr #8
        strcsb      r3, [r0], #1
        movcs       r3, r3, lsr #8
        strcsb      r3, [r0], #1

        /* Refill spilled registers from the stack. Don't update sp. */
        ldmfd       sp, {r5-r11}

copy_last_3_and_return:
        movs        r2, r2, lsl #31     /* copy remaining 0, 1, 2 or 3 bytes */
        ldrmib      r2, [r1], #1
        ldrcsb      r3, [r1], #1
        ldrcsb      r12,[r1]
        strmib      r2, [r0], #1
        strcsb      r3, [r0], #1
        strcsb      r12,[r0]

        /* we're done! restore sp and spilled registers and return */
        add         sp,  sp, #28
        ldmfd       sp!, {r0, r4, lr}
        bx          lr
        .fnend


#endif    /* __ARM_ARCH__ < 7 */
