diff --git a/LICENSE b/LICENSE index f60d2f26a5..e95c46dbcc 100644 --- a/LICENSE +++ b/LICENSE @@ -423,6 +423,7 @@ libc/string/lib_vikmemcpy.c distribution. libs/libc/machine/arm/armv7-a/gnu/arch_memchr.S +libs/libc/machine/arm/armv7-r/gnu/arch_memchr.S libs/libc/machine/arm/armv7-m/gnu/arch_memchr.S libs/libc/machine/arm/armv8-m/gnu/arch_memchr.S ================================================ @@ -487,6 +488,8 @@ libs/libc/machine/arm/armv8-m/gnu/arch_memchr.S libs/libc/machine/arm/armv7-a/gnu/arch_memcpy.S libs/libc/machine/arm/armv7-a/gnu/arch_strlen.S +libs/libc/machine/arm/armv7-r/gnu/arch_memcpy.S +libs/libc/machine/arm/armv7-r/gnu/arch_strlen.S ================================================ Copyright (c) 2013, Linaro Limited @@ -521,6 +524,7 @@ libs/libc/machine/arm/armv7-a/gnu/arch_strlen.S libs/libc/machine/arm/armv7-a/gnu/arch_memmove.S +libs/libc/machine/arm/armv7-r/gnu/arch_memmove.S libs/libc/machine/arm/armv7-m/gnu/arch_memmove.S libs/libc/machine/arm/armv7-m/gnu/arch_memset.S libs/libc/machine/arm/armv8-m/gnu/arch_memmove.S @@ -555,6 +559,7 @@ libs/libc/machine/arm/armv8-m/gnu/arch_memset.S libs/libc/machine/arm/armv7-a/gnu/arch_memset.S +libs/libc/machine/arm/armv7-r/gnu/arch_memset.S ================================================ Copyright (C) 2013 The Android Open Source Project @@ -584,6 +589,7 @@ libs/libc/machine/arm/armv7-a/gnu/arch_memset.S SUCH DAMAGE. libs/libc/machine/arm/armv7-a/gnu/arch_strcmp.S +libs/libc/machine/arm/armv7-r/gnu/arch_strcmp.S ================================================ Copyright (c) 2011 The Android Open Source Project diff --git a/libs/libc/machine/arm/armv7-r/Kconfig b/libs/libc/machine/arm/armv7-r/Kconfig index 881d8646fc..dd4bed8c88 100644 --- a/libs/libc/machine/arm/armv7-r/Kconfig +++ b/libs/libc/machine/arm/armv7-r/Kconfig @@ -3,9 +3,51 @@ # see the file kconfig-language.txt in the NuttX tools repository. 
# +config ARMV7R_MEMCHR + bool "Enable optimized memchr() for ARMv7-R" + default n + select LIBC_ARCH_MEMCHR + depends on ARCH_TOOLCHAIN_GNU + ---help--- + Enable optimized ARMv7-R specific memchr() library function + config ARMV7R_MEMCPY bool "Enable optimized memcpy() for ARMv7-R" select LIBC_ARCH_MEMCPY depends on ARCH_TOOLCHAIN_GNU ---help--- Enable optimized ARMv7-R specific memcpy() library function + +config ARMV7R_MEMMOVE + bool "Enable optimized memmove() for ARMv7-R" + default n + select LIBC_ARCH_MEMMOVE + depends on ARCH_TOOLCHAIN_GNU + ---help--- + Enable optimized ARMv7-R specific memmove() library function + +config ARMV7R_MEMSET + bool "Enable optimized memset() for ARMv7-R" + default n + select LIBC_ARCH_MEMSET + depends on ARCH_TOOLCHAIN_GNU + depends on ARM_NEON + ---help--- + Enable optimized ARMv7-R specific memset() library function + +config ARMV7R_STRCMP + bool "Enable optimized strcmp() for ARMv7-R" + default n + select LIBC_ARCH_STRCMP + depends on ARCH_TOOLCHAIN_GNU + ---help--- + Enable optimized ARMv7-R specific strcmp() library function + +config ARMV7R_STRLEN + bool "Enable optimized strlen() for ARMv7-R" + default n + select LIBC_ARCH_STRLEN + depends on ARCH_TOOLCHAIN_GNU + ---help--- + Enable optimized ARMv7-R specific strlen() library function + diff --git a/libs/libc/machine/arm/armv7-r/Make.defs b/libs/libc/machine/arm/armv7-r/Make.defs index 3f93d6cf0b..1814051cc7 100644 --- a/libs/libc/machine/arm/armv7-r/Make.defs +++ b/libs/libc/machine/arm/armv7-r/Make.defs @@ -18,20 +18,38 @@ # ############################################################################ +ifeq ($(CONFIG_ARMV7R_MEMCHR),y) +ASRCS += arch_memchr.S +endif + ifeq ($(CONFIG_ARMV7R_MEMCPY),y) - ASRCS += arch_memcpy.S +endif -DEPPATH += --dep-path machine/arm/armv7-r/gnu -VPATH += :machine/arm/armv7-r/gnu +ifeq ($(CONFIG_ARMV7R_MEMMOVE),y) +ASRCS += arch_memmove.S +endif +ifeq ($(CONFIG_ARMV7R_MEMSET),y) +ASRCS += arch_memset.S +endif + +ifeq ($(CONFIG_ARMV7R_STRCMP),y) +ASRCS += arch_strcmp.S +endif + +ifeq ($(CONFIG_ARMV7R_STRLEN),y) +ASRCS += arch_strlen.S endif ifeq ($(CONFIG_LIBC_ARCH_ELF),y) - CSRCS += arch_elf.c +endif + +ifeq ($(CONFIG_ARCH_TOOLCHAIN_GNU),y) +DEPPATH += --dep-path machine/arm/armv7-r/gnu +VPATH += :machine/arm/armv7-r/gnu +endif DEPPATH += --dep-path machine/arm/armv7-r VPATH += :machine/arm/armv7-r - -endif diff --git a/libs/libc/machine/arm/armv7-r/gnu/acle-compat.h b/libs/libc/machine/arm/armv7-r/gnu/acle-compat.h new file mode 100644 index 0000000000..b269933602 --- /dev/null +++ b/libs/libc/machine/arm/armv7-r/gnu/acle-compat.h @@ -0,0 +1,187 @@ +/**************************************************************************** + * libs/libc/machine/arm/armv7-r/gnu/acle-compat.h + * + * Copyright (c) 2014 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ****************************************************************************/ + +#ifndef __LIBS_LIBC_MACHINE_ARM_ARMV7R_GNU_ACLE_COMPAT_H +#define __LIBS_LIBC_MACHINE_ARM_ARMV7R_GNU_ACLE_COMPAT_H + +/* ACLE standardises a set of pre-defines that describe the ARM architecture. + * These were mostly implemented in GCC around GCC-4.8; older versions + * have no, or only partial support. To provide a level of backwards + * compatibility we try to work out what the definitions should be, given + * the older pre-defines that GCC did produce. This isn't complete, but + * it should be enough for use by routines that depend on this header. + */ + +/* No need to handle ARMv8, GCC had ACLE support before that. */ + +#ifdef __ARM_ARCH_7__ +/* The common subset of ARMv7 in all profiles. */ +# define __ARM_ARCH 7 +# define __ARM_ARCH_ISA_THUMB 2 +# define __ARM_FEATURE_CLZ +# define __ARM_FEATURE_LDREX 7 +# define __ARM_FEATURE_UNALIGNED +#endif + +#if defined (__ARM_ARCH_7A__) || defined (__ARM_ARCH_7R__) +# define __ARM_ARCH 7 +# define __ARM_ARCH_ISA_THUMB 2 +# define __ARM_ARCH_ISA_ARM +# define __ARM_FEATURE_CLZ +# define __ARM_FEATURE_SIMD32 +# define __ARM_FEATURE_DSP +# define __ARM_FEATURE_QBIT +# define __ARM_FEATURE_SAT +# define __ARM_FEATURE_LDREX 15 +# define __ARM_FEATURE_UNALIGNED +# ifdef __ARM_ARCH_7A__ +# define __ARM_ARCH_PROFILE 'A' +# else +# define __ARM_ARCH_PROFILE 'R' +# endif +#endif + +#ifdef __ARM_ARCH_7EM__ +# define __ARM_ARCH 7 +# define __ARM_ARCH_ISA_THUMB 2 +# define __ARM_FEATURE_CLZ +# define __ARM_FEATURE_SIMD32 +# define __ARM_FEATURE_DSP +# define __ARM_FEATURE_QBIT +# define __ARM_FEATURE_SAT +# define __ARM_FEATURE_LDREX 7 +# define __ARM_FEATURE_UNALIGNED +# define __ARM_ARCH_PROFILE 'M' +#endif + +#ifdef __ARM_ARCH_7M__ +# define __ARM_ARCH 7 +# define __ARM_ARCH_ISA_THUMB 2 +# define __ARM_FEATURE_CLZ +# define __ARM_FEATURE_QBIT +# define __ARM_FEATURE_SAT +# define __ARM_FEATURE_LDREX 7 +# define __ARM_FEATURE_UNALIGNED +# define __ARM_ARCH_PROFILE 'M' +#endif + +#ifdef __ARM_ARCH_6T2__ +# define __ARM_ARCH 6 +# define __ARM_ARCH_ISA_THUMB 2 +# define __ARM_ARCH_ISA_ARM +# define __ARM_FEATURE_CLZ +# define __ARM_FEATURE_SIMD32 +# define __ARM_FEATURE_DSP +# define __ARM_FEATURE_QBIT +# define __ARM_FEATURE_SAT +# define __ARM_FEATURE_LDREX 4 +# define __ARM_FEATURE_UNALIGNED +#endif + +#ifdef __ARM_ARCH_6M__ +# define __ARM_ARCH 6 +# define __ARM_ARCH_ISA_THUMB 1 +# define __ARM_ARCH_PROFILE 'M' +#endif + +#if defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__) \ + || defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6Z__) \ + || defined (__ARM_ARCH_6ZK__) +# define __ARM_ARCH 6 +# define __ARM_ARCH_ISA_THUMB 1 +# define __ARM_ARCH_ISA_ARM +# define __ARM_FEATURE_CLZ +# define __ARM_FEATURE_SIMD32 +# define __ARM_FEATURE_DSP 
+# define __ARM_FEATURE_QBIT +# define __ARM_FEATURE_SAT +# define __ARM_FEATURE_UNALIGNED +# ifndef __thumb__ +# if defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6ZK__) +# define __ARM_FEATURE_LDREX 15 +# else +# define __ARM_FEATURE_LDREX 4 +# endif +# endif +#endif + +#if defined (__ARM_ARCH_5TE__) || defined (__ARM_ARCH_5E__) +# define __ARM_ARCH 5 +# define __ARM_ARCH_ISA_ARM +# ifdef __ARM_ARCH_5TE__ +# define __ARM_ARCH_ISA_THUMB 1 +# endif +# define __ARM_FEATURE_CLZ +# define __ARM_FEATURE_DSP +#endif + +#if defined (__ARM_ARCH_5T__) || defined (__ARM_ARCH_5__) +# define __ARM_ARCH 5 +# define __ARM_ARCH_ISA_ARM +# ifdef __ARM_ARCH_5TE__ +# define __ARM_ARCH_ISA_THUMB 1 +# endif +# define __ARM_FEATURE_CLZ +#endif + +#ifdef __ARM_ARCH_4T__ +# define __ARM_ARCH 4 +# define __ARM_ARCH_ISA_ARM +# define __ARM_ARCH_ISA_THUMB 1 +#endif + +#ifdef __ARM_ARCH_4__ +# define __ARM_ARCH 4 +# define __ARM_ARCH_ISA_ARM +#endif + +#if defined (__ARM_ARCH_3__) || defined (__ARM_ARCH_3M__) +# define __ARM_ARCH 3 +# define __ARM_ARCH_ISA_ARM +#endif + +#ifdef __ARM_ARCH_2__ +# define __ARM_ARCH 2 +# define __ARM_ARCH_ISA_ARM +#endif + +#ifdef __ARMEB__ +# define __ARM_BIG_ENDIAN +#endif + +/* If we still don't know what the target architecture is, then we're + * probably not using GCC. + */ + +#ifndef __ARM_ARCH +# error Unable to determine architecture version. +#endif + +#endif /* __LIBS_LIBC_MACHINE_ARM_ARMV7R_GNU_ACLE_COMPAT_H */ diff --git a/libs/libc/machine/arm/armv7-r/gnu/arch_memchr.S b/libs/libc/machine/arm/armv7-r/gnu/arch_memchr.S new file mode 100644 index 0000000000..6e1ed88763 --- /dev/null +++ b/libs/libc/machine/arm/armv7-r/gnu/arch_memchr.S @@ -0,0 +1,391 @@ +/**************************************************************************** + * libs/libc/machine/arm/armv7-r/gnu/arch_memchr.S + * + * Copyright (c) 2010-2011, Linaro Limited + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * * Neither the name of Linaro Limited nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * Written by Dave Gilbert + * + * This memchr routine is optimised on a Cortex-A9 and should work on + * all ARMv7 processors. It has a fast path for short sizes, and has + * an optimised path for large data sets; the worst case is finding the + * match early in a large data set. + * + * Copyright (c) 2015 ARM Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Linaro nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ****************************************************************************/ + +@ 2011-02-07 david.gilbert@linaro.org +@ Extracted from local git a5b438d861 +@ 2011-07-14 david.gilbert@linaro.org +@ Import endianness fix from local git ea786f1b +@ 2011-10-11 david.gilbert@linaro.org +@ Import from cortex-strings bzr rev 63 +@ Flip to ldrd (as suggested by Greta Yorsh) +@ Make conditional on CPU type +@ tidy + +@ This code requires armv6t2 or later. Uses Thumb2. + + .syntax unified + +#include "acle-compat.h" + +@ NOTE: This ifdef MUST match the one in memchr-stub.c +#if defined (__ARM_NEON__) || defined (__ARM_NEON) +#if __ARM_ARCH >= 8 && __ARM_ARCH_PROFILE == 'R' + .arch armv8-r +#else + .arch armv7-r +#endif + .fpu neon + + +/* Arguments */ +#define srcin r0 +#define chrin r1 +#define cntin r2 + +/* Retval */ +#define result r0 /* Live range does not overlap with srcin */ + +/* Working registers */ +#define src r1 /* Live range does not overlap with chrin */ +#define tmp r3 +#define synd r0 /* No overlap with srcin or result */ +#define soff r12 + +/* Working NEON registers */ +#define vrepchr q0 +#define vdata0 q1 +#define vdata0_0 d2 /* Lower half of vdata0 */ +#define vdata0_1 d3 /* Upper half of vdata0 */ +#define vdata1 q2 +#define vdata1_0 d4 /* Lower half of vhas_chr0 */ +#define vdata1_1 d5 /* Upper half of vhas_chr0 */ +#define vrepmask q3 +#define vrepmask0 d6 +#define vrepmask1 d7 +#define vend q4 +#define vend0 d8 +#define vend1 d9 + +/* + * Core algorithm: + * + * For each 32-byte chunk we calculate a 32-bit syndrome value, with one bit per + * byte. Each bit is set if the relevant byte matched the requested character + * and cleared otherwise. 
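+ *
+ * As a purely illustrative C model (not part of this file; "chunk" and
+ * "c" are hypothetical names for the current 32-byte window and the
+ * search character), the per-chunk computation is roughly:
+ *
+ *   uint32_t synd = 0;
+ *   for (int i = 0; i < 32; i++)
+ *     if (chunk[i] == c)
+ *       synd |= 1u << i;
+ *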
+ * Since the bits in the syndrome reflect exactly the
+ * order in which things occur in the original string, counting trailing
+ * zeros allows us to identify exactly which byte has matched.
+ */
+
+	.text
+	.thumb_func
+	.align 4
+	.p2align 4,,15
+	.global memchr
+	.type memchr,%function
+
+memchr:
+	.cfi_sections .debug_frame
+	.cfi_startproc
+	/* Use a simple loop if there are less than 8 bytes to search. */
+	cmp	cntin, #7
+	bhi	.Llargestr
+	and	chrin, chrin, #0xff
+
+.Lsmallstr:
+	subs	cntin, cntin, #1
+	blo	.Lnotfound	/* Return not found if reached end. */
+	ldrb	tmp, [srcin], #1
+	cmp	tmp, chrin
+	bne	.Lsmallstr	/* Loop again if not found. */
+	/* Otherwise fixup address and return. */
+	sub	result, result, #1
+	bx	lr
+
+
+.Llargestr:
+	vdup.8	vrepchr, chrin	/* Duplicate char across all lanes. */
+	/*
+	 * Magic constant 0x8040201008040201 allows us to identify which lane
+	 * matches the requested byte.
+	 */
+	movw	tmp, #0x0201
+	movt	tmp, #0x0804
+	lsl	soff, tmp, #4
+	vmov	vrepmask0, tmp, soff
+	vmov	vrepmask1, tmp, soff
+	/* Work with aligned 32-byte chunks */
+	bic	src, srcin, #31
+	ands	soff, srcin, #31
+	beq	.Lloopintro	/* Go straight to main loop if it's aligned. */
+
+	/*
+	 * Input string is not 32-byte aligned. We calculate the syndrome
+	 * value for the aligned 32-byte block containing the first bytes
+	 * and mask the irrelevant part.
+	 */
+	vld1.8	{vdata0, vdata1}, [src:256]!
+	sub	tmp, soff, #32
+	adds	cntin, cntin, tmp
+	vceq.i8	vdata0, vdata0, vrepchr
+	vceq.i8	vdata1, vdata1, vrepchr
+	vand	vdata0, vdata0, vrepmask
+	vand	vdata1, vdata1, vrepmask
+	vpadd.i8 vdata0_0, vdata0_0, vdata0_1
+	vpadd.i8 vdata1_0, vdata1_0, vdata1_1
+	vpadd.i8 vdata0_0, vdata0_0, vdata1_0
+	vpadd.i8 vdata0_0, vdata0_0, vdata0_0
+	vmov	synd, vdata0_0[0]
+
+	/* Clear the soff lower bits */
+	lsr	synd, synd, soff
+	lsl	synd, synd, soff
+	/* The first block can also be the last */
+	bls	.Lmasklast
+	/* Have we found something already? */
+	cbnz	synd, .Ltail
+
+
+.Lloopintro:
+	vpush	{vend}
+	/* 264/265 correspond to d8/d9 for q4 */
+	.cfi_adjust_cfa_offset 16
+	.cfi_rel_offset 264, 0
+	.cfi_rel_offset 265, 8
+	.p2align 3,,7
+.Lloop:
+	vld1.8	{vdata0, vdata1}, [src:256]!
+	subs	cntin, cntin, #32
+	vceq.i8	vdata0, vdata0, vrepchr
+	vceq.i8	vdata1, vdata1, vrepchr
+	/* If we're out of data we finish regardless of the result. */
+	bls	.Lend
+	/* Use a fast check for the termination condition. */
+	vorr	vend, vdata0, vdata1
+	vorr	vend0, vend0, vend1
+	vmov	synd, tmp, vend0
+	orrs	synd, synd, tmp
+	/* We're not out of data, loop if we haven't found the character. */
+	beq	.Lloop
+
+.Lend:
+	vpop	{vend}
+	.cfi_adjust_cfa_offset -16
+	.cfi_restore 264
+	.cfi_restore 265
+
+	/* Termination condition found, let's calculate the syndrome value. */
+	vand	vdata0, vdata0, vrepmask
+	vand	vdata1, vdata1, vrepmask
+	vpadd.i8 vdata0_0, vdata0_0, vdata0_1
+	vpadd.i8 vdata1_0, vdata1_0, vdata1_1
+	vpadd.i8 vdata0_0, vdata0_0, vdata1_0
+	vpadd.i8 vdata0_0, vdata0_0, vdata0_0
+	vmov	synd, vdata0_0[0]
+	cbz	synd, .Lnotfound
+	bhi	.Ltail
+
+
+.Lmasklast:
+	/* Clear the (-cntin) upper bits to avoid out-of-bounds matches. */
+	neg	cntin, cntin
+	lsl	synd, synd, cntin
+	lsrs	synd, synd, cntin
+	it	eq
+	moveq	src, #0	/* If no match, set src to 0 so the retval is 0.
*/ + + +.Ltail: + /* Count the trailing zeros using bit reversing */ + rbit synd, synd + /* Compensate the last post-increment */ + sub src, src, #32 + /* Count the leading zeros */ + clz synd, synd + /* Compute the potential result and return */ + add result, src, synd + bx lr + + +.Lnotfound: + /* Set result to NULL if not found and return */ + mov result, #0 + bx lr + + .cfi_endproc + .size memchr, . - memchr + +#elif __ARM_ARCH_ISA_THUMB >= 2 && defined (__ARM_FEATURE_DSP) + +#if __ARM_ARCH_PROFILE == 'M' + .arch armv7e-m +#else + .arch armv6t2 +#endif + +@ this lets us check a flag in a 00/ff byte easily in either endianness +#ifdef __ARMEB__ +#define CHARTSTMASK(c) 1<<(31-(c*8)) +#else +#define CHARTSTMASK(c) 1<<(c*8) +#endif + .text + .thumb + +@ --------------------------------------------------------------------------- + .thumb_func + .align 2 + .p2align 4,,15 + .global memchr + .type memchr,%function +memchr: + @ r0 = start of memory to scan + @ r1 = character to look for + @ r2 = length + @ returns r0 = pointer to character or NULL if not found + and r1,r1,#0xff @ Don't trust the caller to pass a char + + cmp r2,#16 @ If short don't bother with anything clever + blt 20f + + tst r0, #7 @ If it's already aligned skip the next bit + beq 10f + + @ Work up to an aligned point +5: + ldrb r3, [r0],#1 + subs r2, r2, #1 + cmp r3, r1 + beq 50f @ If it matches exit found + tst r0, #7 + cbz r2, 40f @ If we run off the end, exit not found + bne 5b @ If not aligned yet then do next byte + +10: + @ We are aligned, we know we have at least 8 bytes to work with + push {r4,r5,r6,r7} + orr r1, r1, r1, lsl #8 @ expand the match word across all bytes + orr r1, r1, r1, lsl #16 + bic r4, r2, #7 @ Number of double words to work with * 8 + mvns r7, #0 @ all F's + movs r3, #0 + +15: + ldrd r5,r6,[r0],#8 + subs r4, r4, #8 + eor r5,r5, r1 @ r5,r6 have 00's where bytes match the target + eor r6,r6, r1 + uadd8 r5, r5, r7 @ Par add 0xff - sets GE bits for bytes!=0 + sel r5, r3, r7 @ bytes are 00 for none-00 bytes, + @ or ff for 00 bytes - NOTE INVERSION + uadd8 r6, r6, r7 @ Par add 0xff - sets GE bits for bytes!=0 + sel r6, r5, r7 @ chained....bytes are 00 for none-00 bytes + @ or ff for 00 bytes - NOTE INVERSION + cbnz r6, 60f + bne 15b @ (Flags from the subs above) + + pop {r4,r5,r6,r7} + and r1,r1,#0xff @ r1 back to a single character + and r2,r2,#7 @ Leave the count remaining as the number + @ after the double words have been done + +20: + cbz r2, 40f @ 0 length or hit the end already then not found + +21: @ Post aligned section, or just a short call + ldrb r3,[r0],#1 + subs r2,r2,#1 + eor r3,r3,r1 @ r3 = 0 if match - doesn't break flags from sub + cbz r3, 50f + bne 21b @ on r2 flags + +40: + movs r0,#0 @ not found + bx lr + +50: + subs r0,r0,#1 @ found + bx lr + +60: @ We're here because the fast path found a hit + @ now we have to track down exactly which word it was + @ r0 points to the start of the double word after the one tested + @ r5 has the 00/ff pattern for the first word, r6 has the chained value + cmp r5, #0 + itte eq + moveq r5, r6 @ the end is in the 2nd word + subeq r0,r0,#3 @ Points to 2nd byte of 2nd word + subne r0,r0,#7 @ or 2nd byte of 1st word + + @ r0 currently points to the 2nd byte of the word containing the hit + tst r5, # CHARTSTMASK(0) @ 1st character + bne 61f + adds r0,r0,#1 + tst r5, # CHARTSTMASK(1) @ 2nd character + ittt eq + addeq r0,r0,#1 + tsteq r5, # (3<<15) @ 2nd & 3rd character + @ If not the 3rd must be the last one + addeq r0,r0,#1 + +61: + pop {r4,r5,r6,r7} + subs 
r0,r0,#1
+	bx	lr
+#else
+  /* Defined in memchr-stub.c. */
+#endif
diff --git a/libs/libc/machine/arm/armv7-r/gnu/arch_memcpy.S b/libs/libc/machine/arm/armv7-r/gnu/arch_memcpy.S
index c4a7cbd2ed..a6a2d34094 100644
--- a/libs/libc/machine/arm/armv7-r/gnu/arch_memcpy.S
+++ b/libs/libc/machine/arm/armv7-r/gnu/arch_memcpy.S
@@ -1,461 +1,626 @@
-/************************************************************************************
- * libs/libc/marchine/arm/armv7-r/arch_memcpy.S
- * ARMv7-R optimized memcpy.
+/****************************************************************************
+ * libs/libc/machine/arm/armv7-r/gnu/arch_memcpy.S
 *
- * Adapted for use with ARMv7-R and NuttX by:
- *
- *   Copyright (C) 2017 Gregory Nutt. All rights reserved.
- *   Author: Gregory Nutt
- *
- * Based on the ARMv7-M version contributed by Mike Smith. Apparently in the public
- * domain and is re-released here under the modified BSD license:
- *
- * Obtained via a posting on the Stellaris forum:
- *   http://e2e.ti.com/support/microcontrollers/\
- *   stellaris_arm_cortex-m3_microcontroller/f/473/t/44360.aspx
- *
- * Posted by rocksoft on Jul 24, 2008 10:19 AM
- *
- * Hi,
- *
- * I recently finished a "memcpy" replacement and thought it might be useful for
- * others...
- *
- * I've put some instructions and the code here:
- *
- * http://www.rock-software.net/downloads/memcpy/
- *
- * Hope it works for you as well as it did for me.
- *
- * Liam.
+ * Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
- * 1. Redistributions of source code must retain the above copyright
+ * * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- * 3. Neither the name NuttX nor the names of its contributors may be
- *    used to endorse or promote products derived from this software
- *    without specific prior written permission.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Linaro Limited nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * - ************************************************************************************/ + ****************************************************************************/ -/************************************************************************************ - * Public Symbols - ************************************************************************************/ +/* + * This memcpy routine is optimised for Cortex-A15 cores and takes advantage + * of VFP or NEON when built with the appropriate flags. + * + * Assumptions: + * + * ARMv6 (ARMv7-a if using Neon) + * ARM state + * Unaligned accesses + * LDRD/STRD support unaligned word accesses + * + * If compiled with GCC, this file should be enclosed within following + * pre-processing check: + * if defined (__ARM_ARCH_7A__) && defined (__ARM_FEATURE_UNALIGNED) + * + */ - .global memcpy - .syntax unified - .file "arch_memcpy.S" + .syntax unified + /* This implementation requires ARM state. */ + .arm -/************************************************************************************ - * .text - ************************************************************************************/ +#ifdef __ARM_NEON__ + .fpu neon + .arch armv7-r +# define FRAME_SIZE 4 +# define USE_VFP +# define USE_NEON + +#elif !defined (__SOFTFP__) + + .arch armv6 + .fpu vfpv2 +# define FRAME_SIZE 32 +# define USE_VFP + +#else + .arch armv6 +# define FRAME_SIZE 32 + +#endif + +/* Old versions of GAS incorrectly implement the NEON align semantics. */ +#ifdef BROKEN_ASM_NEON_ALIGN +#define ALIGN(addr, align) addr,:align +#else +#define ALIGN(addr, align) addr:align +#endif + +#define PC_OFFSET 8 /* PC pipeline compensation. */ +#define INSN_SIZE 4 + +/* Call parameters. */ +#define dstin r0 +#define src r1 +#define count r2 + +/* Locals. */ +#define tmp1 r3 +#define dst ip +#define tmp2 r10 + +#ifndef USE_NEON +/* For bulk copies using GP registers. */ +#define A_l r2 /* Call-clobbered. */ +#define A_h r3 /* Call-clobbered. */ +#define B_l r4 +#define B_h r5 +#define C_l r6 +#define C_h r7 +#define D_l r8 +#define D_h r9 +#endif + +/* Number of lines ahead to pre-fetch data. If you change this the code + below will need adjustment to compensate. 
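+
+   As a worked example: with prefetch_lines = 5 and 64-byte copy lines,
+   the VFP loop below keeps roughly 5 * 64 = 320 bytes of loads in
+   flight ahead of the store stream.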
*/ + +#define prefetch_lines 5 + +#ifdef USE_VFP + .macro cpy_line_vfp vreg, base + vstr \vreg, [dst, #\base] + vldr \vreg, [src, #\base] + vstr d0, [dst, #\base + 8] + vldr d0, [src, #\base + 8] + vstr d1, [dst, #\base + 16] + vldr d1, [src, #\base + 16] + vstr d2, [dst, #\base + 24] + vldr d2, [src, #\base + 24] + vstr \vreg, [dst, #\base + 32] + vldr \vreg, [src, #\base + prefetch_lines * 64 - 32] + vstr d0, [dst, #\base + 40] + vldr d0, [src, #\base + 40] + vstr d1, [dst, #\base + 48] + vldr d1, [src, #\base + 48] + vstr d2, [dst, #\base + 56] + vldr d2, [src, #\base + 56] + .endm + + .macro cpy_tail_vfp vreg, base + vstr \vreg, [dst, #\base] + vldr \vreg, [src, #\base] + vstr d0, [dst, #\base + 8] + vldr d0, [src, #\base + 8] + vstr d1, [dst, #\base + 16] + vldr d1, [src, #\base + 16] + vstr d2, [dst, #\base + 24] + vldr d2, [src, #\base + 24] + vstr \vreg, [dst, #\base + 32] + vstr d0, [dst, #\base + 40] + vldr d0, [src, #\base + 40] + vstr d1, [dst, #\base + 48] + vldr d1, [src, #\base + 48] + vstr d2, [dst, #\base + 56] + vldr d2, [src, #\base + 56] + .endm +#endif + + .macro def_fn f p2align=0 .text - -/************************************************************************************ - * Private Constant Data - ************************************************************************************/ - -/* We have 16 possible alignment combinations of src and dst, this jump table - * directs the copy operation - * - * Bits: Src=00, Dst=00 - Long to Long copy - * Bits: Src=00, Dst=01 - Long to Byte before half word - * Bits: Src=00, Dst=10 - Long to Half word - * Bits: Src=00, Dst=11 - Long to Byte before long word - * Bits: Src=01, Dst=00 - Byte before half word to long - * Bits: Src=01, Dst=01 - Byte before half word to byte before half word - - * Same alignment - * Bits: Src=01, Dst=10 - Byte before half word to half word - * Bits: Src=01, Dst=11 - Byte before half word to byte before long word - * Bits: Src=10, Dst=00 - Half word to long word - * Bits: Src=10, Dst=01 - Half word to byte before half word - * Bits: Src=10, Dst=10 - Half word to half word - Same Alignment - * Bits: Src=10, Dst=11 - Half word to byte before long word - * Bits: Src=11, Dst=00 - Byte before long word to long word - * Bits: Src=11, Dst=01 - Byte before long word to byte before half word - * Bits: Src=11, Dst=11 - Byte before long word to half word - * Bits: Src=11, Dst=11 - Byte before long word to Byte before long word - - * Same alignment - */ - -MEM_DataCopyTable: - .byte (MEM_DataCopy0 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy1 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy2 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy3 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy4 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy5 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy6 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy7 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy8 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy9 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy10 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy11 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy12 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy13 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy14 - MEM_DataCopyJump) >> 2 - .byte (MEM_DataCopy15 - MEM_DataCopyJump) >> 2 - - .align 2 - -MEM_LongCopyTable: - .byte (MEM_LongCopyEnd - MEM_LongCopyJump) >> 2 /* 0 bytes left */ - .byte (MEM_LongCopyJump0 - MEM_LongCopyJump) >> 2 /* 4 bytes left */ - .byte (MEM_LongCopyJump1 - MEM_LongCopyJump) >> 2 /* 8 bytes left */ - .byte (MEM_LongCopyJump2 - 
MEM_LongCopyJump) >> 2 /* 12 bytes left */ - .byte (MEM_LongCopyJump3 - MEM_LongCopyJump) >> 2 /* 16 bytes left */ - .byte (MEM_LongCopyJump4 - MEM_LongCopyJump) >> 2 /* 20 bytes left */ - .byte (MEM_LongCopyJump5 - MEM_LongCopyJump) >> 2 /* 24 bytes left */ - .byte (MEM_LongCopyJump6 - MEM_LongCopyJump) >> 2 /* 28 bytes left */ - .byte (MEM_LongCopyJump7 - MEM_LongCopyJump) >> 2 /* 32 bytes left */ - .byte (MEM_LongCopyJump8 - MEM_LongCopyJump) >> 2 /* 36 bytes left */ - -/************************************************************************************ - * Public Functions - ************************************************************************************/ -/************************************************************************************ - * Name: memcpy - * - * Description: - * Optimized "general" copy routine - * - * Input Parameters: - * r0 = destination, r1 = source, r2 = length - * - * Returned Value: - * r0 = destination r1-r3 burned - * - ************************************************************************************/ - - .align 4 - -memcpy: - push {r14} - push {r0} - bl _do_memcpy - pop {r0} - pop {pc} - - .align 4 - -_do_memcpy: - push {r14} - push {r4} - - /* This allows the inner workings to "assume" a minimum amount of bytes */ - /* Quickly check for very short copies */ - - cmp r2, #4 - blt MEM_DataCopyBytes - - and r14, r0, #3 /* Get destination alignment bits */ - bfi r14, r1, #2, #2 /* Get source alignment bits */ - - ldr r3, =MEM_DataCopyTable /* Jump table base address */ - ldrb r4, [r3, r14] /* DWord offset for this alignment combination */ - ldr r3, =MEM_DataCopyJump /* Base of branch table anchor */ - add r3, r3, r4, lsl #2 /* Absolute address of logic */ - bx r3 - - /* data copy branch table anchor */ - - .align 4 -MEM_DataCopyJump: - -/* Bits: Src=01, Dst=01 - Byte before half word to byte before half word - Same alignment - * 3 bytes to read for long word aligning - */ - -MEM_DataCopy5: - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - sub r2, r2, #0x01 - -/* Bits: Src=10, Dst=10 - Half word to half word - Same Alignment - * 2 bytes to read for long word aligning - */ - -MEM_DataCopy10: - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - sub r2, r2, #0x01 - -/* Bits: Src=11, Dst=11 - Byte before long word to Byte before long word - Same alignment - * 1 bytes to read for long word aligning - */ - -MEM_DataCopy15: - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - sub r2, r2, #0x01 - -/* Bits: Src=00, Dst=00 - Long to Long copy */ - -MEM_DataCopy0: - /* Save regs that may be used by memcpy */ - - push {r5-r12} - - /* Check for short word-aligned copy */ - - cmp r2, #0x28 - blt MEM_DataCopy0_2 - - /* Bulk copy loop */ - -MEM_DataCopy0_1: - ldmia r1!, {r3-r12} - stmia r0!, {r3-r12} - sub r2, r2, #0x28 - cmp r2, #0x28 - bge MEM_DataCopy0_1 - - /* Copy remaining long words */ - -MEM_DataCopy0_2: - ldr r14, =MEM_LongCopyTable /* Jump table base address */ - lsr r11, r2, 2 /* Convert byte count to word count */ - add r14, r14, r11 /* Jump table offset address */ - ldrb r3, [r14] /* DWord offset from branch table anchor */ - ldr r11, =MEM_LongCopyJump /* Address of branch table anchor */ - add r11, r11, r3, lsl #2 /* Absolute address into branch table */ - bx r11 /* Go there */ - - /* longword copy branch table anchor */ - -MEM_LongCopyJump: - -MEM_LongCopyJump0: - ldr r3, [r1], #0x04 /* 4 bytes remain */ - str r3, [r0], #0x04 - b MEM_LongCopyEnd - -MEM_LongCopyJump1: - ldmia r1!, {r3-r4} /* 8 bytes remain */ - stmia r0!, {r3-r4} - b MEM_LongCopyEnd - 
-MEM_LongCopyJump2: - ldmia r1!, {r3-r5} /* 12 bytes remain */ - stmia r0!, {r3-r5} - b MEM_LongCopyEnd - -MEM_LongCopyJump3: - ldmia r1!, {r3-r6} /* 16 bytes remain */ - stmia r0!, {r3-r6} - b MEM_LongCopyEnd - -MEM_LongCopyJump4: - ldmia r1!, {r3-r7} /* 20 bytes remain */ - stmia r0!, {r3-r7} - b MEM_LongCopyEnd - -MEM_LongCopyJump5: - ldmia r1!, {r3-r8} /* 24 bytes remain */ - stmia r0!, {r3-r8} - b MEM_LongCopyEnd - -MEM_LongCopyJump6: - ldmia r1!, {r3-r9} /* 28 bytes remain */ - stmia r0!, {r3-r9} - b MEM_LongCopyEnd - -MEM_LongCopyJump7: - ldmia r1!, {r3-r10} /* 32 bytes remain */ - stmia r0!, {r3-r10} - b MEM_LongCopyEnd - -MEM_LongCopyJump8: - ldmia r1!, {r3-r11} /* 36 bytes remain */ - stmia r0!, {r3-r11} - -MEM_LongCopyEnd: - pop {r5-r12} - and r2, r2, #0x03 /* All the longs have been copied */ - - /* Deal with up to 3 remaining bytes */ - -MEM_DataCopyBytes: - /* Deal with up to 3 remaining bytes */ - - pop {r4} - cmp r2, #0x00 - it eq - popeq {pc} - - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - subs r2, r2, #0x01 - it eq - popeq {pc} - - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - subs r2, r2, #0x01 - it eq - popeq {pc} - - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - pop {pc} - - .align 4 - -/* Bits: Src=01, Dst=11 - Byte before half word to byte before long word - * 3 bytes to read for long word aligning the source - */ - -MEM_DataCopy7: - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - sub r2, r2, #0x01 - -/* Bits: Src=10, Dst=00 - Half word to long word - * 2 bytes to read for long word aligning the source - */ - -MEM_DataCopy8: - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - sub r2, r2, #0x01 - -/* Bits: Src=11, Dst=01 - Byte before long word to byte before half word - * 1 byte to read for long word aligning the source - */ - -MEM_DataCopy13: - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - sub r2, r2, #0x01 - -/* Bits: Src=00, Dst=10 - Long to Half word */ - -MEM_DataCopy2: - cmp r2, #0x28 - blt MEM_DataCopy2_1 - - /* Save regs */ - - push {r5-r12} - - /* Bulk copy loop */ - -MEM_DataCopy2_2: - ldmia r1!, {r3-r12} - - strh r3, [r0], #0x02 - - lsr r3, r3, #0x10 - bfi r3, r4, #0x10, #0x10 - lsr r4, r4, #0x10 - bfi r4, r5, #0x10, #0x10 - lsr r5, r5, #0x10 - bfi r5, r6, #0x10, #0x10 - lsr r6, r6, #0x10 - bfi r6, r7, #0x10, #0x10 - lsr r7, r7, #0x10 - bfi r7, r8, #0x10, #0x10 - lsr r8, r8, #0x10 - bfi r8, r9, #0x10, #0x10 - lsr r9, r9, #0x10 - bfi r9, r10, #0x10, #0x10 - lsr r10, r10, #0x10 - bfi r10, r11, #0x10, #0x10 - lsr r11, r11, #0x10 - bfi r11, r12, #0x10, #0x10 - stmia r0!, {r3-r11} - lsr r12, r12, #0x10 - strh r12, [r0], #0x02 - - sub r2, r2, #0x28 - cmp r2, #0x28 - bge MEM_DataCopy2_2 - pop {r5-r12} - -MEM_DataCopy2_1: /* Read longs and write 2 x half words */ - cmp r2, #4 - blt MEM_DataCopyBytes - ldr r3, [r1], #0x04 - strh r3, [r0], #0x02 - lsr r3, r3, #0x10 - strh r3, [r0], #0x02 - sub r2, r2, #0x04 - b MEM_DataCopy2 - -/* Bits: Src=01, Dst=00 - Byte before half word to long - * Bits: Src=01, Dst=10 - Byte before half word to half word - * 3 bytes to read for long word aligning the source - */ - -MEM_DataCopy4: -MEM_DataCopy6: - /* Read B and write B */ - - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - sub r2, r2, #0x01 - -/* Bits: Src=10, Dst=01 - Half word to byte before half word - * Bits: Src=10, Dst=11 - Half word to byte before long word - * 2 bytes to read for long word aligning the source - */ - -MEM_DataCopy9: -MEM_DataCopy11: - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - sub r2, r2, #0x01 - -/* Bits: Src=11, Dst=00 -chm Byte before long word to long 
word - * Bits: Src=11, Dst=11 - Byte before long word to half word - * 1 byte to read for long word aligning the source - */ - -MEM_DataCopy12: -MEM_DataCopy14: - /* Read B and write B */ - - ldrb r3, [r1], #0x01 - strb r3, [r0], #0x01 - sub r2, r2, #0x01 - -/* Bits: Src=00, Dst=01 - Long to Byte before half word - * Bits: Src=00, Dst=11 - Long to Byte before long word - */ - -MEM_DataCopy1: /* Read longs, write B->H->B */ -MEM_DataCopy3: - cmp r2, #4 - blt MEM_DataCopyBytes - ldr r3, [r1], #0x04 - strb r3, [r0], #0x01 - lsr r3, r3, #0x08 - strh r3, [r0], #0x02 - lsr r3, r3, #0x10 - strb r3, [r0], #0x01 - sub r2, r2, #0x04 - b MEM_DataCopy3 - - .size memcpy, .-memcpy - .end + .p2align \p2align + .global \f + .type \f, %function +\f: + .endm + +def_fn memcpy p2align=6 + + mov dst, dstin /* Preserve dstin, we need to return it. */ + cmp count, #64 + bge .Lcpy_not_short + /* Deal with small copies quickly by dropping straight into the + exit block. */ + +.Ltail63unaligned: +#ifdef USE_NEON + and tmp1, count, #0x38 + rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE) + add pc, pc, tmp1 + vld1.8 {d0}, [src]! /* 14 words to go. */ + vst1.8 {d0}, [dst]! + vld1.8 {d0}, [src]! /* 12 words to go. */ + vst1.8 {d0}, [dst]! + vld1.8 {d0}, [src]! /* 10 words to go. */ + vst1.8 {d0}, [dst]! + vld1.8 {d0}, [src]! /* 8 words to go. */ + vst1.8 {d0}, [dst]! + vld1.8 {d0}, [src]! /* 6 words to go. */ + vst1.8 {d0}, [dst]! + vld1.8 {d0}, [src]! /* 4 words to go. */ + vst1.8 {d0}, [dst]! + vld1.8 {d0}, [src]! /* 2 words to go. */ + vst1.8 {d0}, [dst]! + + tst count, #4 + ldrne tmp1, [src], #4 + strne tmp1, [dst], #4 +#else + /* Copy up to 15 full words of data. May not be aligned. */ + /* Cannot use VFP for unaligned data. */ + and tmp1, count, #0x3c + add dst, dst, tmp1 + add src, src, tmp1 + rsb tmp1, tmp1, #(60 - PC_OFFSET/2 + INSN_SIZE/2) + /* Jump directly into the sequence below at the correct offset. */ + add pc, pc, tmp1, lsl #1 + + ldr tmp1, [src, #-60] /* 15 words to go. */ + str tmp1, [dst, #-60] + + ldr tmp1, [src, #-56] /* 14 words to go. */ + str tmp1, [dst, #-56] + ldr tmp1, [src, #-52] + str tmp1, [dst, #-52] + + ldr tmp1, [src, #-48] /* 12 words to go. */ + str tmp1, [dst, #-48] + ldr tmp1, [src, #-44] + str tmp1, [dst, #-44] + + ldr tmp1, [src, #-40] /* 10 words to go. */ + str tmp1, [dst, #-40] + ldr tmp1, [src, #-36] + str tmp1, [dst, #-36] + + ldr tmp1, [src, #-32] /* 8 words to go. */ + str tmp1, [dst, #-32] + ldr tmp1, [src, #-28] + str tmp1, [dst, #-28] + + ldr tmp1, [src, #-24] /* 6 words to go. */ + str tmp1, [dst, #-24] + ldr tmp1, [src, #-20] + str tmp1, [dst, #-20] + + ldr tmp1, [src, #-16] /* 4 words to go. */ + str tmp1, [dst, #-16] + ldr tmp1, [src, #-12] + str tmp1, [dst, #-12] + + ldr tmp1, [src, #-8] /* 2 words to go. */ + str tmp1, [dst, #-8] + ldr tmp1, [src, #-4] + str tmp1, [dst, #-4] +#endif + + lsls count, count, #31 + ldrhcs tmp1, [src], #2 + ldrbne src, [src] /* Src is dead, use as a scratch. */ + strhcs tmp1, [dst], #2 + strbne src, [dst] + bx lr + +.Lcpy_not_short: + /* At least 64 bytes to copy, but don't know the alignment yet. */ + str tmp2, [sp, #-FRAME_SIZE]! + and tmp2, src, #7 + and tmp1, dst, #7 + cmp tmp1, tmp2 + bne .Lcpy_notaligned + +#ifdef USE_VFP + /* Magic dust alert! Force VFP on Cortex-A9. Experiments show + that the FP pipeline is much better at streaming loads and + stores. This is outside the critical loop. 
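+	   (The vmov.f32 below copies s0 to itself -- architecturally a
+	   no-op, present only to exercise the VFP pipeline.)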
*/ + vmov.f32 s0, s0 +#endif + + /* SRC and DST have the same mutual 32-bit alignment, but we may + still need to pre-copy some bytes to get to natural alignment. + We bring DST into full 64-bit alignment. */ + lsls tmp2, dst, #29 + beq 1f + rsbs tmp2, tmp2, #0 + sub count, count, tmp2, lsr #29 + ldrmi tmp1, [src], #4 + strmi tmp1, [dst], #4 + lsls tmp2, tmp2, #2 + ldrhcs tmp1, [src], #2 + ldrbne tmp2, [src], #1 + strhcs tmp1, [dst], #2 + strbne tmp2, [dst], #1 + +1: + subs tmp2, count, #64 /* Use tmp2 for count. */ + blt .Ltail63aligned + + cmp tmp2, #512 + bge .Lcpy_body_long + +.Lcpy_body_medium: /* Count in tmp2. */ +#ifdef USE_VFP +1: + vldr d0, [src, #0] + subs tmp2, tmp2, #64 + vldr d1, [src, #8] + vstr d0, [dst, #0] + vldr d0, [src, #16] + vstr d1, [dst, #8] + vldr d1, [src, #24] + vstr d0, [dst, #16] + vldr d0, [src, #32] + vstr d1, [dst, #24] + vldr d1, [src, #40] + vstr d0, [dst, #32] + vldr d0, [src, #48] + vstr d1, [dst, #40] + vldr d1, [src, #56] + vstr d0, [dst, #48] + add src, src, #64 + vstr d1, [dst, #56] + add dst, dst, #64 + bge 1b + tst tmp2, #0x3f + beq .Ldone + +.Ltail63aligned: /* Count in tmp2. */ + and tmp1, tmp2, #0x38 + add dst, dst, tmp1 + add src, src, tmp1 + rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE) + add pc, pc, tmp1 + + vldr d0, [src, #-56] /* 14 words to go. */ + vstr d0, [dst, #-56] + vldr d0, [src, #-48] /* 12 words to go. */ + vstr d0, [dst, #-48] + vldr d0, [src, #-40] /* 10 words to go. */ + vstr d0, [dst, #-40] + vldr d0, [src, #-32] /* 8 words to go. */ + vstr d0, [dst, #-32] + vldr d0, [src, #-24] /* 6 words to go. */ + vstr d0, [dst, #-24] + vldr d0, [src, #-16] /* 4 words to go. */ + vstr d0, [dst, #-16] + vldr d0, [src, #-8] /* 2 words to go. */ + vstr d0, [dst, #-8] +#else + sub src, src, #8 + sub dst, dst, #8 +1: + ldrd A_l, A_h, [src, #8] + strd A_l, A_h, [dst, #8] + ldrd A_l, A_h, [src, #16] + strd A_l, A_h, [dst, #16] + ldrd A_l, A_h, [src, #24] + strd A_l, A_h, [dst, #24] + ldrd A_l, A_h, [src, #32] + strd A_l, A_h, [dst, #32] + ldrd A_l, A_h, [src, #40] + strd A_l, A_h, [dst, #40] + ldrd A_l, A_h, [src, #48] + strd A_l, A_h, [dst, #48] + ldrd A_l, A_h, [src, #56] + strd A_l, A_h, [dst, #56] + ldrd A_l, A_h, [src, #64]! + strd A_l, A_h, [dst, #64]! + subs tmp2, tmp2, #64 + bge 1b + tst tmp2, #0x3f + bne 1f + ldr tmp2,[sp], #FRAME_SIZE + bx lr +1: + add src, src, #8 + add dst, dst, #8 + +.Ltail63aligned: /* Count in tmp2. */ + /* Copy up to 7 d-words of data. Similar to Ltail63unaligned, but + we know that the src and dest are 32-bit aligned so we can use + LDRD/STRD to improve efficiency. */ + /* TMP2 is now negative, but we don't care about that. The bottom + six bits still tell us how many bytes are left to copy. */ + + and tmp1, tmp2, #0x38 + add dst, dst, tmp1 + add src, src, tmp1 + rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE) + add pc, pc, tmp1 + ldrd A_l, A_h, [src, #-56] /* 14 words to go. */ + strd A_l, A_h, [dst, #-56] + ldrd A_l, A_h, [src, #-48] /* 12 words to go. */ + strd A_l, A_h, [dst, #-48] + ldrd A_l, A_h, [src, #-40] /* 10 words to go. */ + strd A_l, A_h, [dst, #-40] + ldrd A_l, A_h, [src, #-32] /* 8 words to go. */ + strd A_l, A_h, [dst, #-32] + ldrd A_l, A_h, [src, #-24] /* 6 words to go. */ + strd A_l, A_h, [dst, #-24] + ldrd A_l, A_h, [src, #-16] /* 4 words to go. */ + strd A_l, A_h, [dst, #-16] + ldrd A_l, A_h, [src, #-8] /* 2 words to go. */ + strd A_l, A_h, [dst, #-8] + +#endif + tst tmp2, #4 + ldrne tmp1, [src], #4 + strne tmp1, [dst], #4 + lsls tmp2, tmp2, #31 /* Count (tmp2) now dead. 
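+	   After the shift, C holds bit 1 of the residual count (a halfword
+	   remains) and Z is clear iff bit 0 is set (a final byte remains),
+	   which drives the conditional ldrh/strh and ldrb/strb below.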
*/ + ldrhcs tmp1, [src], #2 + ldrbne tmp2, [src] + strhcs tmp1, [dst], #2 + strbne tmp2, [dst] + +.Ldone: + ldr tmp2, [sp], #FRAME_SIZE + bx lr + +.Lcpy_body_long: /* Count in tmp2. */ + + /* Long copy. We know that there's at least (prefetch_lines * 64) + bytes to go. */ +#ifdef USE_VFP + /* Don't use PLD. Instead, read some data in advance of the current + copy position into a register. This should act like a PLD + operation but we won't have to repeat the transfer. */ + + vldr d3, [src, #0] + vldr d4, [src, #64] + vldr d5, [src, #128] + vldr d6, [src, #192] + vldr d7, [src, #256] + + vldr d0, [src, #8] + vldr d1, [src, #16] + vldr d2, [src, #24] + add src, src, #32 + + subs tmp2, tmp2, #prefetch_lines * 64 * 2 + blt 2f +1: + cpy_line_vfp d3, 0 + cpy_line_vfp d4, 64 + cpy_line_vfp d5, 128 + add dst, dst, #3 * 64 + add src, src, #3 * 64 + cpy_line_vfp d6, 0 + cpy_line_vfp d7, 64 + add dst, dst, #2 * 64 + add src, src, #2 * 64 + subs tmp2, tmp2, #prefetch_lines * 64 + bge 1b + +2: + cpy_tail_vfp d3, 0 + cpy_tail_vfp d4, 64 + cpy_tail_vfp d5, 128 + add src, src, #3 * 64 + add dst, dst, #3 * 64 + cpy_tail_vfp d6, 0 + vstr d7, [dst, #64] + vldr d7, [src, #64] + vstr d0, [dst, #64 + 8] + vldr d0, [src, #64 + 8] + vstr d1, [dst, #64 + 16] + vldr d1, [src, #64 + 16] + vstr d2, [dst, #64 + 24] + vldr d2, [src, #64 + 24] + vstr d7, [dst, #64 + 32] + add src, src, #96 + vstr d0, [dst, #64 + 40] + vstr d1, [dst, #64 + 48] + vstr d2, [dst, #64 + 56] + add dst, dst, #128 + add tmp2, tmp2, #prefetch_lines * 64 + b .Lcpy_body_medium +#else + /* Long copy. Use an SMS style loop to maximize the I/O + bandwidth of the core. We don't have enough spare registers + to synthesise prefetching, so use PLD operations. */ + /* Pre-bias src and dst. */ + sub src, src, #8 + sub dst, dst, #8 + pld [src, #8] + pld [src, #72] + subs tmp2, tmp2, #64 + pld [src, #136] + ldrd A_l, A_h, [src, #8] + strd B_l, B_h, [sp, #8] + ldrd B_l, B_h, [src, #16] + strd C_l, C_h, [sp, #16] + ldrd C_l, C_h, [src, #24] + strd D_l, D_h, [sp, #24] + pld [src, #200] + ldrd D_l, D_h, [src, #32]! + b 1f + .p2align 6 +2: + pld [src, #232] + strd A_l, A_h, [dst, #40] + ldrd A_l, A_h, [src, #40] + strd B_l, B_h, [dst, #48] + ldrd B_l, B_h, [src, #48] + strd C_l, C_h, [dst, #56] + ldrd C_l, C_h, [src, #56] + strd D_l, D_h, [dst, #64]! + ldrd D_l, D_h, [src, #64]! + subs tmp2, tmp2, #64 +1: + strd A_l, A_h, [dst, #8] + ldrd A_l, A_h, [src, #8] + strd B_l, B_h, [dst, #16] + ldrd B_l, B_h, [src, #16] + strd C_l, C_h, [dst, #24] + ldrd C_l, C_h, [src, #24] + strd D_l, D_h, [dst, #32] + ldrd D_l, D_h, [src, #32] + bcs 2b + /* Save the remaining bytes and restore the callee-saved regs. */ + strd A_l, A_h, [dst, #40] + add src, src, #40 + strd B_l, B_h, [dst, #48] + ldrd B_l, B_h, [sp, #8] + strd C_l, C_h, [dst, #56] + ldrd C_l, C_h, [sp, #16] + strd D_l, D_h, [dst, #64] + ldrd D_l, D_h, [sp, #24] + add dst, dst, #72 + tst tmp2, #0x3f + bne .Ltail63aligned + ldr tmp2, [sp], #FRAME_SIZE + bx lr +#endif + +.Lcpy_notaligned: + pld [src] + pld [src, #64] + /* There's at least 64 bytes to copy, but there is no mutual + alignment. */ + /* Bring DST to 64-bit alignment. 
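+	   dst << 29 is zero iff dst is already 8-byte aligned; otherwise
+	   the negated value drives conditional word/halfword/byte copies
+	   that consume the 1-7 leading bytes, with count reduced to match.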
*/ + lsls tmp2, dst, #29 + pld [src, #(2 * 64)] + beq 1f + rsbs tmp2, tmp2, #0 + sub count, count, tmp2, lsr #29 + ldrmi tmp1, [src], #4 + strmi tmp1, [dst], #4 + lsls tmp2, tmp2, #2 + ldrbne tmp1, [src], #1 + ldrhcs tmp2, [src], #2 + strbne tmp1, [dst], #1 + strhcs tmp2, [dst], #2 +1: + pld [src, #(3 * 64)] + subs count, count, #64 + ldrmi tmp2, [sp], #FRAME_SIZE + bmi .Ltail63unaligned + pld [src, #(4 * 64)] + +#ifdef USE_NEON + vld1.8 {d0-d3}, [src]! + vld1.8 {d4-d7}, [src]! + subs count, count, #64 + bmi 2f +1: + pld [src, #(4 * 64)] + vst1.8 {d0-d3}, [ALIGN (dst, 64)]! + vld1.8 {d0-d3}, [src]! + vst1.8 {d4-d7}, [ALIGN (dst, 64)]! + vld1.8 {d4-d7}, [src]! + subs count, count, #64 + bpl 1b +2: + vst1.8 {d0-d3}, [ALIGN (dst, 64)]! + vst1.8 {d4-d7}, [ALIGN (dst, 64)]! + ands count, count, #0x3f +#else + /* Use an SMS style loop to maximize the I/O bandwidth. */ + sub src, src, #4 + sub dst, dst, #8 + subs tmp2, count, #64 /* Use tmp2 for count. */ + ldr A_l, [src, #4] + ldr A_h, [src, #8] + strd B_l, B_h, [sp, #8] + ldr B_l, [src, #12] + ldr B_h, [src, #16] + strd C_l, C_h, [sp, #16] + ldr C_l, [src, #20] + ldr C_h, [src, #24] + strd D_l, D_h, [sp, #24] + ldr D_l, [src, #28] + ldr D_h, [src, #32]! + b 1f + .p2align 6 +2: + pld [src, #(5 * 64) - (32 - 4)] + strd A_l, A_h, [dst, #40] + ldr A_l, [src, #36] + ldr A_h, [src, #40] + strd B_l, B_h, [dst, #48] + ldr B_l, [src, #44] + ldr B_h, [src, #48] + strd C_l, C_h, [dst, #56] + ldr C_l, [src, #52] + ldr C_h, [src, #56] + strd D_l, D_h, [dst, #64]! + ldr D_l, [src, #60] + ldr D_h, [src, #64]! + subs tmp2, tmp2, #64 +1: + strd A_l, A_h, [dst, #8] + ldr A_l, [src, #4] + ldr A_h, [src, #8] + strd B_l, B_h, [dst, #16] + ldr B_l, [src, #12] + ldr B_h, [src, #16] + strd C_l, C_h, [dst, #24] + ldr C_l, [src, #20] + ldr C_h, [src, #24] + strd D_l, D_h, [dst, #32] + ldr D_l, [src, #28] + ldr D_h, [src, #32] + bcs 2b + + /* Save the remaining bytes and restore the callee-saved regs. */ + strd A_l, A_h, [dst, #40] + add src, src, #36 + strd B_l, B_h, [dst, #48] + ldrd B_l, B_h, [sp, #8] + strd C_l, C_h, [dst, #56] + ldrd C_l, C_h, [sp, #16] + strd D_l, D_h, [dst, #64] + ldrd D_l, D_h, [sp, #24] + add dst, dst, #72 + ands count, tmp2, #0x3f +#endif + ldr tmp2, [sp], #FRAME_SIZE + bne .Ltail63unaligned + bx lr + + .size memcpy, . - memcpy diff --git a/libs/libc/machine/arm/armv7-r/gnu/arch_memmove.S b/libs/libc/machine/arm/armv7-r/gnu/arch_memmove.S new file mode 100644 index 0000000000..38b3eee5d4 --- /dev/null +++ b/libs/libc/machine/arm/armv7-r/gnu/arch_memmove.S @@ -0,0 +1,66 @@ +/**************************************************************************** + * libs/libc/machine/arm/armv7-r/gnu/arch_memmove.S + * + * Copyright (c) 2015 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + ****************************************************************************/ + + .thumb + .syntax unified + .global memmove + .type memmove, %function +memmove: + cmp r0, r1 + push {r4} + bls 3f + adds r3, r1, r2 + cmp r0, r3 + bcs 3f + adds r1, r0, r2 + cbz r2, 2f + subs r2, r3, r2 +1: + ldrb r4, [r3, #-1]! + cmp r2, r3 + strb r4, [r1, #-1]! + bne 1b +2: + pop {r4} + bx lr +3: + cmp r2, #0 + beq 2b + add r2, r2, r1 + subs r3, r0, #1 +4: + ldrb r4, [r1], #1 + cmp r2, r1 + strb r4, [r3, #1]! + bne 4b + pop {r4} + bx lr + .size memmove, . - memmove diff --git a/libs/libc/machine/arm/armv7-r/gnu/arch_memset.S b/libs/libc/machine/arm/armv7-r/gnu/arch_memset.S new file mode 100644 index 0000000000..109514283b --- /dev/null +++ b/libs/libc/machine/arm/armv7-r/gnu/arch_memset.S @@ -0,0 +1,146 @@ +/**************************************************************************** + * libs/libc/machine/arm/armv7-r/gnu/arch_memset.S + * + * Copyright (C) 2013 The Android Open Source Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + ****************************************************************************/ + + .arm + .syntax unified + .global memset + .type memset, %function +memset: + mov r3, r0 + // At this point only d0, d1 are going to be used below. + vdup.8 q0, r1 + cmp r2, #16 + blo .L_set_less_than_16_unknown_align + +.L_check_alignment: + // Align destination to a double word to avoid the store crossing + // a cache line boundary. 
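+  // ip = dst & 7 below; if it is nonzero, the 1-7 bytes up to the
+  // next 8-byte boundary are stored first in .L_do_double_word_align.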
+ ands ip, r3, #7 + bne .L_do_double_word_align + +.L_double_word_aligned: + // Duplicate since the less than 64 can use d2, d3. + vmov q1, q0 + subs r2, #64 + blo .L_set_less_than_64 + + // Duplicate the copy value so that we can store 64 bytes at a time. + vmov q2, q0 + vmov q3, q0 + +1: + // Main loop stores 64 bytes at a time. + subs r2, #64 + vstmia r3!, {d0 - d7} + bge 1b + +.L_set_less_than_64: + // Restore r2 to the count of bytes left to set. + add r2, #64 + lsls ip, r2, #27 + bcc .L_set_less_than_32 + // Set 32 bytes. + vstmia r3!, {d0 - d3} + +.L_set_less_than_32: + bpl .L_set_less_than_16 + // Set 16 bytes. + vstmia r3!, {d0, d1} + +.L_set_less_than_16: + // Less than 16 bytes to set. + lsls ip, r2, #29 + bcc .L_set_less_than_8 + + // Set 8 bytes. + vstmia r3!, {d0} + +.L_set_less_than_8: + bpl .L_set_less_than_4 + // Set 4 bytes + vst1.32 {d0[0]}, [r3]! + +.L_set_less_than_4: + lsls ip, r2, #31 + it ne + strbne r1, [r3], #1 + itt cs + strbcs r1, [r3], #1 + strbcs r1, [r3] + bx lr + +.L_do_double_word_align: + rsb ip, ip, #8 + sub r2, r2, ip + + // Do this comparison now, otherwise we'll need to save a + // register to the stack since we've used all available + // registers. + cmp ip, #4 + blo 1f + + // Need to do a four byte copy. + movs ip, ip, lsl #31 + it mi + strbmi r1, [r3], #1 + itt cs + strbcs r1, [r3], #1 + strbcs r1, [r3], #1 + vst1.32 {d0[0]}, [r3]! + b .L_double_word_aligned + +1: + // No four byte copy. + movs ip, ip, lsl #31 + it mi + strbmi r1, [r3], #1 + itt cs + strbcs r1, [r3], #1 + strbcs r1, [r3], #1 + b .L_double_word_aligned + +.L_set_less_than_16_unknown_align: + // Set up to 15 bytes. + movs ip, r2, lsl #29 + bcc 1f + vst1.8 {d0}, [r3]! +1: + bge 2f + vst1.32 {d0[0]}, [r3]! +2: + movs ip, r2, lsl #31 + it mi + strbmi r1, [r3], #1 + itt cs + strbcs r1, [r3], #1 + strbcs r1, [r3], #1 + bx lr + .size memset, . - memset diff --git a/libs/libc/machine/arm/armv7-r/gnu/arch_strcmp.S b/libs/libc/machine/arm/armv7-r/gnu/arch_strcmp.S new file mode 100644 index 0000000000..e219b08dab --- /dev/null +++ b/libs/libc/machine/arm/armv7-r/gnu/arch_strcmp.S @@ -0,0 +1,303 @@ +/**************************************************************************** + * libs/libc/machine/arm/armv7-r/gnu/arch_strcmp.S + * + * Copyright (c) 2011 The Android Open Source Project + * Copyright (c) 2008 ARM Ltd + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
diff --git a/libs/libc/machine/arm/armv7-r/gnu/arch_strcmp.S b/libs/libc/machine/arm/armv7-r/gnu/arch_strcmp.S
new file mode 100644
index 0000000000..e219b08dab
--- /dev/null
+++ b/libs/libc/machine/arm/armv7-r/gnu/arch_strcmp.S
@@ -0,0 +1,303 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv7-r/gnu/arch_strcmp.S
+ *
+ * Copyright (c) 2011 The Android Open Source Project
+ * Copyright (c) 2008 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+#ifdef __ARMEB__
+#define SHFT2LSB	lsl
+#define SHFT2LSBEQ	lsleq
+#define SHFT2MSB	lsr
+#define SHFT2MSBEQ	lsreq
+#define MSB		0x000000ff
+#define LSB		0xff000000
+#else
+#define SHFT2LSB	lsr
+#define SHFT2LSBEQ	lsreq
+#define SHFT2MSB	lsl
+#define SHFT2MSBEQ	lsleq
+#define MSB		0xff000000
+#define LSB		0x000000ff
+#endif
+#define magic1(REG)	REG
+#define magic2(REG)	REG, lsl #7
+
+	.arm
+	.syntax	unified
+	.global	strcmp
+	.type	strcmp, %function
+strcmp:
+	pld	[r0, #0]
+	pld	[r1, #0]
+	eor	r2, r0, r1
+	tst	r2, #3
+	/* Strings not at same byte offset from a word boundary. */
+	bne	.Lstrcmp_unaligned
+	ands	r2, r0, #3
+	bic	r0, r0, #3
+	bic	r1, r1, #3
+	ldr	ip, [r0], #4
+	it	eq
+	ldreq	r3, [r1], #4
+	beq	1f
+	/* Although s1 and s2 have identical initial alignment, they are
+	 * not currently word aligned.  Rather than comparing bytes,
+	 * make sure that any bytes fetched from before the addressed
+	 * bytes are forced to 0xff.  Then they will always compare
+	 * equal.
+	 */
+	eor	r2, r2, #3
+	lsl	r2, r2, #3
+	mvn	r3, #MSB
+	SHFT2LSB	r2, r3, r2
+	ldr	r3, [r1], #4
+	orr	ip, ip, r2
+	orr	r3, r3, r2
+1:
+	/* Load the 'magic' constant 0x01010101.  A word x contains a zero
+	 * byte exactly when (x - 0x01010101) & ~x & 0x80808080 is nonzero;
+	 * magic2() supplies the 0x80808080 as 0x01010101 << 7.
+	 */
+	str	r4, [sp, #-4]!
+	mov	r4, #1
+	orr	r4, r4, r4, lsl #8
+	orr	r4, r4, r4, lsl #16
+	.p2align	2
+4:
+	pld	[r0, #8]
+	pld	[r1, #8]
+	sub	r2, ip, magic1(r4)
+	cmp	ip, r3
+	itttt	eq
+	/* check for any zero bytes in first word */
+	biceq	r2, r2, ip
+	tsteq	r2, magic2(r4)
+	ldreq	ip, [r0], #4
+	ldreq	r3, [r1], #4
+	beq	4b
+2:
+	/* There's a zero or a different byte in the word */
+	SHFT2MSB	r0, ip, #24
+	SHFT2LSB	ip, ip, #8
+	cmp	r0, #1
+	it	cs
+	cmpcs	r0, r3, SHFT2MSB #24
+	it	eq
+	SHFT2LSBEQ	r3, r3, #8
+	beq	2b
+	/* On a big-endian machine, r0 contains the desired byte in bits
+	 * 0-7; on a little-endian machine they are in bits 24-31.  In
+	 * both cases the other bits in r0 are all zero.  For r3 the
+	 * interesting byte is at the other end of the word, but the
+	 * other bits are not necessarily zero.  We need a signed result
+	 * representing the difference in the unsigned bytes, so for the
+	 * little-endian case we can't just shift the interesting bits up.
+	 */
+#ifdef __ARMEB__
+	sub	r0, r0, r3, lsr #24
+#else
+	and	r3, r3, #255
+	/* No RSB instruction in Thumb2 */
+#ifdef __thumb2__
+	lsr	r0, r0, #24
+	sub	r0, r0, r3
+#else
+	rsb	r0, r3, r0, lsr #24
+#endif
+#endif
+	ldr	r4, [sp], #4
+	bx	lr
+
+.Lstrcmp_unaligned:
+	wp1	.req	r0
+	wp2	.req	r1
+	b1	.req	r2
+	w1	.req	r4
+	w2	.req	r5
+	t1	.req	ip
+	@ r3 is scratch
+	/* First of all, compare bytes until wp1 (s1) is word-aligned. */
+1:
+	tst	wp1, #3
+	beq	2f
+	ldrb	r2, [wp1], #1
+	ldrb	r3, [wp2], #1
+	cmp	r2, #1
+	it	cs
+	cmpcs	r2, r3
+	beq	1b
+	sub	r0, r2, r3
+	bx	lr
+
+2:
+	str	r5, [sp, #-4]!
+	str	r4, [sp, #-4]!
+	mov	b1, #1
+	orr	b1, b1, b1, lsl #8
+	orr	b1, b1, b1, lsl #16
+
+	and	t1, wp2, #3
+	bic	wp2, wp2, #3
+	ldr	w1, [wp1], #4
+	ldr	w2, [wp2], #4
+	cmp	t1, #2
+	beq	2f
+	bhi	3f
+
+	/* Critical inner loop: Block with 3 bytes initial overlap */
+	.p2align	2
+1:
+	bic	t1, w1, #MSB
+	cmp	t1, w2, SHFT2LSB #8
+	sub	r3, w1, b1
+	bic	r3, r3, w1
+	bne	4f
+	ands	r3, r3, b1, lsl #7
+	it	eq
+	ldreq	w2, [wp2], #4
+	bne	5f
+	eor	t1, t1, w1
+	cmp	t1, w2, SHFT2MSB #24
+	bne	6f
+	ldr	w1, [wp1], #4
+	b	1b
+4:
+	SHFT2LSB	w2, w2, #8
+	b	8f
+
+5:
+#ifdef __ARMEB__
+	/* The syndrome value may contain false ones if the string ends
+	 * with the bytes 0x01 0x00
+	 */
+	tst	w1, #0xff000000
+	itt	ne
+	tstne	w1, #0x00ff0000
+	tstne	w1, #0x0000ff00
+	beq	7f
+#else
+	bics	r3, r3, #0xff000000
+	bne	7f
+#endif
+	ldrb	w2, [wp2]
+	SHFT2LSB	t1, w1, #24
+#ifdef __ARMEB__
+	lsl	w2, w2, #24
+#endif
+	b	8f
+
+6:
+	SHFT2LSB	t1, w1, #24
+	and	w2, w2, #LSB
+	b	8f
+
+	/* Critical inner loop: Block with 2 bytes initial overlap */
+	.p2align	2
+2:
+	SHFT2MSB	t1, w1, #16
+	sub	r3, w1, b1
+	SHFT2LSB	t1, t1, #16
+	bic	r3, r3, w1
+	cmp	t1, w2, SHFT2LSB #16
+	bne	4f
+	ands	r3, r3, b1, lsl #7
+	it	eq
+	ldreq	w2, [wp2], #4
+	bne	5f
+	eor	t1, t1, w1
+	cmp	t1, w2, SHFT2MSB #16
+	bne	6f
+	ldr	w1, [wp1], #4
+	b	2b
+
+5:
+#ifdef __ARMEB__
+	/* The syndrome value may contain false ones if the string ends
+	 * with the bytes 0x01 0x00
+	 */
+	tst	w1, #0xff000000
+	it	ne
+	tstne	w1, #0x00ff0000
+	beq	7f
+#else
+	lsls	r3, r3, #16
+	bne	7f
+#endif
+	ldrh	w2, [wp2]
+	SHFT2LSB	t1, w1, #16
+#ifdef __ARMEB__
+	lsl	w2, w2, #16
+#endif
+	b	8f
+
+6:
+	SHFT2MSB	w2, w2, #16
+	SHFT2LSB	t1, w1, #16
+4:
+	SHFT2LSB	w2, w2, #16
+	b	8f
+
+	/* Critical inner loop: Block with 1 byte initial overlap */
+	.p2align	2
+3:
+	and	t1, w1, #LSB
+	cmp	t1, w2, SHFT2LSB #24
+	sub	r3, w1, b1
+	bic	r3, r3, w1
+	bne	4f
+	ands	r3, r3, b1, lsl #7
+	it	eq
+	ldreq	w2, [wp2], #4
+	bne	5f
+	eor	t1, t1, w1
+	cmp	t1, w2, SHFT2MSB #8
+	bne	6f
+	ldr	w1, [wp1], #4
+	b	3b
+4:
+	SHFT2LSB	w2, w2, #24
+	b	8f
+5:
+	/* The syndrome value may contain false ones if the string ends
+	 * with the bytes 0x01 0x00
+	 */
+	tst	w1, #LSB
+	beq	7f
+	ldr	w2, [wp2], #4
+6:
+	SHFT2LSB	t1, w1, #8
+	bic	w2, w2, #MSB
+	b	8f
+7:
+	mov	r0, #0
+	ldr	r4, [sp], #4
+	ldr	r5, [sp], #4
+	bx	lr
+
+8:
+	and	r2, t1, #LSB
+	and	r0, w2, #LSB
+	cmp	r0, #1
+	it	cs
+	cmpcs	r0, r2
+	itt	eq
+	SHFT2LSBEQ	t1, t1, #8
+	SHFT2LSBEQ	w2, w2, #8
+	beq	8b
+	sub	r0, r2, r0
+	ldr	r4, [sp], #4
+	ldr	r5, [sp], #4
+	bx	lr
+	.size strcmp, . - strcmp
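The aligned fast path above compares a word at a time, using r4 = 0x01010101
(and its shift-by-7, 0x80808080) to detect a NUL without a per-byte loop. The
same idea in C (illustrative only; my_strcmp_aligned is not part of the patch
and covers just the mutually word-aligned case):

#include <stdint.h>

static inline int has_zero_byte(uint32_t x)
{
  /* (x - 0x01010101) leaves bit 7 of a lane set by borrow when that
   * lane was zero; masking with ~x discards lanes whose own top bit
   * was already set. */

  return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
}

int my_strcmp_aligned(const uint32_t *w1, const uint32_t *w2)
{
  const unsigned char *p1;
  const unsigned char *p2;

  /* Word loop: leave as soon as the words differ or contain a NUL. */

  while (*w1 == *w2 && !has_zero_byte(*w1))
    {
      w1++;
      w2++;
    }

  /* Byte loop: pin down the exact difference, as unsigned chars. */

  p1 = (const unsigned char *)w1;
  p2 = (const unsigned char *)w2;
  while (*p1 != '\0' && *p1 == *p2)
    {
      p1++;
      p2++;
    }

  return *p1 - *p2;
}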
diff --git a/libs/libc/machine/arm/armv7-r/gnu/arch_strlen.S b/libs/libc/machine/arm/armv7-r/gnu/arch_strlen.S
new file mode 100644
index 0000000000..c5aa3bacb0
--- /dev/null
+++ b/libs/libc/machine/arm/armv7-r/gnu/arch_strlen.S
@@ -0,0 +1,184 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv7-r/gnu/arch_strlen.S
+ *
+ * Copyright (c) 2010-2011,2013 Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ *  * Neither the name of Linaro Limited nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Assumes:
+ *  ARMv6T2 or ARMv7E-M, AArch32
+ *
+ * Copyright (c) 2015 ARM Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *  * Neither the name of the Linaro nor the
+ *    names of its contributors may be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+#include "acle-compat.h"
+
+	.macro def_fn f p2align=0
+	.text
+	.p2align	\p2align
+	.global	\f
+	.type	\f, %function
+\f:
+	.endm
+
+#ifdef __ARMEB__
+#define S2LO	lsl
+#define S2HI	lsr
+#else
+#define S2LO	lsr
+#define S2HI	lsl
+#endif
+
+	/* This code requires Thumb.  */
+#if __ARM_ARCH_PROFILE == 'M'
+	.arch	armv7e-m
+#else
+	.arch	armv6t2
+#endif
+	.eabi_attribute Tag_ARM_ISA_use, 0
+	.thumb
+	.syntax	unified
+
+/* Parameters and result.  */
+#define srcin		r0
+#define result		r0
+
+/* Internal variables.  */
+#define src		r1
+#define data1a		r2
+#define data1b		r3
+#define const_m1	r12
+#define const_0		r4
+#define tmp1		r4	/* Overlaps const_0  */
+#define tmp2		r5
+
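+/* strlen examines the string a doubleword at a time.  For each 32-bit
+ * half, uadd8 adds 0xff to every byte so that the per-byte GE flags are
+ * set exactly for the non-zero bytes; sel then builds a mask holding
+ * 0xff in each byte lane that contained a NUL and 0x00 elsewhere.
+ * Chaining the second sel through data1a folds both halves together, so
+ * data1b is non-zero on leaving a block iff the eight bytes just loaded
+ * contain the terminator.  */
+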
+def_fn strlen p2align=6
+	pld	[srcin, #0]
+	strd	r4, r5, [sp, #-8]!
+	bic	src, srcin, #7
+	mvn	const_m1, #0
+	ands	tmp1, srcin, #7		/* (8 - bytes) to alignment.  */
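+	/* src has been rounded down to the enclosing doubleword.  When
+	 * srcin was not 8-byte aligned, .Lmisaligned8 pre-fills the stray
+	 * leading bytes with 0xff (orn against a shifted all-ones mask)
+	 * so they can never match the NUL terminator, and biases result
+	 * by -tmp1 to compensate for counting from the rounded-down
+	 * address.  */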
+	pld	[src, #32]
+	bne.w	.Lmisaligned8
+	mov	const_0, #0
+	mov	result, #-8
+.Lloop_aligned:
+	/* Bytes 0-7.  */
+	ldrd	data1a, data1b, [src]
+	pld	[src, #64]
+	add	result, result, #8
+.Lstart_realigned:
+	uadd8	data1a, data1a, const_m1	/* Saturating GE<0:3> set.  */
+	sel	data1a, const_0, const_m1	/* Select based on GE<0:3>.  */
+	uadd8	data1b, data1b, const_m1
+	sel	data1b, data1a, const_m1	/* Only used if d1a == 0.  */
+	cbnz	data1b, .Lnull_found
+
+	/* Bytes 8-15.  */
+	ldrd	data1a, data1b, [src, #8]
+	uadd8	data1a, data1a, const_m1	/* Saturating GE<0:3> set.  */
+	add	result, result, #8
+	sel	data1a, const_0, const_m1	/* Select based on GE<0:3>.  */
+	uadd8	data1b, data1b, const_m1
+	sel	data1b, data1a, const_m1	/* Only used if d1a == 0.  */
+	cbnz	data1b, .Lnull_found
+
+	/* Bytes 16-23.  */
+	ldrd	data1a, data1b, [src, #16]
+	uadd8	data1a, data1a, const_m1	/* Saturating GE<0:3> set.  */
+	add	result, result, #8
+	sel	data1a, const_0, const_m1	/* Select based on GE<0:3>.  */
+	uadd8	data1b, data1b, const_m1
+	sel	data1b, data1a, const_m1	/* Only used if d1a == 0.  */
+	cbnz	data1b, .Lnull_found
+
+	/* Bytes 24-31.  */
+	ldrd	data1a, data1b, [src, #24]
+	add	src, src, #32
+	uadd8	data1a, data1a, const_m1	/* Saturating GE<0:3> set.  */
+	add	result, result, #8
+	sel	data1a, const_0, const_m1	/* Select based on GE<0:3>.  */
+	uadd8	data1b, data1b, const_m1
+	sel	data1b, data1a, const_m1	/* Only used if d1a == 0.  */
+	cmp	data1b, #0
+	beq	.Lloop_aligned
+
+.Lnull_found:
+	cmp	data1a, #0
+	itt	eq
+	addeq	result, result, #4
+	moveq	data1a, data1b
+#ifndef __ARMEB__
+	rev	data1a, data1a
+#endif
+	clz	data1a, data1a
+	ldrd	r4, r5, [sp], #8
+	add	result, result, data1a, lsr #3	/* Bits -> Bytes.  */
+	bx	lr
+
+.Lmisaligned8:
+	ldrd	data1a, data1b, [src]
+	and	tmp2, tmp1, #3
+	rsb	result, tmp1, #0
+	lsl	tmp2, tmp2, #3	/* Bytes -> bits.  */
+	tst	tmp1, #4
+	pld	[src, #64]
+	S2HI	tmp2, const_m1, tmp2
+	orn	data1a, data1a, tmp2
+	itt	ne
+	ornne	data1b, data1b, tmp2
+	movne	data1a, const_m1
+	mov	const_0, #0
+	b	.Lstart_realigned
+	.size	strlen, . - strlen
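The uadd8/sel pairs and the closing rev/clz amount to the portable zero-byte
scan below (an illustrative sketch; my_strlen is a made-up name, and the byte
loops stand in for the 0xff pre-fill of a misaligned head and for the clz bit
arithmetic):

#include <stddef.h>
#include <stdint.h>

size_t my_strlen(const char *s)
{
  const char *p = s;
  const uint32_t *w;

  /* Head: byte loop until p is word aligned (the assembly instead
   * masks the leading bytes of the first doubleword to 0xff). */

  while (((uintptr_t)p & 3) != 0)
    {
      if (*p == '\0')
        {
          return p - s;
        }

      p++;
    }

  /* Body: scan a word at a time until some byte lane is zero. */

  w = (const uint32_t *)p;
  while (((*w - 0x01010101u) & ~*w & 0x80808080u) == 0)
    {
      w++;
    }

  /* Tail: locate the exact NUL within the final word (the assembly
   * derives this index with rev/clz on the byte mask). */

  p = (const char *)w;
  while (*p != '\0')
    {
      p++;
    }

  return p - s;
}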