repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
0xffea/MINIX3
| 7,836
|
common/lib/libc/arch/ia64/atomic/atomic.S
|
/* $NetBSD: atomic.S,v 1.5 2009/11/09 14:22:02 skrll Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Takayoshi Kochi.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#ifdef _KERNEL
#define ALIAS(f, t) STRONG_ALIAS(f,t)
#else
#define ALIAS(f, t) WEAK_ALIAS(f,t)
#endif
.text
ENTRY(_atomic_dec_32,1)
fetchadd4.rel r8=[r32],-1
br.ret.sptk rp
END(_atomic_dec_32)
ENTRY(_atomic_dec_64,1)
fetchadd8.rel r8=[r32],-1
br.ret.sptk rp
END(_atomic_dec_64)
ENTRY(_atomic_dec_32_nv,1)
fetchadd4.rel r8=[r32],-1
br.ret.sptk rp
END(_atomic_dec_32_nv)
ENTRY(_atomic_dec_64_nv,1)
fetchadd8.rel r8=[r32],-1
br.ret.sptk rp
END(_atomic_dec_64_nv)
ENTRY(_atomic_inc_32,1)
fetchadd4.rel r8=[r32],1
br.ret.sptk rp
END(_atomic_inc_32)
ENTRY(_atomic_inc_64,1)
fetchadd8.rel r8=[r32],1
br.ret.sptk rp
END(_atomic_inc_64)
ENTRY(_atomic_inc_32_nv,1)
fetchadd4.rel r8=[r32],1
br.ret.sptk rp
END(_atomic_inc_32_nv)
ENTRY(_atomic_inc_64_nv,1)
fetchadd8.rel r8=[r32],1
br.ret.sptk rp
END(_atomic_inc_64_nv)
ENTRY(_atomic_swap_32,2)
xchg4 r8=[r32],r33
;;
mov r33=r8
br.ret.sptk rp
END(_atomic_swap_32)
ENTRY(_atomic_swap_64,2)
xchg8 r8=[r32],r33
;;
mov r33=r8
br.ret.sptk rp
END(_atomic_swap_64)
ENTRY(_atomic_cas_32,3)
mov ar.ccv=r33
;;
cmpxchg4.acq r8=[r32],r34,ar.ccv
br.ret.sptk rp
END(_atomic_cas_32)
ENTRY(_atomic_cas_64,3)
mov ar.ccv=r33
;;
cmpxchg8.acq r8=[r32],r34,ar.ccv
br.ret.sptk rp
END(_atomic_cas_64)
ENTRY(_membar_consumer,0)
mf
br.ret.sptk rp
END(_membar_consumer)
ENTRY(_membar_producer,0)
mf
br.ret.sptk rp
END(_membar_producer)
ENTRY(_membar_enter,0)
mf
br.ret.sptk rp
END(_membar_enter)
ENTRY(_membar_exit,0)
mf
br.ret.sptk rp
END(_membar_exit)
ENTRY(_membar_sync,0)
mf
br.ret.sptk rp
END(_membar_sync)
ALIAS(atomic_add_32,_atomic_add_32)
ALIAS(atomic_add_int,_atomic_add_32)
ALIAS(atomic_add_64,_atomic_add_64)
ALIAS(atomic_add_long,_atomic_add_64)
ALIAS(atomic_add_ptr,_atomic_add_64)
ALIAS(atomic_add_32_nv,_atomic_add_32_nv)
ALIAS(atomic_add_int_nv,_atomic_add_32_nv)
ALIAS(atomic_add_64_nv,_atomic_add_64_nv)
ALIAS(atomic_add_long_nv,_atomic_add_64_nv)
ALIAS(atomic_add_ptr_nv,_atomic_add_64_nv)
ALIAS(atomic_and_32,_atomic_and_32)
ALIAS(atomic_and_uint,_atomic_and_32)
ALIAS(atomic_and_64,_atomic_and_64)
ALIAS(atomic_and_ulong,_atomic_and_64)
ALIAS(atomic_and_ptr,_atomic_and_64)
ALIAS(atomic_and_32_nv,_atomic_and_32_nv)
ALIAS(atomic_and_uint_nv,_atomic_and_32_nv)
ALIAS(atomic_and_64_nv,_atomic_and_64_nv)
ALIAS(atomic_and_ulong_nv,_atomic_and_64_nv)
ALIAS(atomic_and_ptr_nv,_atomic_and_64_nv)
ALIAS(atomic_dec_32,_atomic_dec_32)
ALIAS(atomic_dec_uint,_atomic_dec_32)
ALIAS(atomic_dec_64,_atomic_dec_64)
ALIAS(atomic_dec_ulong,_atomic_dec_64)
ALIAS(atomic_dec_ptr,_atomic_dec_64)
ALIAS(atomic_dec_32_nv,_atomic_dec_32_nv)
ALIAS(atomic_dec_uint_nv,_atomic_dec_32_nv)
ALIAS(atomic_dec_64_nv,_atomic_dec_64_nv)
ALIAS(atomic_dec_ulong_nv,_atomic_dec_64_nv)
ALIAS(atomic_dec_ptr_nv,_atomic_dec_64_nv)
ALIAS(atomic_inc_32,_atomic_inc_32)
ALIAS(atomic_inc_uint,_atomic_inc_32)
ALIAS(atomic_inc_64,_atomic_inc_64)
ALIAS(atomic_inc_ulong,_atomic_inc_64)
ALIAS(atomic_inc_ptr,_atomic_inc_64)
ALIAS(atomic_inc_32_nv,_atomic_inc_32_nv)
ALIAS(atomic_inc_uint_nv,_atomic_inc_32_nv)
ALIAS(atomic_inc_64_nv,_atomic_inc_64_nv)
ALIAS(atomic_inc_ulong_nv,_atomic_inc_64_nv)
ALIAS(atomic_inc_ptr_nv,_atomic_inc_64_nv)
ALIAS(atomic_or_32,_atomic_or_32)
ALIAS(atomic_or_uint,_atomic_or_32)
ALIAS(atomic_or_64,_atomic_or_64)
ALIAS(atomic_or_ulong,_atomic_or_64)
ALIAS(atomic_or_ptr,_atomic_or_64)
ALIAS(atomic_or_32_nv,_atomic_or_32_nv)
ALIAS(atomic_or_uint_nv,_atomic_or_32_nv)
ALIAS(atomic_or_64_nv,_atomic_or_64_nv)
ALIAS(atomic_or_ulong_nv,_atomic_or_64_nv)
ALIAS(atomic_or_ptr_nv,_atomic_or_64_nv)
ALIAS(atomic_swap_32,_atomic_swap_32)
ALIAS(atomic_swap_uint,_atomic_swap_32)
ALIAS(atomic_swap_64,_atomic_swap_64)
ALIAS(atomic_swap_ulong,_atomic_swap_64)
ALIAS(atomic_swap_ptr,_atomic_swap_64)
ALIAS(atomic_cas_32,_atomic_cas_32)
ALIAS(atomic_cas_uint,_atomic_cas_32)
ALIAS(atomic_cas_64,_atomic_cas_64)
ALIAS(atomic_cas_ulong,_atomic_cas_64)
ALIAS(atomic_cas_ptr,_atomic_cas_64)
ALIAS(atomic_cas_32_ni,_atomic_cas_32)
ALIAS(atomic_cas_uint_ni,_atomic_cas_32)
ALIAS(atomic_cas_64_ni,_atomic_cas_64)
ALIAS(atomic_cas_ulong_ni,_atomic_cas_64)
ALIAS(atomic_cas_ptr_ni,_atomic_cas_64)
ALIAS(membar_consumer,_membar_consumer)
ALIAS(membar_producer,_membar_producer)
ALIAS(membar_enter,_membar_enter)
ALIAS(membar_exit,_membar_exit)
ALIAS(membar_sync,_membar_sync)
STRONG_ALIAS(_atomic_add_int,_atomic_add_32)
STRONG_ALIAS(_atomic_add_long,_atomic_add_64)
STRONG_ALIAS(_atomic_add_ptr,_atomic_add_64)
STRONG_ALIAS(_atomic_add_int_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_long_nv,_atomic_add_64_nv)
STRONG_ALIAS(_atomic_add_ptr_nv,_atomic_add_64_nv)
STRONG_ALIAS(_atomic_and_uint,_atomic_and_32)
STRONG_ALIAS(_atomic_and_ulong,_atomic_and_64)
STRONG_ALIAS(_atomic_and_ptr,_atomic_and_64)
STRONG_ALIAS(_atomic_and_uint_nv,_atomic_and_32_nv)
STRONG_ALIAS(_atomic_and_ulong_nv,_atomic_and_64_nv)
STRONG_ALIAS(_atomic_and_ptr_nv,_atomic_and_64_nv)
STRONG_ALIAS(_atomic_dec_uint,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ulong,_atomic_dec_64)
STRONG_ALIAS(_atomic_dec_ptr,_atomic_dec_64)
STRONG_ALIAS(_atomic_dec_uint_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ulong_nv,_atomic_dec_64_nv)
STRONG_ALIAS(_atomic_dec_ptr_nv,_atomic_dec_64_nv)
STRONG_ALIAS(_atomic_inc_uint,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ulong,_atomic_inc_64)
STRONG_ALIAS(_atomic_inc_ptr,_atomic_inc_64)
STRONG_ALIAS(_atomic_inc_uint_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ulong_nv,_atomic_inc_64_nv)
STRONG_ALIAS(_atomic_inc_ptr_nv,_atomic_inc_64_nv)
STRONG_ALIAS(_atomic_or_uint,_atomic_or_32)
STRONG_ALIAS(_atomic_or_ulong,_atomic_or_64)
STRONG_ALIAS(_atomic_or_ptr,_atomic_or_64)
STRONG_ALIAS(_atomic_or_uint_nv,_atomic_or_32_nv)
STRONG_ALIAS(_atomic_or_ulong_nv,_atomic_or_64_nv)
STRONG_ALIAS(_atomic_or_ptr_nv,_atomic_or_64_nv)
STRONG_ALIAS(_atomic_swap_uint,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ulong,_atomic_swap_64)
STRONG_ALIAS(_atomic_swap_ptr,_atomic_swap_64)
STRONG_ALIAS(_atomic_cas_uint,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ptr,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_uint_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong_ni,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ptr_ni,_atomic_cas_64)
|
0xffea/MINIX3
| 1,890
|
common/lib/libc/arch/vax/gen/bswap32.S
|
/* $NetBSD: bswap32.S,v 1.2 2008/04/28 20:22:52 martin Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#if defined(_KERNEL) || defined(_STANDALONE)
ENTRY(_C_LABEL(bswap32), 0)
#else
#if BYTE_ORDER == LITTLE_ENDIAN
ALTENTRY(_C_LABEL(ntohl))
ALTENTRY(_C_LABEL(htonl))
#endif
ENTRY(_C_LABEL(__bswap32), 0)
#endif
rotl $-8, 4(%ap), %r0
insv %r0, $16, $8, %r0
rotl $8, 4(%ap), %r1
movb %r1, %r0
ret
|
0xffea/MINIX3
| 3,153
|
common/lib/libc/arch/vax/gen/udiv.S
|
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Donn Seeley at UUNET Technologies, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)udiv.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: udiv.S,v 1.2 2005/12/24 08:33:32 matt Exp $"
#endif /* LIBC_SCCS and not lint */
/*
* Unsigned division, PCC flavor.
* udiv() takes an ordinary dividend/divisor pair;
* audiv() takes a pointer to a dividend and an ordinary divisor.
*/
#include <machine/asm.h>
#define DIVIDEND 4(%ap)
#define DIVISOR 8(%ap)
#ifdef __ELF__
ASENTRY(__udiv,0)
#else
ASENTRY(udiv,0)
#endif
movl DIVISOR,%r2
jlss Leasy # big divisor: settle by comparison
movl DIVIDEND,%r0
jlss Lhard # big dividend: extended division
divl2 %r2,%r0 # small divisor and dividend: signed division
ret
Lhard:
clrl %r1
ediv %r2,%r0,%r0,%r1
ret
Leasy:
cmpl DIVIDEND,%r2
jgequ Lone # if dividend is as big or bigger, return 1
clrl %r0 # else return 0
ret
Lone:
movl $1,%r0
ret
#ifdef __ELF__
ASENTRY(__audiv,0)
#else
ASENTRY(audiv,0)
#endif
movl DIVIDEND,%r3
movl DIVISOR,%r2
jlss La_easy # big divisor: settle by comparison
movl (%r3),%r0
jlss La_hard # big dividend: extended division
divl2 %r2,%r0 # small divisor and dividend: signed division
movl %r0,(%r3) # leave the value of the assignment in %r0
ret
La_hard:
clrl %r1
ediv %r2,%r0,%r0,%r1
movl %r0,(%r3)
ret
La_easy:
cmpl (%r3),%r2
jgequ La_one # if dividend is as big or bigger, return 1
clrl %r0 # else return 0
clrl (%r3)
ret
La_one:
movl $1,%r0
movl %r0,(%r3)
ret
|
0xffea/MINIX3
| 3,279
|
common/lib/libc/arch/vax/gen/urem.S
|
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Donn Seeley at UUNET Technologies, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
/* .asciz "@(#)urem.s 8.1 (Berkeley) 6/4/93" */
.asciz "$NetBSD: urem.S,v 1.2 2005/12/24 08:33:32 matt Exp $"
#endif /* LIBC_SCCS and not lint */
#include <machine/asm.h>
/*
* Unsigned modulus, PCC flavor.
* urem() takes an ordinary dividend/divisor pair;
* aurem() takes a pointer to a dividend and an ordinary divisor.
*/
#define DIVIDEND 4(%ap)
#define DIVISOR 8(%ap)
#ifdef __ELF__
ASENTRY(__urem,0)
#else
ASENTRY(urem,0)
#endif
movl DIVISOR,%r2
jlss Leasy # big divisor: settle by comparison
movl DIVIDEND,%r0
jlss Lhard # big dividend: need extended division
divl3 %r2,%r0,%r1 # small divisor and dividend: signed modulus
mull2 %r2,%r1
subl2 %r1,%r0
ret
Lhard:
clrl %r1
ediv %r2,%r0,%r1,%r0
ret
Leasy:
subl3 %r2,DIVIDEND,%r0
jcc Ldifference # if divisor goes in once, return difference
movl DIVIDEND,%r0 # if divisor is bigger, return dividend
Ldifference:
ret
#ifdef __ELF__
ASENTRY(__aurem,0)
#else
ASENTRY(aurem,0)
#endif
movl DIVIDEND,%r3
movl DIVISOR,%r2
jlss La_easy # big divisor: settle by comparison
movl (%r3),%r0
jlss La_hard # big dividend: need extended division
divl3 %r2,%r0,%r1 # small divisor and dividend: signed modulus
mull2 %r2,%r1
subl2 %r1,%r0
movl %r0,(%r3) # leave the value of the assignment in %r0
ret
La_hard:
clrl %r1
ediv %r2,%r0,%r1,%r0
movl %r0,(%r3)
ret
La_easy:
subl3 %r2,(%r3),%r0
jcs La_dividend # if divisor is bigger, leave dividend alone
movl %r0,(%r3) # if divisor goes in once, store difference
ret
La_dividend:
movl (%r3),%r0
ret
|
0xffea/MINIX3
| 1,817
|
common/lib/libc/arch/vax/gen/bswap16.S
|
/* $NetBSD: bswap16.S,v 1.2 2008/04/28 20:22:52 martin Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#if defined(_KERNEL) || defined(_STANDALONE)
ENTRY(_C_LABEL(bswap16), 0)
#else
ALTENTRY(_C_LABEL(ntohs))
ALTENTRY(_C_LABEL(htons))
ENTRY(_C_LABEL(__bswap16), 0)
#endif
movl 4(%ap), %r1
extzv $8, $8, %r1, %r0
insv %r1, $8, $8, %r0
ret
|
0xffea/MINIX3
| 3,098
|
common/lib/libc/arch/vax/string/memcpy.S
|
/* $NetBSD: memcpy.S,v 1.2 2005/12/24 08:33:32 matt Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
.asciz "@(#)memcpy.s 8.1 (Berkeley) 6/4/93"
#endif /* LIBC_SCCS and not lint */
/*
* void *memcpy(dst, src, size)
* returns dst
*
* This optimises the usual case (count < 65536) at the expense
* of some extra memory references and branches when count >= 65536.
*/
#include <machine/asm.h>
ENTRY(memcpy, 0)
movzwl $65535,%r0 /* %r0 = 64K (needed below) */
movq 8(%ap),%r1 /* %r1 = src, %r2 = length */
movl 4(%ap),%r3 /* %r3 = dst */
cmpl %r1,%r3
bgtru 1f /* normal forward case */
beql 2f /* equal, nothing to do */
addl2 %r2,%r1 /* overlaps iff src<dst but src+len>dst */
cmpl %r1,%r3
bgtru 4f /* overlapping, must move backwards */
subl2 %r2,%r1
1: /* move forward */
cmpl %r2,%r0
bgtru 3f /* stupid movc3 limitation */
movc3 %r2,(%r1),(%r3) /* move it all */
2:
movl 4(%ap),%r0 /* return original dst */
ret
3:
subl2 %r0,12(%ap) /* adjust length by 64K */
movc3 %r0,(%r1),(%r3) /* move 64K */
movl 12(%ap),%r2
decw %r0 /* from 0 to 65535 */
brb 1b /* retry */
4: /* move backward */
addl2 %r2,%r3
5:
cmpl %r2,%r0
bgtru 6f /* stupid movc3 limitation */
subl2 %r2,%r1
subl2 %r2,%r3
movc3 %r2,(%r1),(%r3) /* move it all */
movl 4(%ap),%r0 /* return original dst */
ret
6:
subl2 %r0,12(%ap) /* adjust length by 64K */
subl2 %r0,%r1
subl2 %r0,%r3
movc3 %r0,(%r1),(%r3) /* move 64K */
movl 12(%ap),%r2
decw %r0
subl2 %r0,%r1
subl2 %r0,%r3
brb 5b
|
0xffea/MINIX3
| 3,102
|
common/lib/libc/arch/vax/string/memmove.S
|
/* $NetBSD: memmove.S,v 1.2 2005/12/24 08:33:32 matt Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
.asciz "@(#)memmove.s 8.1 (Berkeley) 6/4/93"
#endif /* LIBC_SCCS and not lint */
/*
* void *memmove(dst, src, size)
* returns dst
*
* This optimises the usual case (count < 65536) at the expense
* of some extra memory references and branches when count >= 65536.
*/
#include <machine/asm.h>
ENTRY(memmove, 0)
movzwl $65535,%r0 /* %r0 = 64K (needed below) */
movq 8(%ap),%r1 /* %r1 = src, %r2 = length */
movl 4(%ap),%r3 /* %r3 = dst */
cmpl %r1,%r3
bgtru 1f /* normal forward case */
beql 2f /* equal, nothing to do */
addl2 %r2,%r1 /* overlaps iff src<dst but src+len>dst */
cmpl %r1,%r3
bgtru 4f /* overlapping, must move backwards */
subl2 %r2,%r1
1: /* move forward */
cmpl %r2,%r0
bgtru 3f /* stupid movc3 limitation */
movc3 %r2,(%r1),(%r3) /* move it all */
2:
movl 4(%ap),%r0 /* return original dst */
ret
3:
subl2 %r0,12(%ap) /* adjust length by 64K */
movc3 %r0,(%r1),(%r3) /* move 64K */
movl 12(%ap),%r2
decw %r0 /* from 0 to 65535 */
brb 1b /* retry */
4: /* move backward */
addl2 %r2,%r3
5:
cmpl %r2,%r0
bgtru 6f /* stupid movc3 limitation */
subl2 %r2,%r1
subl2 %r2,%r3
movc3 %r2,(%r1),(%r3) /* move it all */
movl 4(%ap),%r0 /* return original dst */
ret
6:
subl2 %r0,12(%ap) /* adjust length by 64K */
subl2 %r0,%r1
subl2 %r0,%r3
movc3 %r0,(%r1),(%r3) /* move 64K */
movl 12(%ap),%r2
decw %r0
subl2 %r0,%r1
subl2 %r0,%r3
brb 5b
|
0xffea/MINIX3
| 2,048
|
common/lib/libc/arch/vax/string/memset.S
|
/* $NetBSD: memset.S,v 1.1 2010/03/12 09:12:34 uwe Exp $ */
/*-
* Copyright (c) 1990, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if defined(LIBC_SCCS) && !defined(lint)
.asciz "@(#)memset.s 8.1 (Berkeley) 6/4/93"
#endif /* LIBC_SCCS and not lint */
/* void *memset(base, c, length) */
#include <machine/asm.h>
ENTRY(memset, 0)
movl 4(%ap),%r3
1:
movzwl $65535,%r0
movq 8(%ap),%r1
cmpl %r2,%r0
jgtru 2f
movc5 $0,(%r3),%r1,%r2,(%r3)
movl %r1,%r0
ret
2:
subl2 %r0,12(%ap)
movc5 $0,(%r3),%r1,%r0,(%r3)
jbr 1b
|
0xffea/MINIX3
| 2,245
|
common/lib/libc/arch/sparc64/net/htonl.S
|
/* $NetBSD: htonl.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Header: htonl.s,v 1.1 92/06/25 12:47:05 torek Exp
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
.asciz "@(#)htonl.s 8.1 (Berkeley) 6/4/93"
#else
RCSID("$NetBSD: htonl.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
/* netorder = htonl(hostorder) */
ENTRY(htonl)
retl
srl %o0, 0, %o0 /* zero extend -- or do we sign extend? */
|
0xffea/MINIX3
| 2,198
|
common/lib/libc/arch/sparc64/net/ntohl.S
|
/* $NetBSD: ntohl.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Header: ntohl.s,v 1.1 92/06/25 12:47:06 torek Exp
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
.asciz "@(#)ntohl.s 8.1 (Berkeley) 6/4/93"
#else
RCSID("$NetBSD: ntohl.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
/* hostorder = ntohl(netorder) */
ENTRY(ntohl)
retl
signx %o0, %o0
|
0xffea/MINIX3
| 2,247
|
common/lib/libc/arch/sparc64/net/htons.S
|
/* $NetBSD: htons.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Header: htons.s,v 1.1 92/06/25 12:47:05 torek Exp
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
.asciz "@(#)htons.s 8.1 (Berkeley) 6/4/93"
#else
RCSID("$NetBSD: htons.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
/* netorder = htons(hostorder) */
ENTRY(htons)
sethi %hi(0xffff0000), %o1
signx %o1, %o1
retl
andn %o0, %o1, %o0
|
0xffea/MINIX3
| 2,249
|
common/lib/libc/arch/sparc64/net/ntohs.S
|
/* $NetBSD: ntohs.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Header: ntohs.s,v 1.1 92/06/25 12:47:07 torek Exp
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
.asciz "@(#)ntohs.s 8.1 (Berkeley) 6/4/93"
#else
RCSID("$NetBSD: ntohs.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
/* hostorder = ntohs(netorder) */
ENTRY(ntohs)
sethi %hi(0xffff0000), %o1
signx %o1, %o1
retl
andn %o0, %o1, %o0
|
0xffea/MINIX3
| 2,376
|
common/lib/libc/arch/sparc64/gen/mul.S
|
/* $NetBSD: mul.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Header: mul.s,v 1.5 92/06/25 13:24:03 torek Exp
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
.asciz "@(#)mul.s 8.1 (Berkeley) 6/4/93"
#else
RCSID("$NetBSD: mul.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
/*
* Signed multiply.
*
* Returns %o0 * %o1 in %o0
*
* According to the manual the smaller parameter should be in
* rs1, so we'll move it there.
*/
FUNC(.mul)
cmp %o0, %o1
bge 1f
nop
retl
mulx %o0, %o1, %o0
1:
retl
mulx %o1, %o0, %o0
|
0xffea/MINIX3
| 2,796
|
common/lib/libc/arch/sparc64/gen/saveregs.S
|
/* $NetBSD: saveregs.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Header: saveregs.s,v 1.1 91/07/06 17:22:33 torek Exp
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
RCSID("$NetBSD: saveregs.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
#endif /* LIBC_SCCS and not lint */
/*
* Save register arguments in caller's `arg dump' area, so that
* stdarg functions work.
*
* This really should be done with a pointer to the arg dump area;
* our caller should allocate that area, not our caller's caller.
* But then, they did not let me invent the calling sequence....
*
* We assume the caller has executed a `save' instruction.
*/
#define REGOFF (BIAS - CC64FSZ)
ENTRY(__builtin_saveregs)
stx %i0, [%fp + REGOFF + 0x00] ! fr->fr_argd[0]
stx %i1, [%fp + REGOFF + 0x08] ! fr->fr_argd[1]
stx %i2, [%fp + REGOFF + 0x10] ! fr->fr_argd[2]
stx %i3, [%fp + REGOFF + 0x18] ! fr->fr_argd[3]
stx %i4, [%fp + REGOFF + 0x20] ! fr->fr_argd[4]
retl
stx %i5, [%fp + REGOFF + 0x28] ! fr->fr_argd[5]
|
0xffea/MINIX3
| 2,378
|
common/lib/libc/arch/sparc64/gen/umul.S
|
/* $NetBSD: umul.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Header: umul.s,v 1.4 92/06/25 13:24:05 torek Exp
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
.asciz "@(#)umul.s 8.1 (Berkeley) 6/4/93"
#else
RCSID("$NetBSD: umul.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
/*
* Unsigned multiply. Returns %o0 * %o1 in %o0
*
* According to the manual the smaller parameter should be in
* rs1, so we'll move it there.
*/
FUNC(.umul)
cmp %o0, %o1
bge 1f
nop
retl
mulx %o0, %o1, %o0
1:
retl
mulx %o1, %o0, %o0
|
0xffea/MINIX3
| 2,290
|
common/lib/libc/arch/sparc64/atomic/atomic_swap.S
|
/* $NetBSD: atomic_swap.S,v 1.6 2008/05/25 15:56:12 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
ENTRY(_atomic_swap_32)
ld [%o0], %o2
1: mov %o1, %o3
cas [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %icc, 1b
mov %o3, %o2
retl
mov %o3, %o0
ATOMIC_OP_ALIAS(atomic_swap_32,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_uint,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_uint,_atomic_swap_32)
ENTRY(_atomic_swap_64)
ldx [%o0], %o2
1: mov %o1, %o3
casx [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %xcc, 1b
mov %o3, %o2
retl
mov %o3, %o0
ATOMIC_OP_ALIAS(atomic_swap_64,_atomic_swap_64)
ATOMIC_OP_ALIAS_SIZE(atomic_swap_ulong,_atomic_swap)
STRONG_ALIAS_SIZE(_atomic_swap_ulong,_atomic_swap)
ATOMIC_OP_ALIAS_SIZE(atomic_swap_ptr,_atomic_swap)
STRONG_ALIAS_SIZE(_atomic_swap_ptr,_atomic_swap)
|
0xffea/MINIX3
| 2,596
|
common/lib/libc/arch/sparc64/atomic/atomic_and.S
|
/* $NetBSD: atomic_and.S,v 1.6 2008/05/25 15:56:12 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
ENTRY(_atomic_and_32)
ld [%o0], %o2
1: and %o1, %o2, %o3
cas [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %icc, 1b
mov %o3, %o2
retl
and %o1, %o2, %o0
ATOMIC_OP_ALIAS(atomic_and_32,_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_uint,_atomic_and_32)
STRONG_ALIAS(_atomic_and_uint,_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_32_nv,_atomic_and_32)
STRONG_ALIAS(_atomic_and_32_nv,_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_uint_nv,_atomic_and_32)
STRONG_ALIAS(_atomic_and_uint_nv,_atomic_and_32)
ENTRY(_atomic_and_64)
ldx [%o0], %o2
1: and %o1, %o2, %o3
casx [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %xcc, 1b
mov %o3, %o2
retl
and %o1, %o2, %o0
ATOMIC_OP_ALIAS(atomic_and_64,_atomic_and_64)
ATOMIC_OP_ALIAS(atomic_and_64_nv,_atomic_and_64)
STRONG_ALIAS(_atomic_and_64_nv,_atomic_and_64)
ATOMIC_OP_ALIAS_SIZE(atomic_and_ulong,_atomic_and)
STRONG_ALIAS_SIZE(_atomic_and_ulong,_atomic_and)
ATOMIC_OP_ALIAS_SIZE(atomic_and_ulong_nv,_atomic_and)
STRONG_ALIAS_SIZE(_atomic_and_ulong_nv,_atomic_and)
|
0xffea/MINIX3
| 2,128
|
common/lib/libc/arch/sparc64/atomic/membar_ops.S
|
/* $NetBSD: membar_ops.S,v 1.5 2008/05/25 15:56:12 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe, and by Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
/* These assume Total Store Order (TSO) */
ENTRY(_membar_producer)
retl
nop
ENTRY(_membar_consumer)
membar #LoadLoad
retl
nop
ATOMIC_OP_ALIAS(membar_producer,_membar_producer)
ATOMIC_OP_ALIAS(membar_consumer,_membar_consumer)
ATOMIC_OP_ALIAS(membar_enter,_membar_consumer)
STRONG_ALIAS(_membar_enter,_membar_consumer)
ATOMIC_OP_ALIAS(membar_exit,_membar_consumer)
STRONG_ALIAS(_membar_exit,_membar_consumer)
ATOMIC_OP_ALIAS(membar_sync,_membar_consumer)
STRONG_ALIAS(_membar_sync,_membar_consumer)
|
0xffea/MINIX3
| 2,786
|
common/lib/libc/arch/sparc64/atomic/atomic_inc.S
|
/* $NetBSD: atomic_inc.S,v 1.6 2008/05/25 15:56:12 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
ENTRY(_atomic_inc_32)
ld [%o0], %o2
1: add %o2, 1, %o3
cas [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %icc, 1b
mov %o3, %o2
retl
add %o2, 1, %o0
ATOMIC_OP_ALIAS(atomic_inc_32,_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_uint,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_uint,_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_32_nv,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_32_nv,_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_uint_nv,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_uint_nv,_atomic_inc_32)
ENTRY(_atomic_inc_64)
ldx [%o0], %o2
1: add %o2, 1, %o3
casx [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %xcc, 1b
mov %o3, %o2
retl
add %o2, 1, %o0
ATOMIC_OP_ALIAS(atomic_inc_64,_atomic_inc_64)
ATOMIC_OP_ALIAS(atomic_inc_64_nv,_atomic_inc_64)
STRONG_ALIAS(_atomic_inc_64_nv,_atomic_inc_64)
ATOMIC_OP_ALIAS_SIZE(atomic_inc_ulong,_atomic_inc)
STRONG_ALIAS_SIZE(_atomic_inc_ulong,_atomic_inc)
ATOMIC_OP_ALIAS_SIZE(atomic_inc_ptr,_atomic_inc)
STRONG_ALIAS_SIZE(_atomic_inc_ptr,_atomic_inc)
ATOMIC_OP_ALIAS_SIZE(atomic_inc_ulong_nv,_atomic_inc)
STRONG_ALIAS_SIZE(_atomic_inc_ulong_nv,_atomic_inc)
ATOMIC_OP_ALIAS_SIZE(atomic_inc_ptr_nv,_atomic_inc)
STRONG_ALIAS_SIZE(_atomic_inc_ptr_nv,_atomic_inc)
|
0xffea/MINIX3
| 2,790
|
common/lib/libc/arch/sparc64/atomic/atomic_dec.S
|
/* $NetBSD: atomic_dec.S,v 1.7 2008/05/25 15:56:12 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
ENTRY(_atomic_dec_32)
ld [%o0], %o2
1: add %o2, -1, %o3
cas [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %icc, 1b
mov %o3, %o2
retl
add %o2, -1, %o0
ATOMIC_OP_ALIAS(atomic_dec_32,_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_uint,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_uint,_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_32_nv,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_32_nv,_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_uint_nv,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_uint_nv,_atomic_dec_32)
ENTRY(_atomic_dec_64)
ldx [%o0], %o2
1: add %o2, -1, %o3
casx [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %xcc, 1b
mov %o3, %o2
retl
add %o2, -1, %o0
ATOMIC_OP_ALIAS(atomic_dec_64,_atomic_dec_64)
ATOMIC_OP_ALIAS(atomic_dec_64_nv,_atomic_dec_64)
STRONG_ALIAS(_atomic_dec_64_nv,_atomic_dec_64)
ATOMIC_OP_ALIAS_SIZE(atomic_dec_ulong,_atomic_dec)
STRONG_ALIAS_SIZE(_atomic_dec_ulong,_atomic_dec)
ATOMIC_OP_ALIAS_SIZE(atomic_dec_ptr,_atomic_dec)
STRONG_ALIAS_SIZE(_atomic_dec_ptr,_atomic_dec)
ATOMIC_OP_ALIAS_SIZE(atomic_dec_ulong_nv,_atomic_dec)
STRONG_ALIAS_SIZE(_atomic_dec_ulong_nv,_atomic_dec)
ATOMIC_OP_ALIAS_SIZE(atomic_dec_ptr_nv,_atomic_dec)
STRONG_ALIAS_SIZE(_atomic_dec_ptr_nv,_atomic_dec)
|
0xffea/MINIX3
| 2,786
|
common/lib/libc/arch/sparc64/atomic/atomic_add.S
|
/* $NetBSD: atomic_add.S,v 1.6 2008/05/25 15:56:12 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
ENTRY(_atomic_add_32)
ld [%o0], %o2
1: add %o1, %o2, %o3
cas [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %icc, 1b
mov %o3, %o2
retl
add %o1, %o2, %o0
ATOMIC_OP_ALIAS(atomic_add_32,_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_int,_atomic_add_32)
STRONG_ALIAS(_atomic_add_int,_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_32_nv,_atomic_add_32)
STRONG_ALIAS(_atomic_add_32_nv,_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_int_nv,_atomic_add_32)
STRONG_ALIAS(_atomic_add_int_nv,_atomic_add_32)
ENTRY(_atomic_add_64)
ldx [%o0], %o2
1: add %o1, %o2, %o3
casx [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %xcc, 1b
mov %o3, %o2
retl
add %o1, %o2, %o0
ATOMIC_OP_ALIAS(atomic_add_64,_atomic_add_64)
ATOMIC_OP_ALIAS(atomic_add_64_nv,_atomic_add_64)
STRONG_ALIAS(_atomic_add_64_nv,_atomic_add_64)
ATOMIC_OP_ALIAS_SIZE(atomic_add_long,_atomic_add)
STRONG_ALIAS_SIZE(_atomic_add_long,_atomic_add)
ATOMIC_OP_ALIAS_SIZE(atomic_add_ptr,_atomic_add)
STRONG_ALIAS_SIZE(_atomic_add_ptr,_atomic_add)
ATOMIC_OP_ALIAS_SIZE(atomic_add_long_nv,_atomic_add)
STRONG_ALIAS_SIZE(_atomic_add_long_nv,_atomic_add)
ATOMIC_OP_ALIAS_SIZE(atomic_add_ptr_nv,_atomic_add)
STRONG_ALIAS_SIZE(_atomic_add_ptr_nv,_atomic_add)
|
0xffea/MINIX3
| 2,623
|
common/lib/libc/arch/sparc64/atomic/atomic_cas.S
|
/* $NetBSD: atomic_cas.S,v 1.6 2008/05/25 15:56:12 chs Exp $ */
/*-
* Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
ENTRY(_atomic_cas_32)
cas [%o0], %o1, %o2
retl
mov %o2, %o0
ATOMIC_OP_ALIAS(atomic_cas_32,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_uint,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_32_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_32_ni,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_uint_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint_ni,_atomic_cas_32)
ENTRY(_atomic_cas_64)
casx [%o0], %o1, %o2
retl
mov %o2, %o0
ATOMIC_OP_ALIAS(atomic_cas_64,_atomic_cas_64)
ATOMIC_OP_ALIAS_SIZE(atomic_cas_ulong,_atomic_cas)
STRONG_ALIAS_SIZE(_atomic_cas_ulong,_atomic_cas)
ATOMIC_OP_ALIAS_SIZE(atomic_cas_ptr,_atomic_cas)
STRONG_ALIAS_SIZE(_atomic_cas_ptr,_atomic_cas)
ATOMIC_OP_ALIAS(atomic_cas_64_ni,_atomic_cas_64)
STRONG_ALIAS_SIZE(_atomic_cas_64_ni,_atomic_cas)
ATOMIC_OP_ALIAS_SIZE(atomic_cas_ulong_ni,_atomic_cas)
STRONG_ALIAS_SIZE(_atomic_cas_ulong_ni,_atomic_cas)
ATOMIC_OP_ALIAS_SIZE(atomic_cas_ptr_ni,_atomic_cas)
STRONG_ALIAS_SIZE(_atomic_cas_ptr_ni,_atomic_cas)
|
0xffea/MINIX3
| 2,561
|
common/lib/libc/arch/sparc64/atomic/atomic_or.S
|
/* $NetBSD: atomic_or.S,v 1.7 2008/05/25 15:56:12 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
ENTRY(_atomic_or_32)
ld [%o0], %o2
1: or %o1, %o2, %o3
cas [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %icc, 1b
mov %o3, %o2
retl
or %o1, %o2, %o0
ATOMIC_OP_ALIAS(atomic_or_32,_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_uint,_atomic_or_32)
STRONG_ALIAS(_atomic_or_uint,_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_32_nv,_atomic_or_32)
STRONG_ALIAS(_atomic_or_32_nv,_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_uint_nv,_atomic_or_32)
STRONG_ALIAS(_atomic_or_uint_nv,_atomic_or_32)
ENTRY(_atomic_or_64)
ldx [%o0], %o2
1: or %o1, %o2, %o3
casx [%o0], %o2, %o3
cmp %o2, %o3
bne,a,pn %xcc, 1b
mov %o3, %o2
retl
or %o1, %o2, %o0
ATOMIC_OP_ALIAS(atomic_or_64,_atomic_or_64)
ATOMIC_OP_ALIAS(atomic_or_64_nv,_atomic_or_64)
STRONG_ALIAS(_atomic_or_64_nv,_atomic_or_64)
ATOMIC_OP_ALIAS_SIZE(atomic_or_ulong,_atomic_or)
STRONG_ALIAS_SIZE(_atomic_or_ulong,_atomic_or)
ATOMIC_OP_ALIAS_SIZE(atomic_or_ulong_nv,_atomic_or)
STRONG_ALIAS_SIZE(_atomic_or_ulong_nv,_atomic_or)
|
0xffea/MINIX3
| 4,710
|
common/lib/libc/arch/sparc64/string/ffs.S
|
/* $NetBSD: ffs.S,v 1.2 2009/12/19 19:09:48 pooka Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: Header: ffs.s,v 1.3 92/07/07 00:23:57 torek Exp
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
.asciz "@(#)ffs.s 8.1 (Berkeley) 6/4/93"
#else
RCSID("$NetBSD: ffs.S,v 1.2 2009/12/19 19:09:48 pooka Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
#if 0
/*
* We have a popcount instruction -- use it.
* only uses %o0, %o1, %o2
*
* Here's the pseudo-code from the v9 spec:
*
* int ffs(unsigned zz) {
* return popc( zz ^ ( ~ (-zz)));
* }
*
* XXXX sptifires and blackbirds don't implement popc,
* so we won't use this nice clean code 8^(.
*/
ENTRY(ffs)
neg %o0, %o1 ! %o1 = -zz
xnor %o0, %o1, %o2 ! %o2 = zz ^ ~ -zz
popc %o2, %o1
movrz %o0, %g0, %o1 ! result of ffs(0) should be zero
retl
mov %o1, %o0
#endif
/*
* ffs returns the number of the rightmost bit set in its argument,
* i.e., the lowest value such that (x & (ffs(x) - 1)) is nonzero.
* If no bits are set, ffs returns 0.
*
* We use a table lookup on each byte.
*
* In each section below, %o1 is the current byte (0, 1, 2, or 3).
* The last byte is handled specially: for the first three,
* if that byte is nonzero, we return the table value
* (plus 0, 8, or 16 for the byte number), but for the last
* one, we just return the table value plus 24. This means
* that ffstab[0] must be -24 so that ffs(0) will return 0.
*/
ENTRY(ffs)
#ifdef PIC
PICCY_SET(ffstab, %o2, %o3)
#else
set ffstab, %o2
#endif
andcc %o0, 0xff, %o1 ! get low byte
be,a 1f ! try again if 0
srl %o0, 8, %o0 ! delay slot, get ready for next byte
retl ! return ffstab[%o1]
ldsb [%o2 + %o1], %o0
1:
andcc %o0, 0xff, %o1 ! byte 1 like byte 0...
be,a 2f
srl %o0, 8, %o0 ! (use delay to prepare for byte 2)
ldsb [%o2 + %o1], %o0
retl ! return ffstab[%o1] + 8
add %o0, 8, %o0
2:
andcc %o0, 0xff, %o1
be,a 3f
srl %o0, 8, %o0 ! (prepare for byte 3)
ldsb [%o2 + %o1], %o0
retl ! return ffstab[%o1] + 16
add %o0, 16, %o0
3: ! just return ffstab[%o0] + 24
ldsb [%o2 + %o0], %o0
retl
add %o0, 24, %o0
ffstab:
.byte -24,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 00-0f */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 10-1f */
.byte 6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 20-2f */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 30-3f */
.byte 7,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 40-4f */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 50-5f */
.byte 6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 60-6f */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 70-7f */
.byte 8,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 80-8f */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* 10-9f */
.byte 6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* a0-af */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* b0-bf */
.byte 7,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* c0-cf */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* d0-df */
.byte 6,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* e0-ef */
.byte 5,1,2,1,3,1,2,1,4,1,2,1,3,1,2,1 /* f0-ff */
|
0xffea/MINIX3
| 4,633
|
common/lib/libc/arch/sparc64/string/strlen.S
|
/* $NetBSD: strlen.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
/*
* Copyright 2002 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Eduardo Horvath for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
RCSID("$NetBSD: strlen.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
#endif /* LIBC_SCCS and not lint */
/* The algorithm here uses the following techniques:
*
* 1) Given a word 'x', we can test to see if it contains any 0 bytes
* by subtracting 0x01010101, and seeing if any of the high bits of each
* byte changed from 0 to 1. This works because the least significant
* 0 byte must have had no incoming carry (otherwise it's not the least
* significant), so it is 0x00 - 0x01 == 0xff. For all other
* byte values, either they have the high bit set initially, or when
* 1 is subtracted you get a value in the range 0x00-0x7f, none of which
* have their high bit set. The expression here is
* (x + 0xfefefeff) & ~(x | 0x7f7f7f7f), which gives 0x00000000 when
* there were no 0x00 bytes in the word.
*
* 2) Now just hunt for the first byte that's 0x00 in 'x'.
*
* This is from the book 'The PowerPC Compiler Writer's Guide',
* by Steve Hoxey, Faraydon Karim, Bill Hay and Hank Warren.
*/
ENTRY(strlen)
/*
* Calculate address for and load the first xword.
*/
andn %o0, 0x7, %o1
ldx [%o1], %g1
/*
* Now prepare some constants while the data arrives...
*/
sethi %hi(0xfefefefe), %o3
sethi %hi(0x7f7f7f7f), %o2
or %o3, %lo(0xfefefefe), %o3
or %o2, %lo(0x7f7f7f7f), %o2
sllx %o3, 32, %o5
andcc %o0, 0x7, %g5 ! Hoisted from below to fill a slot
sllx %o2, 32, %o4
or %o3, %o5, %o3
sll %g5, 3, %g5 ! Convert to bytes. hoisted
or %o2, %o4, %o2
inc %o3
neg %g5 ! hoisted
/*
* Mask off the leading bits:
*
* if (ptr & 0x7)
* mask = -1 << (64 - ((ptr & 0x7) << 3));
*/
! andcc %o0, 0x7, %g5 ! Hoisted above
bz,pt %icc, 0f
! sll %g5, 3, %g5 ! Convert to bytes. Also hoisted
! neg %g5 ! Hoisted
add %g5, 64, %g5
mov -1, %o4
sllx %o4, %g5, %o4
or %o4, %g1, %g1 ! Make leading bytes != 0
0:
or %g1, %o2, %o5 ! Do step 1 -- use or/andn instead of nor/and
add %g1, %o3, %g5
inc 8, %o1 ! Point to next word
andncc %g5, %o5, %g0
bz,a,pt %xcc, 0b
ldx [%o1], %g1
mov -1, %o4
dec 8, %o1
sllx %o4, 64-8, %o5
btst %g1, %o5 ! Check high byte
bz %xcc,0f
srlx %o5, 8, %o5
inc %o1
btst %g1, %o5 ! Check 2nd byte
bz %xcc,0f
srlx %o5, 8, %o5
inc %o1
btst %g1, %o5 ! Check 3rd byte
bz %xcc,0f
srlx %o5, 8, %o5
inc %o1
btst %g1, %o5 ! Check 4th byte
bz %xcc,0f
srlx %o5, 8, %o5
inc %o1
btst %g1, %o5 ! Check 5th byte
bz %xcc,0f
srlx %o5, 8, %o5
inc %o1
btst %g1, %o5 ! Check 6th byte
bz %xcc,0f
srlx %o5, 8, %o5
inc %o1
btst %g1, %o5 ! Check 7th byte
bz %xcc,0f
nop
inc %o1
0:
retl
sub %o1, %o0, %o0 ! return length (ptr - (origptr+1))
|
0xffea/MINIX3
| 1,874
|
common/lib/libc/arch/arm/gen/byte_swap_2.S
|
/* $NetBSD: byte_swap_2.S,v 1.4 2008/04/28 20:22:52 martin Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Charles M. Hannum.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#if defined(_KERNEL) || defined(_STANDALONE)
_ENTRY(_C_LABEL(bswap16))
#else
_ENTRY(_C_LABEL(__bswap16))
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
_ENTRY(_C_LABEL(ntohs))
_ENTRY(_C_LABEL(htons))
#endif
_PROF_PROLOGUE
and r1, r0, #0xff
mov r0, r0, lsr #8
orr r0, r0, r1, lsl #8
RET
|
0xffea/MINIX3
| 8,359
|
common/lib/libc/arch/arm/gen/divsi3.S
|
/* $NetBSD: divsi3.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
/*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
/*
* stack is aligned as there's a possibility of branching to .L_overflow
* which makes a C call
*/
ENTRY(__umodsi3)
stmfd sp!, {lr}
sub sp, sp, #4 /* align stack */
bl .L_udivide
add sp, sp, #4 /* unalign stack */
mov r0, r1
ldmfd sp!, {pc}
ENTRY(__modsi3)
stmfd sp!, {lr}
sub sp, sp, #4 /* align stack */
bl .L_divide
add sp, sp, #4 /* unalign stack */
mov r0, r1
ldmfd sp!, {pc}
.L_overflow:
#if !defined(_KERNEL) && !defined(_STANDALONE)
mov r0, #8 /* SIGFPE */
bl PIC_SYM(_C_LABEL(raise), PLT) /* raise it */
mov r0, #0
#else
/* XXX should cause a fatal error */
mvn r0, #0
#endif
RET
ENTRY(__udivsi3)
.L_udivide: /* r0 = r0 / r1; r1 = r0 % r1 */
eor r0, r1, r0
eor r1, r0, r1
eor r0, r1, r0
/* r0 = r1 / r0; r1 = r1 % r0 */
cmp r0, #1
bcc .L_overflow
beq .L_divide_l0
mov ip, #0
movs r1, r1
bpl .L_divide_l1
orr ip, ip, #0x20000000 /* ip bit 0x20000000 = -ve r1 */
movs r1, r1, lsr #1
orrcs ip, ip, #0x10000000 /* ip bit 0x10000000 = bit 0 of r1 */
b .L_divide_l1
.L_divide_l0: /* r0 == 1 */
mov r0, r1
mov r1, #0
RET
ENTRY(__divsi3)
.L_divide: /* r0 = r0 / r1; r1 = r0 % r1 */
eor r0, r1, r0
eor r1, r0, r1
eor r0, r1, r0
/* r0 = r1 / r0; r1 = r1 % r0 */
cmp r0, #1
bcc .L_overflow
beq .L_divide_l0
ands ip, r0, #0x80000000
rsbmi r0, r0, #0
ands r2, r1, #0x80000000
eor ip, ip, r2
rsbmi r1, r1, #0
orr ip, r2, ip, lsr #1 /* ip bit 0x40000000 = -ve division */
/* ip bit 0x80000000 = -ve remainder */
.L_divide_l1:
mov r2, #1
mov r3, #0
/*
* If the highest bit of the dividend is set, we have to be
* careful when shifting the divisor. Test this.
*/
movs r1,r1
bpl .L_old_code
/*
* At this point, the highest bit of r1 is known to be set.
* We abuse this below in the tst instructions.
*/
tst r1, r0 /*, lsl #0 */
bmi .L_divide_b1
tst r1, r0, lsl #1
bmi .L_divide_b2
tst r1, r0, lsl #2
bmi .L_divide_b3
tst r1, r0, lsl #3
bmi .L_divide_b4
tst r1, r0, lsl #4
bmi .L_divide_b5
tst r1, r0, lsl #5
bmi .L_divide_b6
tst r1, r0, lsl #6
bmi .L_divide_b7
tst r1, r0, lsl #7
bmi .L_divide_b8
tst r1, r0, lsl #8
bmi .L_divide_b9
tst r1, r0, lsl #9
bmi .L_divide_b10
tst r1, r0, lsl #10
bmi .L_divide_b11
tst r1, r0, lsl #11
bmi .L_divide_b12
tst r1, r0, lsl #12
bmi .L_divide_b13
tst r1, r0, lsl #13
bmi .L_divide_b14
tst r1, r0, lsl #14
bmi .L_divide_b15
tst r1, r0, lsl #15
bmi .L_divide_b16
tst r1, r0, lsl #16
bmi .L_divide_b17
tst r1, r0, lsl #17
bmi .L_divide_b18
tst r1, r0, lsl #18
bmi .L_divide_b19
tst r1, r0, lsl #19
bmi .L_divide_b20
tst r1, r0, lsl #20
bmi .L_divide_b21
tst r1, r0, lsl #21
bmi .L_divide_b22
tst r1, r0, lsl #22
bmi .L_divide_b23
tst r1, r0, lsl #23
bmi .L_divide_b24
tst r1, r0, lsl #24
bmi .L_divide_b25
tst r1, r0, lsl #25
bmi .L_divide_b26
tst r1, r0, lsl #26
bmi .L_divide_b27
tst r1, r0, lsl #27
bmi .L_divide_b28
tst r1, r0, lsl #28
bmi .L_divide_b29
tst r1, r0, lsl #29
bmi .L_divide_b30
tst r1, r0, lsl #30
bmi .L_divide_b31
/*
* instead of:
* tst r1, r0, lsl #31
* bmi .L_divide_b32
*/
b .L_divide_b32
.L_old_code:
cmp r1, r0
bcc .L_divide_b0
cmp r1, r0, lsl #1
bcc .L_divide_b1
cmp r1, r0, lsl #2
bcc .L_divide_b2
cmp r1, r0, lsl #3
bcc .L_divide_b3
cmp r1, r0, lsl #4
bcc .L_divide_b4
cmp r1, r0, lsl #5
bcc .L_divide_b5
cmp r1, r0, lsl #6
bcc .L_divide_b6
cmp r1, r0, lsl #7
bcc .L_divide_b7
cmp r1, r0, lsl #8
bcc .L_divide_b8
cmp r1, r0, lsl #9
bcc .L_divide_b9
cmp r1, r0, lsl #10
bcc .L_divide_b10
cmp r1, r0, lsl #11
bcc .L_divide_b11
cmp r1, r0, lsl #12
bcc .L_divide_b12
cmp r1, r0, lsl #13
bcc .L_divide_b13
cmp r1, r0, lsl #14
bcc .L_divide_b14
cmp r1, r0, lsl #15
bcc .L_divide_b15
cmp r1, r0, lsl #16
bcc .L_divide_b16
cmp r1, r0, lsl #17
bcc .L_divide_b17
cmp r1, r0, lsl #18
bcc .L_divide_b18
cmp r1, r0, lsl #19
bcc .L_divide_b19
cmp r1, r0, lsl #20
bcc .L_divide_b20
cmp r1, r0, lsl #21
bcc .L_divide_b21
cmp r1, r0, lsl #22
bcc .L_divide_b22
cmp r1, r0, lsl #23
bcc .L_divide_b23
cmp r1, r0, lsl #24
bcc .L_divide_b24
cmp r1, r0, lsl #25
bcc .L_divide_b25
cmp r1, r0, lsl #26
bcc .L_divide_b26
cmp r1, r0, lsl #27
bcc .L_divide_b27
cmp r1, r0, lsl #28
bcc .L_divide_b28
cmp r1, r0, lsl #29
bcc .L_divide_b29
cmp r1, r0, lsl #30
bcc .L_divide_b30
.L_divide_b32:
cmp r1, r0, lsl #31
subhs r1, r1,r0, lsl #31
addhs r3, r3,r2, lsl #31
.L_divide_b31:
cmp r1, r0, lsl #30
subhs r1, r1,r0, lsl #30
addhs r3, r3,r2, lsl #30
.L_divide_b30:
cmp r1, r0, lsl #29
subhs r1, r1,r0, lsl #29
addhs r3, r3,r2, lsl #29
.L_divide_b29:
cmp r1, r0, lsl #28
subhs r1, r1,r0, lsl #28
addhs r3, r3,r2, lsl #28
.L_divide_b28:
cmp r1, r0, lsl #27
subhs r1, r1,r0, lsl #27
addhs r3, r3,r2, lsl #27
.L_divide_b27:
cmp r1, r0, lsl #26
subhs r1, r1,r0, lsl #26
addhs r3, r3,r2, lsl #26
.L_divide_b26:
cmp r1, r0, lsl #25
subhs r1, r1,r0, lsl #25
addhs r3, r3,r2, lsl #25
.L_divide_b25:
cmp r1, r0, lsl #24
subhs r1, r1,r0, lsl #24
addhs r3, r3,r2, lsl #24
.L_divide_b24:
cmp r1, r0, lsl #23
subhs r1, r1,r0, lsl #23
addhs r3, r3,r2, lsl #23
.L_divide_b23:
cmp r1, r0, lsl #22
subhs r1, r1,r0, lsl #22
addhs r3, r3,r2, lsl #22
.L_divide_b22:
cmp r1, r0, lsl #21
subhs r1, r1,r0, lsl #21
addhs r3, r3,r2, lsl #21
.L_divide_b21:
cmp r1, r0, lsl #20
subhs r1, r1,r0, lsl #20
addhs r3, r3,r2, lsl #20
.L_divide_b20:
cmp r1, r0, lsl #19
subhs r1, r1,r0, lsl #19
addhs r3, r3,r2, lsl #19
.L_divide_b19:
cmp r1, r0, lsl #18
subhs r1, r1,r0, lsl #18
addhs r3, r3,r2, lsl #18
.L_divide_b18:
cmp r1, r0, lsl #17
subhs r1, r1,r0, lsl #17
addhs r3, r3,r2, lsl #17
.L_divide_b17:
cmp r1, r0, lsl #16
subhs r1, r1,r0, lsl #16
addhs r3, r3,r2, lsl #16
.L_divide_b16:
cmp r1, r0, lsl #15
subhs r1, r1,r0, lsl #15
addhs r3, r3,r2, lsl #15
.L_divide_b15:
cmp r1, r0, lsl #14
subhs r1, r1,r0, lsl #14
addhs r3, r3,r2, lsl #14
.L_divide_b14:
cmp r1, r0, lsl #13
subhs r1, r1,r0, lsl #13
addhs r3, r3,r2, lsl #13
.L_divide_b13:
cmp r1, r0, lsl #12
subhs r1, r1,r0, lsl #12
addhs r3, r3,r2, lsl #12
.L_divide_b12:
cmp r1, r0, lsl #11
subhs r1, r1,r0, lsl #11
addhs r3, r3,r2, lsl #11
.L_divide_b11:
cmp r1, r0, lsl #10
subhs r1, r1,r0, lsl #10
addhs r3, r3,r2, lsl #10
.L_divide_b10:
cmp r1, r0, lsl #9
subhs r1, r1,r0, lsl #9
addhs r3, r3,r2, lsl #9
.L_divide_b9:
cmp r1, r0, lsl #8
subhs r1, r1,r0, lsl #8
addhs r3, r3,r2, lsl #8
.L_divide_b8:
cmp r1, r0, lsl #7
subhs r1, r1,r0, lsl #7
addhs r3, r3,r2, lsl #7
.L_divide_b7:
cmp r1, r0, lsl #6
subhs r1, r1,r0, lsl #6
addhs r3, r3,r2, lsl #6
.L_divide_b6:
cmp r1, r0, lsl #5
subhs r1, r1,r0, lsl #5
addhs r3, r3,r2, lsl #5
.L_divide_b5:
cmp r1, r0, lsl #4
subhs r1, r1,r0, lsl #4
addhs r3, r3,r2, lsl #4
.L_divide_b4:
cmp r1, r0, lsl #3
subhs r1, r1,r0, lsl #3
addhs r3, r3,r2, lsl #3
.L_divide_b3:
cmp r1, r0, lsl #2
subhs r1, r1,r0, lsl #2
addhs r3, r3,r2, lsl #2
.L_divide_b2:
cmp r1, r0, lsl #1
subhs r1, r1,r0, lsl #1
addhs r3, r3,r2, lsl #1
.L_divide_b1:
cmp r1, r0
subhs r1, r1, r0
addhs r3, r3, r2
.L_divide_b0:
tst ip, #0x20000000
bne .L_udivide_l1
mov r0, r3
cmp ip, #0
rsbmi r1, r1, #0
movs ip, ip, lsl #1
bicmi r0, r0, #0x80000000 /* Fix incase we divided 0x80000000 */
rsbmi r0, r0, #0
RET
.L_udivide_l1:
tst ip, #0x10000000
mov r1, r1, lsl #1
orrne r1, r1, #1
mov r3, r3, lsl #1
cmp r1, r0
subhs r1, r1, r0
addhs r3, r3, r2
mov r0, r3
RET
|
0xffea/MINIX3
| 1,902
|
common/lib/libc/arch/arm/gen/byte_swap_4.S
|
/* $NetBSD: byte_swap_4.S,v 1.4 2008/04/28 20:22:52 martin Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Neil A. Carson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#if defined(_KERNEL) || defined(_STANDALONE)
_ENTRY(_C_LABEL(bswap32))
#else
_ENTRY(_C_LABEL(__bswap32))
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
_ENTRY(_C_LABEL(ntohl))
_ENTRY(_C_LABEL(htonl))
#endif
_PROF_PROLOGUE
eor r1, r0, r0, ror #16
bic r1, r1, #0x00FF0000
mov r0, r0, ror #8
eor r0, r0, r1, lsr #8
RET
|
0xffea/MINIX3
| 3,010
|
common/lib/libc/arch/arm/atomic/atomic_dec_32.S
|
/* $NetBSD: atomic_dec_32.S,v 1.2 2008/08/16 07:12:39 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
#ifdef _ARM_ARCH_6
ENTRY_NP(_atomic_dec_32)
mov r2, r0 /* need r0 for return value */
1: ldrex r0, [r2] /* load old value (return value) */
sub r1, r0, #1 /* calculate new value */
strex r3, r1, [r2] /* try to store */
cmp r3, #0 /* succeed? */
bne 1b /* no, try again? */
RET /* return new value */
END(_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_32,_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_uint,_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_ulong,_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_ptr,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_uint,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ulong,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ptr,_atomic_dec_32)
ENTRY_NP(_atomic_dec_32_nv)
mov r2, r0 /* need r0 for return value */
1: ldrex r0, [r2] /* load old value */
sub r0, r0, #1 /* calculate new value (return value) */
strex r1, r0, [r2] /* try to store */
cmp r1, #0 /* succeed? */
bne 1b /* no, try again? */
RET /* return new value */
END(_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_32_nv,_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_uint_nv,_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_ulong_nv,_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_ptr_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_uint_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ulong_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ptr_nv,_atomic_dec_32_nv)
#endif /* _ARM_ARCH_6 */
|
0xffea/MINIX3
| 2,768
|
common/lib/libc/arch/arm/atomic/atomic_swap.S
|
/* $NetBSD: atomic_swap.S,v 1.2 2008/08/16 07:12:40 matt Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe and Matt Thomas.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the NetBSD
* Foundation, Inc. and its contributors.
* 4. Neither the name of The NetBSD Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
ENTRY_NP(_atomic_swap_32)
swp r0, r1, [r0]
RET
END(_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_32,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_uint,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_ulong,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_ptr,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_uint,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ulong,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ptr,_atomic_swap_32)
ENTRY_NP(_atomic_swap_8)
swpb r0, r1, [r0]
RET
END(_atomic_swap_8)
ATOMIC_OP_ALIAS(atomic_swap_8,_atomic_swap_8)
ATOMIC_OP_ALIAS(atomic_swap_char,_atomic_swap_8)
ATOMIC_OP_ALIAS(atomic_swap_uchar,_atomic_swap_8)
STRONG_ALIAS(_atomic_swap_char,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_uchar,_atomic_swap_32)
|
0xffea/MINIX3
| 1,790
|
common/lib/libc/arch/arm/atomic/atomic_cas_up.S
|
/* $NetBSD: atomic_cas_up.S,v 1.2 2008/05/25 15:56:11 chs Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Steve C. Woodford.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/ras.h>
#include <machine/asm.h>
ENTRY(_atomic_cas_up)
.hidden _C_LABEL(_atomic_cas_up)
mov r3, r0
RAS_START_ASM_HIDDEN(_atomic_cas)
ldr r0, [r3]
cmp r0, r1
streq r2, [r3]
RAS_END_ASM_HIDDEN(_atomic_cas)
RET
|
0xffea/MINIX3
| 3,010
|
common/lib/libc/arch/arm/atomic/atomic_inc_32.S
|
/* $NetBSD: atomic_inc_32.S,v 1.2 2008/08/16 07:12:39 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
#ifdef _ARM_ARCH_6
ENTRY_NP(_atomic_inc_32)
mov r2, r0 /* need r0 for return value */
1: ldrex r0, [r2] /* load old value (return value) */
add r1, r0, #1 /* calculate new value */
strex r3, r1, [r2] /* try to store */
cmp r3, #0 /* succeed? */
bne 1b /* no, try again? */
RET /* return new value */
END(_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_32,_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_uint,_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_ulong,_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_ptr,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_uint,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ulong,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ptr,_atomic_inc_32)
ENTRY_NP(_atomic_inc_32_nv)
mov r2, r0 /* need r0 for return value */
1: ldrex r0, [r2] /* load old value */
add r0, r0, #1 /* calculate new value (return value) */
strex r1, r0, [r2] /* try to store */
cmp r1, #0 /* succeed? */
bne 1b /* no, try again? */
RET /* return new value */
END(_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_32_nv,_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_uint_nv,_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_ulong_nv,_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_ptr_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_uint_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ulong_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ptr_nv,_atomic_inc_32_nv)
#endif /* _ARCH_ARM_6 */
|
0xffea/MINIX3
| 2,393
|
common/lib/libc/arch/arm/atomic/membar_ops.S
|
/* $NetBSD: membar_ops.S,v 1.2 2008/08/16 07:12:40 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
#ifdef _ARM_ARCH_6
ENTRY_NP(_membar_producer)
mcr p15, 0, r0, c7, c10, 4 /* Data Synchronization Barrier */
RET
END(_membar_producer)
ATOMIC_OP_ALIAS(membar_producer,_membar_producer)
ATOMIC_OP_ALIAS(membar_write,_membar_producer)
STRONG_ALIAS(_membar_write,_membar_producer)
ENTRY_NP(_membar_sync)
mcr p15, 0, r0, c7, c10, 5 /* Data Memory Barrier */
RET
END(_membar_sync)
ATOMIC_OP_ALIAS(membar_sync,_membar_sync)
ATOMIC_OP_ALIAS(membar_enter,_membar_sync)
ATOMIC_OP_ALIAS(membar_exit,_membar_sync)
ATOMIC_OP_ALIAS(membar_consumer,_membar_sync)
ATOMIC_OP_ALIAS(membar_read,_membar_sync)
STRONG_ALIAS(_membar_enter,_membar_sync)
STRONG_ALIAS(_membar_exit,_membar_sync)
STRONG_ALIAS(_membar_consumer,_membar_sync)
STRONG_ALIAS(_membar_read,_membar_sync)
#endif /* _ARM_ARCH_6 */
|
0xffea/MINIX3
| 2,790
|
common/lib/libc/arch/arm/atomic/atomic_or_32.S
|
/* $NetBSD: atomic_or_32.S,v 1.2 2008/08/16 07:12:39 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
#ifdef _ARM_ARCH_6
ENTRY_NP(_atomic_or_32)
mov r3, r0 /* need r0 for return value */
1: ldrex r0, [r3] /* load old value (to be returned) */
orr r2, r0, r1 /* calculate new value */
strex ip, r2, [r3] /* try to store */
cmp ip, #0 /* succeed? */
bne 1b /* no, try again */
RET /* return old value */
END(_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_32,_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_uint,_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_ulong,_atomic_or_32)
STRONG_ALIAS(_atomic_or_uint,_atomic_or_32)
STRONG_ALIAS(_atomic_or_ulong,_atomic_or_32)
ENTRY_NP(_atomic_or_32_nv)
mov r3, r0 /* need r0 for return value */
1: ldrex r0, [r3] /* load old value */
orr r0, r0, r1 /* calculate new value (return value) */
strex r2, r0, [r3] /* try to store */
cmp r2, #0 /* succeed? */
bne 1b /* no, try again? */
RET /* return new value */
END(_atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_32_nv,_atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_uint_nv,_atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_ulong_nv,_atomic_or_32_nv)
STRONG_ALIAS(_atomic_or_uint_nv,_atomic_or_32_nv)
STRONG_ALIAS(_atomic_or_ulong_nv,_atomic_or_32_nv)
#endif /* _ARM_ARCH_6 */
|
0xffea/MINIX3
| 2,667
|
common/lib/libc/arch/arm/atomic/atomic_cas_8.S
|
/* $NetBSD: atomic_cas_8.S,v 1.1 2008/11/18 15:22:56 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
RCSID("$NetBSD: atomic_cas_8.S,v 1.1 2008/11/18 15:22:56 matt Exp $")
ENTRY(atomic_cas_8)
XPUSH {r4,r5} /* we need some more registers */
and r3, r0, #3 /* which byte do we replace? */
#if __ARMEB__
eor r3, r3, #3 /* bytes are reversed on BE */
#endif
mov r3, r3, lsl #3 /* multiply by 8 */
mov r1, r1, lsl r3 /* mov old value to correct byte */
eor r2, r1, r2, lsl r3 /* move new value to correct byte */
/* eor r2, r2, r1 */ /* new value is now (old ^ new) */
mov r5, #0xff /* load mask */
mov r5, r5, lsl r3 /* and move to correct byte */
mov r3, r0 /* move pointer */
1: ldrex r4, [r3] /* load 32bit value */
and r0, r4, r5 /* clear other bytes */
teq r0, r1 /* equal old value? */
bne 2f /* nope, bail. */
eor r4, r4, r2 /* new == old ^ (old ^ new) */
strex ip, r4, [r3] /* attempt to store it */
cmp ip, #0 /* succeed? */
bne 1b /* nope, try again. */
2: XPOP {r4,r5} /* don't need these anymore */
and r1, r3, #3
#if __ARMEB__
eor r1, r1, #3
#endif
mov r0, r0, lsr r1 /* shift it back to lsb byte */
RET
|
0xffea/MINIX3
| 2,816
|
common/lib/libc/arch/arm/atomic/atomic_and_32.S
|
/* $NetBSD: atomic_and_32.S,v 1.2 2008/08/16 07:12:39 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
#ifdef _ARM_ARCH_6
ENTRY_NP(_atomic_and_32)
mov r3, r0 /* need r0 for return value */
1: ldrex r0, [r3] /* load old value (to be returned) */
and r2, r0, r1 /* calculate new value */
strex ip, r2, [r3] /* try to store */
cmp ip, #0 /* succeed? */
bne 1b /* no, try again */
RET /* return old value */
END(_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_32,_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_uint,_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_ulong,_atomic_and_32)
STRONG_ALIAS(_atomic_and_uint,_atomic_and_32)
STRONG_ALIAS(_atomic_and_ulong,_atomic_and_32)
ENTRY_NP(_atomic_and_32_nv)
mov r3, r0 /* need r0 for return value */
1: ldrex r0, [r3] /* load old value */
and r0, r0, r1 /* calculate new value (return value) */
strex r2, r0, [r3] /* try to store */
cmp r2, #0 /* succeed? */
bne 1b /* no, try again? */
RET /* return new value */
END(_atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_32_nv,_atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_uint_nv,_atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_ulong_nv,_atomic_and_32_nv)
STRONG_ALIAS(_atomic_and_uint_nv,_atomic_and_32_nv)
STRONG_ALIAS(_atomic_and_ulong_nv,_atomic_and_32_nv)
#endif /* _ARM_ARCH_6 */
|
0xffea/MINIX3
| 3,004
|
common/lib/libc/arch/arm/atomic/atomic_add_32.S
|
/* $NetBSD: atomic_add_32.S,v 1.2 2008/08/16 07:12:39 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
#ifdef _ARM_ARCH_6
ENTRY_NP(_atomic_add_32)
mov r3, r0 /* need r0 for return value */
1: ldrex r0, [r3] /* load old value (to be returned) */
add r2, r0, r1 /* calculate new value */
strex ip, r2, [r3] /* try to store */
cmp ip, #0 /* succeed? */
bne 1b /* no, try again */
RET /* return old value */
END(_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_32,_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_int,_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_long,_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_ptr,_atomic_add_32)
STRONG_ALIAS(_atomic_add_int,_atomic_add_32)
STRONG_ALIAS(_atomic_add_long,_atomic_add_32)
STRONG_ALIAS(_atomic_add_ptr,_atomic_add_32)
ENTRY_NP(_atomic_add_32_nv)
mov r3, r0 /* need r0 for return value */
1: ldrex r0, [r3] /* load old value */
add r0, r0, r1 /* calculate new value (return value) */
strex r2, r0, [r3] /* try to store */
cmp r2, #0 /* succeed? */
bne 1b /* no, try again? */
RET /* return new value */
END(_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_32_nv,_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_int_nv,_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_long_nv,_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_ptr_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_int_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_long_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_ptr_nv,_atomic_add_32_nv)
#endif /* _ARM_ARCH_6 */
|
0xffea/MINIX3
| 2,428
|
common/lib/libc/arch/arm/atomic/atomic_cas_32.S
|
/* $NetBSD: atomic_cas_32.S,v 1.2 2008/08/16 07:12:39 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
#if defined(_ARM_ARCH_6)
/*
* ARMv6 has load-exclusive/store-exclusive which works for both user
* and kernel.
*/
ENTRY_NP(_atomic_cas_32)
mov r3, r0 /* we need r0 for return value */
1:
ldrex r0, [r3] /* load old value */
teq r0, r1 /* compare? */
RETc(ne) /* return if different */
strex ip, r2, [r3] /* store new value */
cmp ip, #0 /* succeed? */
bne 1b /* nope, try again. */
RET /* yes, return. */
END(_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_32,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_uint,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_ulong,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_ptr,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ptr,_atomic_cas_32)
#endif /* _ARCH_ARM_6 */
|
0xffea/MINIX3
| 46,291
|
common/lib/libc/arch/arm/string/memcpy_xscale.S
|
/* $NetBSD: memcpy_xscale.S,v 1.2 2007/06/21 21:37:04 scw Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Steve C. Woodford for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
/* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
ENTRY(memcpy)
pld [r1]
cmp r2, #0x0c
ble .Lmemcpy_short /* <= 12 bytes */
mov r3, r0 /* We must not clobber r0 */
/* Word-align the destination buffer */
ands ip, r3, #0x03 /* Already word aligned? */
beq .Lmemcpy_wordaligned /* Yup */
cmp ip, #0x02
ldrb ip, [r1], #0x01
sub r2, r2, #0x01
strb ip, [r3], #0x01
ldrleb ip, [r1], #0x01
suble r2, r2, #0x01
strleb ip, [r3], #0x01
ldrltb ip, [r1], #0x01
sublt r2, r2, #0x01
strltb ip, [r3], #0x01
/* Destination buffer is now word aligned */
.Lmemcpy_wordaligned:
ands ip, r1, #0x03 /* Is src also word-aligned? */
bne .Lmemcpy_bad_align /* Nope. Things just got bad */
/* Quad-align the destination buffer */
tst r3, #0x07 /* Already quad aligned? */
ldrne ip, [r1], #0x04
stmfd sp!, {r4-r9} /* Free up some registers */
subne r2, r2, #0x04
strne ip, [r3], #0x04
/* Destination buffer quad aligned, source is at least word aligned */
subs r2, r2, #0x80
blt .Lmemcpy_w_lessthan128
/* Copy 128 bytes at a time */
.Lmemcpy_w_loop128:
ldr r4, [r1], #0x04 /* LD:00-03 */
ldr r5, [r1], #0x04 /* LD:04-07 */
pld [r1, #0x18] /* Prefetch 0x20 */
ldr r6, [r1], #0x04 /* LD:08-0b */
ldr r7, [r1], #0x04 /* LD:0c-0f */
ldr r8, [r1], #0x04 /* LD:10-13 */
ldr r9, [r1], #0x04 /* LD:14-17 */
strd r4, [r3], #0x08 /* ST:00-07 */
ldr r4, [r1], #0x04 /* LD:18-1b */
ldr r5, [r1], #0x04 /* LD:1c-1f */
strd r6, [r3], #0x08 /* ST:08-0f */
ldr r6, [r1], #0x04 /* LD:20-23 */
ldr r7, [r1], #0x04 /* LD:24-27 */
pld [r1, #0x18] /* Prefetch 0x40 */
strd r8, [r3], #0x08 /* ST:10-17 */
ldr r8, [r1], #0x04 /* LD:28-2b */
ldr r9, [r1], #0x04 /* LD:2c-2f */
strd r4, [r3], #0x08 /* ST:18-1f */
ldr r4, [r1], #0x04 /* LD:30-33 */
ldr r5, [r1], #0x04 /* LD:34-37 */
strd r6, [r3], #0x08 /* ST:20-27 */
ldr r6, [r1], #0x04 /* LD:38-3b */
ldr r7, [r1], #0x04 /* LD:3c-3f */
strd r8, [r3], #0x08 /* ST:28-2f */
ldr r8, [r1], #0x04 /* LD:40-43 */
ldr r9, [r1], #0x04 /* LD:44-47 */
pld [r1, #0x18] /* Prefetch 0x60 */
strd r4, [r3], #0x08 /* ST:30-37 */
ldr r4, [r1], #0x04 /* LD:48-4b */
ldr r5, [r1], #0x04 /* LD:4c-4f */
strd r6, [r3], #0x08 /* ST:38-3f */
ldr r6, [r1], #0x04 /* LD:50-53 */
ldr r7, [r1], #0x04 /* LD:54-57 */
strd r8, [r3], #0x08 /* ST:40-47 */
ldr r8, [r1], #0x04 /* LD:58-5b */
ldr r9, [r1], #0x04 /* LD:5c-5f */
strd r4, [r3], #0x08 /* ST:48-4f */
ldr r4, [r1], #0x04 /* LD:60-63 */
ldr r5, [r1], #0x04 /* LD:64-67 */
pld [r1, #0x18] /* Prefetch 0x80 */
strd r6, [r3], #0x08 /* ST:50-57 */
ldr r6, [r1], #0x04 /* LD:68-6b */
ldr r7, [r1], #0x04 /* LD:6c-6f */
strd r8, [r3], #0x08 /* ST:58-5f */
ldr r8, [r1], #0x04 /* LD:70-73 */
ldr r9, [r1], #0x04 /* LD:74-77 */
strd r4, [r3], #0x08 /* ST:60-67 */
ldr r4, [r1], #0x04 /* LD:78-7b */
ldr r5, [r1], #0x04 /* LD:7c-7f */
strd r6, [r3], #0x08 /* ST:68-6f */
strd r8, [r3], #0x08 /* ST:70-77 */
subs r2, r2, #0x80
strd r4, [r3], #0x08 /* ST:78-7f */
bge .Lmemcpy_w_loop128
.Lmemcpy_w_lessthan128:
adds r2, r2, #0x80 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
bxeq lr /* Return now if done */
subs r2, r2, #0x20
blt .Lmemcpy_w_lessthan32
/* Copy 32 bytes at a time */
.Lmemcpy_w_loop32:
ldr r4, [r1], #0x04
ldr r5, [r1], #0x04
pld [r1, #0x18]
ldr r6, [r1], #0x04
ldr r7, [r1], #0x04
ldr r8, [r1], #0x04
ldr r9, [r1], #0x04
strd r4, [r3], #0x08
ldr r4, [r1], #0x04
ldr r5, [r1], #0x04
strd r6, [r3], #0x08
strd r8, [r3], #0x08
subs r2, r2, #0x20
strd r4, [r3], #0x08
bge .Lmemcpy_w_loop32
.Lmemcpy_w_lessthan32:
adds r2, r2, #0x20 /* Adjust for extra sub */
ldmeqfd sp!, {r4-r9}
bxeq lr /* Return now if done */
and r4, r2, #0x18
rsbs r4, r4, #0x18
addne pc, pc, r4, lsl #1
nop
/* At least 24 bytes remaining */
ldr r4, [r1], #0x04
ldr r5, [r1], #0x04
sub r2, r2, #0x08
strd r4, [r3], #0x08
/* At least 16 bytes remaining */
ldr r4, [r1], #0x04
ldr r5, [r1], #0x04
sub r2, r2, #0x08
strd r4, [r3], #0x08
/* At least 8 bytes remaining */
ldr r4, [r1], #0x04
ldr r5, [r1], #0x04
subs r2, r2, #0x08
strd r4, [r3], #0x08
/* Less than 8 bytes remaining */
ldmfd sp!, {r4-r9}
bxeq lr /* Return now if done */
subs r2, r2, #0x04
ldrge ip, [r1], #0x04
strge ip, [r3], #0x04
bxeq lr /* Return now if done */
addlt r2, r2, #0x04
ldrb ip, [r1], #0x01
cmp r2, #0x02
ldrgeb r2, [r1], #0x01
strb ip, [r3], #0x01
ldrgtb ip, [r1]
strgeb r2, [r3], #0x01
strgtb ip, [r3]
bx lr
/*
* At this point, it has not been possible to word align both buffers.
* The destination buffer is word aligned, but the source buffer is not.
*/
.Lmemcpy_bad_align:
stmfd sp!, {r4-r7}
bic r1, r1, #0x03
cmp ip, #2
ldr ip, [r1], #0x04
bgt .Lmemcpy_bad3
beq .Lmemcpy_bad2
b .Lmemcpy_bad1
.Lmemcpy_bad1_loop16:
#ifdef __ARMEB__
mov r4, ip, lsl #8
#else
mov r4, ip, lsr #8
#endif
ldr r5, [r1], #0x04
pld [r1, #0x018]
ldr r6, [r1], #0x04
ldr r7, [r1], #0x04
ldr ip, [r1], #0x04
#ifdef __ARMEB__
orr r4, r4, r5, lsr #24
mov r5, r5, lsl #8
orr r5, r5, r6, lsr #24
mov r6, r6, lsl #8
orr r6, r6, r7, lsr #24
mov r7, r7, lsl #8
orr r7, r7, ip, lsr #24
#else
orr r4, r4, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r6, lsl #24
mov r6, r6, lsr #8
orr r6, r6, r7, lsl #24
mov r7, r7, lsr #8
orr r7, r7, ip, lsl #24
#endif
str r4, [r3], #0x04
str r5, [r3], #0x04
str r6, [r3], #0x04
str r7, [r3], #0x04
sub r2, r2, #0x10
.Lmemcpy_bad1:
cmp r2, #0x20
bge .Lmemcpy_bad1_loop16
cmp r2, #0x10
blt .Lmemcpy_bad1_loop16_short
/* copy last 16 bytes (without preload) */
#ifdef __ARMEB__
mov r4, ip, lsl #8
#else
mov r4, ip, lsr #8
#endif
ldr r5, [r1], #0x04
ldr r6, [r1], #0x04
ldr r7, [r1], #0x04
ldr ip, [r1], #0x04
#ifdef __ARMEB__
orr r4, r4, r5, lsr #24
mov r5, r5, lsl #8
orr r5, r5, r6, lsr #24
mov r6, r6, lsl #8
orr r6, r6, r7, lsr #24
mov r7, r7, lsl #8
orr r7, r7, ip, lsr #24
#else
orr r4, r4, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r6, lsl #24
mov r6, r6, lsr #8
orr r6, r6, r7, lsl #24
mov r7, r7, lsr #8
orr r7, r7, ip, lsl #24
#endif
str r4, [r3], #0x04
str r5, [r3], #0x04
str r6, [r3], #0x04
str r7, [r3], #0x04
subs r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
bxeq lr /* Return now if done */
.Lmemcpy_bad1_loop16_short:
subs r2, r2, #0x04
sublt r1, r1, #0x03
blt .Lmemcpy_bad_done
.Lmemcpy_bad1_loop4:
#ifdef __ARMEB__
mov r4, ip, lsl #8
#else
mov r4, ip, lsr #8
#endif
ldr ip, [r1], #0x04
subs r2, r2, #0x04
#ifdef __ARMEB__
orr r4, r4, ip, lsr #24
#else
orr r4, r4, ip, lsl #24
#endif
str r4, [r3], #0x04
bge .Lmemcpy_bad1_loop4
sub r1, r1, #0x03
b .Lmemcpy_bad_done
.Lmemcpy_bad2_loop16:
#ifdef __ARMEB__
mov r4, ip, lsl #16
#else
mov r4, ip, lsr #16
#endif
ldr r5, [r1], #0x04
pld [r1, #0x018]
ldr r6, [r1], #0x04
ldr r7, [r1], #0x04
ldr ip, [r1], #0x04
#ifdef __ARMEB__
orr r4, r4, r5, lsr #16
mov r5, r5, lsl #16
orr r5, r5, r6, lsr #16
mov r6, r6, lsl #16
orr r6, r6, r7, lsr #16
mov r7, r7, lsl #16
orr r7, r7, ip, lsr #16
#else
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
mov r6, r6, lsr #16
orr r6, r6, r7, lsl #16
mov r7, r7, lsr #16
orr r7, r7, ip, lsl #16
#endif
str r4, [r3], #0x04
str r5, [r3], #0x04
str r6, [r3], #0x04
str r7, [r3], #0x04
sub r2, r2, #0x10
.Lmemcpy_bad2:
cmp r2, #0x20
bge .Lmemcpy_bad2_loop16
cmp r2, #0x10
blt .Lmemcpy_bad2_loop16_short
/* copy last 16 bytes (without preload) */
#ifdef __ARMEB__
mov r4, ip, lsl #16
#else
mov r4, ip, lsr #16
#endif
ldr r5, [r1], #0x04
ldr r6, [r1], #0x04
ldr r7, [r1], #0x04
ldr ip, [r1], #0x04
#ifdef __ARMEB__
orr r4, r4, r5, lsr #16
mov r5, r5, lsl #16
orr r5, r5, r6, lsr #16
mov r6, r6, lsl #16
orr r6, r6, r7, lsr #16
mov r7, r7, lsl #16
orr r7, r7, ip, lsr #16
#else
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
mov r6, r6, lsr #16
orr r6, r6, r7, lsl #16
mov r7, r7, lsr #16
orr r7, r7, ip, lsl #16
#endif
str r4, [r3], #0x04
str r5, [r3], #0x04
str r6, [r3], #0x04
str r7, [r3], #0x04
subs r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
bxeq lr /* Return now if done */
.Lmemcpy_bad2_loop16_short:
subs r2, r2, #0x04
sublt r1, r1, #0x02
blt .Lmemcpy_bad_done
.Lmemcpy_bad2_loop4:
#ifdef __ARMEB__
mov r4, ip, lsl #16
#else
mov r4, ip, lsr #16
#endif
ldr ip, [r1], #0x04
subs r2, r2, #0x04
#ifdef __ARMEB__
orr r4, r4, ip, lsr #16
#else
orr r4, r4, ip, lsl #16
#endif
str r4, [r3], #0x04
bge .Lmemcpy_bad2_loop4
sub r1, r1, #0x02
b .Lmemcpy_bad_done
.Lmemcpy_bad3_loop16:
#ifdef __ARMEB__
mov r4, ip, lsl #24
#else
mov r4, ip, lsr #24
#endif
ldr r5, [r1], #0x04
pld [r1, #0x018]
ldr r6, [r1], #0x04
ldr r7, [r1], #0x04
ldr ip, [r1], #0x04
#ifdef __ARMEB__
orr r4, r4, r5, lsr #8
mov r5, r5, lsl #24
orr r5, r5, r6, lsr #8
mov r6, r6, lsl #24
orr r6, r6, r7, lsr #8
mov r7, r7, lsl #24
orr r7, r7, ip, lsr #8
#else
orr r4, r4, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r6, lsl #8
mov r6, r6, lsr #24
orr r6, r6, r7, lsl #8
mov r7, r7, lsr #24
orr r7, r7, ip, lsl #8
#endif
str r4, [r3], #0x04
str r5, [r3], #0x04
str r6, [r3], #0x04
str r7, [r3], #0x04
sub r2, r2, #0x10
.Lmemcpy_bad3:
cmp r2, #0x20
bge .Lmemcpy_bad3_loop16
cmp r2, #0x10
blt .Lmemcpy_bad3_loop16_short
/* copy last 16 bytes (without preload) */
#ifdef __ARMEB__
mov r4, ip, lsl #24
#else
mov r4, ip, lsr #24
#endif
ldr r5, [r1], #0x04
ldr r6, [r1], #0x04
ldr r7, [r1], #0x04
ldr ip, [r1], #0x04
#ifdef __ARMEB__
orr r4, r4, r5, lsr #8
mov r5, r5, lsl #24
orr r5, r5, r6, lsr #8
mov r6, r6, lsl #24
orr r6, r6, r7, lsr #8
mov r7, r7, lsl #24
orr r7, r7, ip, lsr #8
#else
orr r4, r4, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r6, lsl #8
mov r6, r6, lsr #24
orr r6, r6, r7, lsl #8
mov r7, r7, lsr #24
orr r7, r7, ip, lsl #8
#endif
str r4, [r3], #0x04
str r5, [r3], #0x04
str r6, [r3], #0x04
str r7, [r3], #0x04
subs r2, r2, #0x10
ldmeqfd sp!, {r4-r7}
bxeq lr /* Return now if done */
.Lmemcpy_bad3_loop16_short:
subs r2, r2, #0x04
sublt r1, r1, #0x01
blt .Lmemcpy_bad_done
.Lmemcpy_bad3_loop4:
#ifdef __ARMEB__
mov r4, ip, lsl #24
#else
mov r4, ip, lsr #24
#endif
ldr ip, [r1], #0x04
subs r2, r2, #0x04
#ifdef __ARMEB__
orr r4, r4, ip, lsr #8
#else
orr r4, r4, ip, lsl #8
#endif
str r4, [r3], #0x04
bge .Lmemcpy_bad3_loop4
sub r1, r1, #0x01
.Lmemcpy_bad_done:
ldmfd sp!, {r4-r7}
adds r2, r2, #0x04
bxeq lr
ldrb ip, [r1], #0x01
cmp r2, #0x02
ldrgeb r2, [r1], #0x01
strb ip, [r3], #0x01
ldrgtb ip, [r1]
strgeb r2, [r3], #0x01
strgtb ip, [r3]
bx lr
/*
* Handle short copies (less than 16 bytes), possibly misaligned.
* Some of these are *very* common, thanks to the network stack,
* and so are handled specially.
*/
.Lmemcpy_short:
#ifndef _STANDALONE
add pc, pc, r2, lsl #2
nop
bx lr /* 0x00 */
b .Lmemcpy_bytewise /* 0x01 */
b .Lmemcpy_bytewise /* 0x02 */
b .Lmemcpy_bytewise /* 0x03 */
b .Lmemcpy_4 /* 0x04 */
b .Lmemcpy_bytewise /* 0x05 */
b .Lmemcpy_6 /* 0x06 */
b .Lmemcpy_bytewise /* 0x07 */
b .Lmemcpy_8 /* 0x08 */
b .Lmemcpy_bytewise /* 0x09 */
b .Lmemcpy_bytewise /* 0x0a */
b .Lmemcpy_bytewise /* 0x0b */
b .Lmemcpy_c /* 0x0c */
#endif
.Lmemcpy_bytewise:
mov r3, r0 /* We must not clobber r0 */
ldrb ip, [r1], #0x01
1: subs r2, r2, #0x01
strb ip, [r3], #0x01
ldrneb ip, [r1], #0x01
bne 1b
bx lr
#ifndef _STANDALONE
/******************************************************************************
* Special case for 4 byte copies
*/
#define LMEMCPY_4_LOG2 6 /* 64 bytes */
#define LMEMCPY_4_PAD .align LMEMCPY_4_LOG2
LMEMCPY_4_PAD
.Lmemcpy_4:
and r2, r1, #0x03
orr r2, r2, r0, lsl #2
ands r2, r2, #0x0f
sub r3, pc, #0x14
addne pc, r3, r2, lsl #LMEMCPY_4_LOG2
/*
* 0000: dst is 32-bit aligned, src is 32-bit aligned
*/
ldr r2, [r1]
str r2, [r0]
bx lr
LMEMCPY_4_PAD
/*
* 0001: dst is 32-bit aligned, src is 8-bit aligned
*/
ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
ldr r2, [r1, #3] /* BE:r2 = 3xxx LE:r2 = xxx3 */
#ifdef __ARMEB__
mov r3, r3, lsl #8 /* r3 = 012. */
orr r3, r3, r2, lsr #24 /* r3 = 0123 */
#else
mov r3, r3, lsr #8 /* r3 = .210 */
orr r3, r3, r2, lsl #24 /* r3 = 3210 */
#endif
str r3, [r0]
bx lr
LMEMCPY_4_PAD
/*
* 0010: dst is 32-bit aligned, src is 16-bit aligned
*/
#ifdef __ARMEB__
ldrh r3, [r1]
ldrh r2, [r1, #0x02]
#else
ldrh r3, [r1, #0x02]
ldrh r2, [r1]
#endif
orr r3, r2, r3, lsl #16
str r3, [r0]
bx lr
LMEMCPY_4_PAD
/*
* 0011: dst is 32-bit aligned, src is 8-bit aligned
*/
ldr r3, [r1, #-3] /* BE:r3 = xxx0 LE:r3 = 0xxx */
ldr r2, [r1, #1] /* BE:r2 = 123x LE:r2 = x321 */
#ifdef __ARMEB__
mov r3, r3, lsl #24 /* r3 = 0... */
orr r3, r3, r2, lsr #8 /* r3 = 0123 */
#else
mov r3, r3, lsr #24 /* r3 = ...0 */
orr r3, r3, r2, lsl #8 /* r3 = 3210 */
#endif
str r3, [r0]
bx lr
LMEMCPY_4_PAD
/*
* 0100: dst is 8-bit aligned, src is 32-bit aligned
*/
ldr r2, [r1]
#ifdef __ARMEB__
strb r2, [r0, #0x03]
mov r3, r2, lsr #8
mov r1, r2, lsr #24
strb r1, [r0]
#else
strb r2, [r0]
mov r3, r2, lsr #8
mov r1, r2, lsr #24
strb r1, [r0, #0x03]
#endif
strh r3, [r0, #0x01]
bx lr
LMEMCPY_4_PAD
/*
* 0101: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldrh r3, [r1, #0x01]
ldrb r1, [r1, #0x03]
strb r2, [r0]
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
bx lr
LMEMCPY_4_PAD
/*
* 0110: dst is 8-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
ldrh r3, [r1, #0x02] /* LE:r3 = ..23 LE:r3 = ..32 */
#ifdef __ARMEB__
mov r1, r2, lsr #8 /* r1 = ...0 */
strb r1, [r0]
mov r2, r2, lsl #8 /* r2 = .01. */
orr r2, r2, r3, lsr #8 /* r2 = .012 */
#else
strb r2, [r0]
mov r2, r2, lsr #8 /* r2 = ...1 */
orr r2, r2, r3, lsl #8 /* r2 = .321 */
mov r3, r3, lsr #8 /* r3 = ...3 */
#endif
strh r2, [r0, #0x01]
strb r3, [r0, #0x03]
bx lr
LMEMCPY_4_PAD
/*
* 0111: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldrh r3, [r1, #0x01]
ldrb r1, [r1, #0x03]
strb r2, [r0]
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
bx lr
LMEMCPY_4_PAD
/*
* 1000: dst is 16-bit aligned, src is 32-bit aligned
*/
ldr r2, [r1]
#ifdef __ARMEB__
strh r2, [r0, #0x02]
mov r3, r2, lsr #16
strh r3, [r0]
#else
strh r2, [r0]
mov r3, r2, lsr #16
strh r3, [r0, #0x02]
#endif
bx lr
LMEMCPY_4_PAD
/*
* 1001: dst is 16-bit aligned, src is 8-bit aligned
*/
ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
ldr r3, [r1, #3] /* BE:r3 = 3xxx LE:r3 = xxx3 */
mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
strh r1, [r0]
#ifdef __ARMEB__
mov r2, r2, lsl #8 /* r2 = 012. */
orr r2, r2, r3, lsr #24 /* r2 = 0123 */
#else
mov r2, r2, lsr #24 /* r2 = ...2 */
orr r2, r2, r3, lsl #8 /* r2 = xx32 */
#endif
strh r2, [r0, #0x02]
bx lr
LMEMCPY_4_PAD
/*
* 1010: dst is 16-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1]
ldrh r3, [r1, #0x02]
strh r2, [r0]
strh r3, [r0, #0x02]
bx lr
LMEMCPY_4_PAD
/*
* 1011: dst is 16-bit aligned, src is 8-bit aligned
*/
ldr r3, [r1, #1] /* BE:r3 = 123x LE:r3 = x321 */
ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
mov r1, r3, lsr #8 /* BE:r1 = .123 LE:r1 = .x32 */
strh r1, [r0, #0x02]
#ifdef __ARMEB__
mov r3, r3, lsr #24 /* r3 = ...1 */
orr r3, r3, r2, lsl #8 /* r3 = xx01 */
#else
mov r3, r3, lsl #8 /* r3 = 321. */
orr r3, r3, r2, lsr #24 /* r3 = 3210 */
#endif
strh r3, [r0]
bx lr
LMEMCPY_4_PAD
/*
* 1100: dst is 8-bit aligned, src is 32-bit aligned
*/
ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
#ifdef __ARMEB__
strb r2, [r0, #0x03]
mov r3, r2, lsr #8
mov r1, r2, lsr #24
strh r3, [r0, #0x01]
strb r1, [r0]
#else
strb r2, [r0]
mov r3, r2, lsr #8
mov r1, r2, lsr #24
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
#endif
bx lr
LMEMCPY_4_PAD
/*
* 1101: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldrh r3, [r1, #0x01]
ldrb r1, [r1, #0x03]
strb r2, [r0]
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
bx lr
LMEMCPY_4_PAD
/*
* 1110: dst is 8-bit aligned, src is 16-bit aligned
*/
#ifdef __ARMEB__
ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
strb r3, [r0, #0x03]
mov r3, r3, lsr #8 /* r3 = ...2 */
orr r3, r3, r2, lsl #8 /* r3 = ..12 */
strh r3, [r0, #0x01]
mov r2, r2, lsr #8 /* r2 = ...0 */
strb r2, [r0]
#else
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
strb r2, [r0]
mov r2, r2, lsr #8 /* r2 = ...1 */
orr r2, r2, r3, lsl #8 /* r2 = .321 */
strh r2, [r0, #0x01]
mov r3, r3, lsr #8 /* r3 = ...3 */
strb r3, [r0, #0x03]
#endif
bx lr
LMEMCPY_4_PAD
/*
* 1111: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldrh r3, [r1, #0x01]
ldrb r1, [r1, #0x03]
strb r2, [r0]
strh r3, [r0, #0x01]
strb r1, [r0, #0x03]
bx lr
LMEMCPY_4_PAD
/******************************************************************************
* Special case for 6 byte copies
*/
#define LMEMCPY_6_LOG2 6 /* 64 bytes */
#define LMEMCPY_6_PAD .align LMEMCPY_6_LOG2
LMEMCPY_6_PAD
.Lmemcpy_6:
and r2, r1, #0x03
orr r2, r2, r0, lsl #2
ands r2, r2, #0x0f
sub r3, pc, #0x14
addne pc, r3, r2, lsl #LMEMCPY_6_LOG2
/*
* 0000: dst is 32-bit aligned, src is 32-bit aligned
*/
ldr r2, [r1]
ldrh r3, [r1, #0x04]
str r2, [r0]
strh r3, [r0, #0x04]
bx lr
LMEMCPY_6_PAD
/*
* 0001: dst is 32-bit aligned, src is 8-bit aligned
*/
ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
ldr r3, [r1, #0x03] /* BE:r3 = 345x LE:r3 = x543 */
#ifdef __ARMEB__
mov r2, r2, lsl #8 /* r2 = 012. */
orr r2, r2, r3, lsr #24 /* r2 = 0123 */
#else
mov r2, r2, lsr #8 /* r2 = .210 */
orr r2, r2, r3, lsl #24 /* r2 = 3210 */
#endif
mov r3, r3, lsr #8 /* BE:r3 = .345 LE:r3 = .x54 */
str r2, [r0]
strh r3, [r0, #0x04]
bx lr
LMEMCPY_6_PAD
/*
* 0010: dst is 32-bit aligned, src is 16-bit aligned
*/
ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
#ifdef __ARMEB__
mov r1, r3, lsr #16 /* r1 = ..23 */
orr r1, r1, r2, lsl #16 /* r1 = 0123 */
str r1, [r0]
strh r3, [r0, #0x04]
#else
mov r1, r3, lsr #16 /* r1 = ..54 */
orr r2, r2, r3, lsl #16 /* r2 = 3210 */
str r2, [r0]
strh r1, [r0, #0x04]
#endif
bx lr
LMEMCPY_6_PAD
/*
* 0011: dst is 32-bit aligned, src is 8-bit aligned
*/
ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
ldr r3, [r1, #1] /* BE:r3 = 1234 LE:r3 = 4321 */
ldr r1, [r1, #5] /* BE:r1 = 5xxx LE:r3 = xxx5 */
#ifdef __ARMEB__
mov r2, r2, lsl #24 /* r2 = 0... */
orr r2, r2, r3, lsr #8 /* r2 = 0123 */
mov r3, r3, lsl #8 /* r3 = 234. */
orr r1, r3, r1, lsr #24 /* r1 = 2345 */
#else
mov r2, r2, lsr #24 /* r2 = ...0 */
orr r2, r2, r3, lsl #8 /* r2 = 3210 */
mov r1, r1, lsl #8 /* r1 = xx5. */
orr r1, r1, r3, lsr #24 /* r1 = xx54 */
#endif
str r2, [r0]
strh r1, [r0, #0x04]
bx lr
LMEMCPY_6_PAD
/*
* 0100: dst is 8-bit aligned, src is 32-bit aligned
*/
ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
ldrh r2, [r1, #0x04] /* BE:r2 = ..45 LE:r2 = ..54 */
mov r1, r3, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
strh r1, [r0, #0x01]
#ifdef __ARMEB__
mov r1, r3, lsr #24 /* r1 = ...0 */
strb r1, [r0]
mov r3, r3, lsl #8 /* r3 = 123. */
orr r3, r3, r2, lsr #8 /* r3 = 1234 */
#else
strb r3, [r0]
mov r3, r3, lsr #24 /* r3 = ...3 */
orr r3, r3, r2, lsl #8 /* r3 = .543 */
mov r2, r2, lsr #8 /* r2 = ...5 */
#endif
strh r3, [r0, #0x03]
strb r2, [r0, #0x05]
bx lr
LMEMCPY_6_PAD
/*
* 0101: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldrh r3, [r1, #0x01]
ldrh ip, [r1, #0x03]
ldrb r1, [r1, #0x05]
strb r2, [r0]
strh r3, [r0, #0x01]
strh ip, [r0, #0x03]
strb r1, [r0, #0x05]
bx lr
LMEMCPY_6_PAD
/*
* 0110: dst is 8-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
#ifdef __ARMEB__
mov r3, r2, lsr #8 /* r3 = ...0 */
strb r3, [r0]
strb r1, [r0, #0x05]
mov r3, r1, lsr #8 /* r3 = .234 */
strh r3, [r0, #0x03]
mov r3, r2, lsl #8 /* r3 = .01. */
orr r3, r3, r1, lsr #24 /* r3 = .012 */
strh r3, [r0, #0x01]
#else
strb r2, [r0]
mov r3, r1, lsr #24
strb r3, [r0, #0x05]
mov r3, r1, lsr #8 /* r3 = .543 */
strh r3, [r0, #0x03]
mov r3, r2, lsr #8 /* r3 = ...1 */
orr r3, r3, r1, lsl #8 /* r3 = 4321 */
strh r3, [r0, #0x01]
#endif
bx lr
LMEMCPY_6_PAD
/*
* 0111: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldrh r3, [r1, #0x01]
ldrh ip, [r1, #0x03]
ldrb r1, [r1, #0x05]
strb r2, [r0]
strh r3, [r0, #0x01]
strh ip, [r0, #0x03]
strb r1, [r0, #0x05]
bx lr
LMEMCPY_6_PAD
/*
* 1000: dst is 16-bit aligned, src is 32-bit aligned
*/
#ifdef __ARMEB__
ldr r2, [r1] /* r2 = 0123 */
ldrh r3, [r1, #0x04] /* r3 = ..45 */
mov r1, r2, lsr #16 /* r1 = ..01 */
orr r3, r3, r2, lsl#16 /* r3 = 2345 */
strh r1, [r0]
str r3, [r0, #0x02]
#else
ldrh r2, [r1, #0x04] /* r2 = ..54 */
ldr r3, [r1] /* r3 = 3210 */
mov r2, r2, lsl #16 /* r2 = 54.. */
orr r2, r2, r3, lsr #16 /* r2 = 5432 */
strh r3, [r0]
str r2, [r0, #0x02]
#endif
bx lr
LMEMCPY_6_PAD
/*
* 1001: dst is 16-bit aligned, src is 8-bit aligned
*/
ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
ldr r2, [r1, #3] /* BE:r2 = 345x LE:r2 = x543 */
mov r1, r3, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
#ifdef __ARMEB__
mov r2, r2, lsr #8 /* r2 = .345 */
orr r2, r2, r3, lsl #24 /* r2 = 2345 */
#else
mov r2, r2, lsl #8 /* r2 = 543. */
orr r2, r2, r3, lsr #24 /* r2 = 5432 */
#endif
strh r1, [r0]
str r2, [r0, #0x02]
bx lr
LMEMCPY_6_PAD
/*
* 1010: dst is 16-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1]
ldr r3, [r1, #0x02]
strh r2, [r0]
str r3, [r0, #0x02]
bx lr
LMEMCPY_6_PAD
/*
* 1011: dst is 16-bit aligned, src is 8-bit aligned
*/
ldrb r3, [r1] /* r3 = ...0 */
ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
ldrb r1, [r1, #0x05] /* r1 = ...5 */
#ifdef __ARMEB__
mov r3, r3, lsl #8 /* r3 = ..0. */
orr r3, r3, r2, lsr #24 /* r3 = ..01 */
orr r1, r1, r2, lsl #8 /* r1 = 2345 */
#else
orr r3, r3, r2, lsl #8 /* r3 = 3210 */
mov r1, r1, lsl #24 /* r1 = 5... */
orr r1, r1, r2, lsr #8 /* r1 = 5432 */
#endif
strh r3, [r0]
str r1, [r0, #0x02]
bx lr
LMEMCPY_6_PAD
/*
* 1100: dst is 8-bit aligned, src is 32-bit aligned
*/
ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
ldrh r1, [r1, #0x04] /* BE:r1 = ..45 LE:r1 = ..54 */
#ifdef __ARMEB__
mov r3, r2, lsr #24 /* r3 = ...0 */
strb r3, [r0]
mov r2, r2, lsl #8 /* r2 = 123. */
orr r2, r2, r1, lsr #8 /* r2 = 1234 */
#else
strb r2, [r0]
mov r2, r2, lsr #8 /* r2 = .321 */
orr r2, r2, r1, lsl #24 /* r2 = 4321 */
mov r1, r1, lsr #8 /* r1 = ...5 */
#endif
str r2, [r0, #0x01]
strb r1, [r0, #0x05]
bx lr
LMEMCPY_6_PAD
/*
* 1101: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldrh r3, [r1, #0x01]
ldrh ip, [r1, #0x03]
ldrb r1, [r1, #0x05]
strb r2, [r0]
strh r3, [r0, #0x01]
strh ip, [r0, #0x03]
strb r1, [r0, #0x05]
bx lr
LMEMCPY_6_PAD
/*
* 1110: dst is 8-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
#ifdef __ARMEB__
mov r3, r2, lsr #8 /* r3 = ...0 */
strb r3, [r0]
mov r2, r2, lsl #24 /* r2 = 1... */
orr r2, r2, r1, lsr #8 /* r2 = 1234 */
#else
strb r2, [r0]
mov r2, r2, lsr #8 /* r2 = ...1 */
orr r2, r2, r1, lsl #8 /* r2 = 4321 */
mov r1, r1, lsr #24 /* r1 = ...5 */
#endif
str r2, [r0, #0x01]
strb r1, [r0, #0x05]
bx lr
LMEMCPY_6_PAD
/*
* 1111: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldr r3, [r1, #0x01]
ldrb r1, [r1, #0x05]
strb r2, [r0]
str r3, [r0, #0x01]
strb r1, [r0, #0x05]
bx lr
LMEMCPY_6_PAD
/******************************************************************************
* Special case for 8 byte copies
*/
#define LMEMCPY_8_LOG2 6 /* 64 bytes */
#define LMEMCPY_8_PAD .align LMEMCPY_8_LOG2
LMEMCPY_8_PAD
.Lmemcpy_8:
and r2, r1, #0x03
orr r2, r2, r0, lsl #2
ands r2, r2, #0x0f
sub r3, pc, #0x14
addne pc, r3, r2, lsl #LMEMCPY_8_LOG2
/*
* 0000: dst is 32-bit aligned, src is 32-bit aligned
*/
ldr r2, [r1]
ldr r3, [r1, #0x04]
str r2, [r0]
str r3, [r0, #0x04]
bx lr
LMEMCPY_8_PAD
/*
* 0001: dst is 32-bit aligned, src is 8-bit aligned
*/
ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
ldr r2, [r1, #0x03] /* BE:r2 = 3456 LE:r2 = 6543 */
ldrb r1, [r1, #0x07] /* r1 = ...7 */
#ifdef __ARMEB__
mov r3, r3, lsl #8 /* r3 = 012. */
orr r3, r3, r2, lsr #24 /* r3 = 0123 */
orr r2, r1, r2, lsl #8 /* r2 = 4567 */
#else
mov r3, r3, lsr #8 /* r3 = .210 */
orr r3, r3, r2, lsl #24 /* r3 = 3210 */
mov r1, r1, lsl #24 /* r1 = 7... */
orr r2, r1, r2, lsr #8 /* r2 = 7654 */
#endif
str r3, [r0]
str r2, [r0, #0x04]
bx lr
LMEMCPY_8_PAD
/*
* 0010: dst is 32-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
#ifdef __ARMEB__
mov r2, r2, lsl #16 /* r2 = 01.. */
orr r2, r2, r3, lsr #16 /* r2 = 0123 */
orr r3, r1, r3, lsl #16 /* r3 = 4567 */
#else
orr r2, r2, r3, lsl #16 /* r2 = 3210 */
mov r3, r3, lsr #16 /* r3 = ..54 */
orr r3, r3, r1, lsl #16 /* r3 = 7654 */
#endif
str r2, [r0]
str r3, [r0, #0x04]
bx lr
LMEMCPY_8_PAD
/*
* 0011: dst is 32-bit aligned, src is 8-bit aligned
*/
ldrb r3, [r1] /* r3 = ...0 */
ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
ldr r1, [r1, #0x05] /* BE:r1 = 567x LE:r1 = x765 */
#ifdef __ARMEB__
mov r3, r3, lsl #24 /* r3 = 0... */
orr r3, r3, r2, lsr #8 /* r3 = 0123 */
mov r2, r2, lsl #24 /* r2 = 4... */
orr r2, r2, r1, lsr #8 /* r2 = 4567 */
#else
orr r3, r3, r2, lsl #8 /* r3 = 3210 */
mov r2, r2, lsr #24 /* r2 = ...4 */
orr r2, r2, r1, lsl #8 /* r2 = 7654 */
#endif
str r3, [r0]
str r2, [r0, #0x04]
bx lr
LMEMCPY_8_PAD
/*
* 0100: dst is 8-bit aligned, src is 32-bit aligned
*/
ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
ldr r2, [r1, #0x04] /* BE:r2 = 4567 LE:r2 = 7654 */
#ifdef __ARMEB__
mov r1, r3, lsr #24 /* r1 = ...0 */
strb r1, [r0]
mov r1, r3, lsr #8 /* r1 = .012 */
strb r2, [r0, #0x07]
mov r3, r3, lsl #24 /* r3 = 3... */
orr r3, r3, r2, lsr #8 /* r3 = 3456 */
#else
strb r3, [r0]
mov r1, r2, lsr #24 /* r1 = ...7 */
strb r1, [r0, #0x07]
mov r1, r3, lsr #8 /* r1 = .321 */
mov r3, r3, lsr #24 /* r3 = ...3 */
orr r3, r3, r2, lsl #8 /* r3 = 6543 */
#endif
strh r1, [r0, #0x01]
str r3, [r0, #0x03]
bx lr
LMEMCPY_8_PAD
/*
* 0101: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldrh r3, [r1, #0x01]
ldr ip, [r1, #0x03]
ldrb r1, [r1, #0x07]
strb r2, [r0]
strh r3, [r0, #0x01]
str ip, [r0, #0x03]
strb r1, [r0, #0x07]
bx lr
LMEMCPY_8_PAD
/*
* 0110: dst is 8-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
#ifdef __ARMEB__
mov ip, r2, lsr #8 /* ip = ...0 */
strb ip, [r0]
mov ip, r2, lsl #8 /* ip = .01. */
orr ip, ip, r3, lsr #24 /* ip = .012 */
strb r1, [r0, #0x07]
mov r3, r3, lsl #8 /* r3 = 345. */
orr r3, r3, r1, lsr #8 /* r3 = 3456 */
#else
strb r2, [r0] /* 0 */
mov ip, r1, lsr #8 /* ip = ...7 */
strb ip, [r0, #0x07] /* 7 */
mov ip, r2, lsr #8 /* ip = ...1 */
orr ip, ip, r3, lsl #8 /* ip = 4321 */
mov r3, r3, lsr #8 /* r3 = .543 */
orr r3, r3, r1, lsl #24 /* r3 = 6543 */
#endif
strh ip, [r0, #0x01]
str r3, [r0, #0x03]
bx lr
LMEMCPY_8_PAD
/*
* 0111: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r3, [r1] /* r3 = ...0 */
ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
ldrh r2, [r1, #0x05] /* BE:r2 = ..56 LE:r2 = ..65 */
ldrb r1, [r1, #0x07] /* r1 = ...7 */
strb r3, [r0]
mov r3, ip, lsr #16 /* BE:r3 = ..12 LE:r3 = ..43 */
#ifdef __ARMEB__
strh r3, [r0, #0x01]
orr r2, r2, ip, lsl #16 /* r2 = 3456 */
#else
strh ip, [r0, #0x01]
orr r2, r3, r2, lsl #16 /* r2 = 6543 */
#endif
str r2, [r0, #0x03]
strb r1, [r0, #0x07]
bx lr
LMEMCPY_8_PAD
/*
* 1000: dst is 16-bit aligned, src is 32-bit aligned
*/
ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
mov r1, r2, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
#ifdef __ARMEB__
strh r1, [r0]
mov r1, r3, lsr #16 /* r1 = ..45 */
orr r2, r1 ,r2, lsl #16 /* r2 = 2345 */
#else
strh r2, [r0]
orr r2, r1, r3, lsl #16 /* r2 = 5432 */
mov r3, r3, lsr #16 /* r3 = ..76 */
#endif
str r2, [r0, #0x02]
strh r3, [r0, #0x06]
bx lr
LMEMCPY_8_PAD
/*
* 1001: dst is 16-bit aligned, src is 8-bit aligned
*/
ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
ldrb ip, [r1, #0x07] /* ip = ...7 */
mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
strh r1, [r0]
#ifdef __ARMEB__
mov r1, r2, lsl #24 /* r1 = 2... */
orr r1, r1, r3, lsr #8 /* r1 = 2345 */
orr r3, ip, r3, lsl #8 /* r3 = 4567 */
#else
mov r1, r2, lsr #24 /* r1 = ...2 */
orr r1, r1, r3, lsl #8 /* r1 = 5432 */
mov r3, r3, lsr #24 /* r3 = ...6 */
orr r3, r3, ip, lsl #8 /* r3 = ..76 */
#endif
str r1, [r0, #0x02]
strh r3, [r0, #0x06]
bx lr
LMEMCPY_8_PAD
/*
* 1010: dst is 16-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1]
ldr ip, [r1, #0x02]
ldrh r3, [r1, #0x06]
strh r2, [r0]
str ip, [r0, #0x02]
strh r3, [r0, #0x06]
bx lr
LMEMCPY_8_PAD
/*
* 1011: dst is 16-bit aligned, src is 8-bit aligned
*/
ldr r3, [r1, #0x05] /* BE:r3 = 567x LE:r3 = x765 */
ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
ldrb ip, [r1] /* ip = ...0 */
mov r1, r3, lsr #8 /* BE:r1 = .567 LE:r1 = .x76 */
strh r1, [r0, #0x06]
#ifdef __ARMEB__
mov r3, r3, lsr #24 /* r3 = ...5 */
orr r3, r3, r2, lsl #8 /* r3 = 2345 */
mov r2, r2, lsr #24 /* r2 = ...1 */
orr r2, r2, ip, lsl #8 /* r2 = ..01 */
#else
mov r3, r3, lsl #24 /* r3 = 5... */
orr r3, r3, r2, lsr #8 /* r3 = 5432 */
orr r2, ip, r2, lsl #8 /* r2 = 3210 */
#endif
str r3, [r0, #0x02]
strh r2, [r0]
bx lr
LMEMCPY_8_PAD
/*
* 1100: dst is 8-bit aligned, src is 32-bit aligned
*/
ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
mov r1, r3, lsr #8 /* BE:r1 = .456 LE:r1 = .765 */
strh r1, [r0, #0x05]
#ifdef __ARMEB__
strb r3, [r0, #0x07]
mov r1, r2, lsr #24 /* r1 = ...0 */
strb r1, [r0]
mov r2, r2, lsl #8 /* r2 = 123. */
orr r2, r2, r3, lsr #24 /* r2 = 1234 */
str r2, [r0, #0x01]
#else
strb r2, [r0]
mov r1, r3, lsr #24 /* r1 = ...7 */
strb r1, [r0, #0x07]
mov r2, r2, lsr #8 /* r2 = .321 */
orr r2, r2, r3, lsl #24 /* r2 = 4321 */
str r2, [r0, #0x01]
#endif
bx lr
LMEMCPY_8_PAD
/*
* 1101: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r3, [r1] /* r3 = ...0 */
ldrh r2, [r1, #0x01] /* BE:r2 = ..12 LE:r2 = ..21 */
ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
ldrb r1, [r1, #0x07] /* r1 = ...7 */
strb r3, [r0]
mov r3, ip, lsr #16 /* BE:r3 = ..34 LE:r3 = ..65 */
#ifdef __ARMEB__
strh ip, [r0, #0x05]
orr r2, r3, r2, lsl #16 /* r2 = 1234 */
#else
strh r3, [r0, #0x05]
orr r2, r2, ip, lsl #16 /* r2 = 4321 */
#endif
str r2, [r0, #0x01]
strb r1, [r0, #0x07]
bx lr
LMEMCPY_8_PAD
/*
* 1110: dst is 8-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
#ifdef __ARMEB__
mov ip, r2, lsr #8 /* ip = ...0 */
strb ip, [r0]
mov ip, r2, lsl #24 /* ip = 1... */
orr ip, ip, r3, lsr #8 /* ip = 1234 */
strb r1, [r0, #0x07]
mov r1, r1, lsr #8 /* r1 = ...6 */
orr r1, r1, r3, lsl #8 /* r1 = 3456 */
#else
strb r2, [r0]
mov ip, r2, lsr #8 /* ip = ...1 */
orr ip, ip, r3, lsl #8 /* ip = 4321 */
mov r2, r1, lsr #8 /* r2 = ...7 */
strb r2, [r0, #0x07]
mov r1, r1, lsl #8 /* r1 = .76. */
orr r1, r1, r3, lsr #24 /* r1 = .765 */
#endif
str ip, [r0, #0x01]
strh r1, [r0, #0x05]
bx lr
LMEMCPY_8_PAD
/*
* 1111: dst is 8-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1]
ldr ip, [r1, #0x01]
ldrh r3, [r1, #0x05]
ldrb r1, [r1, #0x07]
strb r2, [r0]
str ip, [r0, #0x01]
strh r3, [r0, #0x05]
strb r1, [r0, #0x07]
bx lr
LMEMCPY_8_PAD
/******************************************************************************
* Special case for 12 byte copies
*/
#define LMEMCPY_C_LOG2 7 /* 128 bytes */
#define LMEMCPY_C_PAD .align LMEMCPY_C_LOG2
LMEMCPY_C_PAD
.Lmemcpy_c:
and r2, r1, #0x03
orr r2, r2, r0, lsl #2
ands r2, r2, #0x0f
sub r3, pc, #0x14
addne pc, r3, r2, lsl #LMEMCPY_C_LOG2
/*
* 0000: dst is 32-bit aligned, src is 32-bit aligned
*/
ldr r2, [r1]
ldr r3, [r1, #0x04]
ldr r1, [r1, #0x08]
str r2, [r0]
str r3, [r0, #0x04]
str r1, [r0, #0x08]
bx lr
LMEMCPY_C_PAD
/*
* 0001: dst is 32-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1, #0xb] /* r2 = ...B */
ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
#ifdef __ARMEB__
orr r2, r2, ip, lsl #8 /* r2 = 89AB */
str r2, [r0, #0x08]
mov r2, ip, lsr #24 /* r2 = ...7 */
orr r2, r2, r3, lsl #8 /* r2 = 4567 */
mov r1, r1, lsl #8 /* r1 = 012. */
orr r1, r1, r3, lsr #24 /* r1 = 0123 */
#else
mov r2, r2, lsl #24 /* r2 = B... */
orr r2, r2, ip, lsr #8 /* r2 = BA98 */
str r2, [r0, #0x08]
mov r2, ip, lsl #24 /* r2 = 7... */
orr r2, r2, r3, lsr #8 /* r2 = 7654 */
mov r1, r1, lsr #8 /* r1 = .210 */
orr r1, r1, r3, lsl #24 /* r1 = 3210 */
#endif
str r2, [r0, #0x04]
str r1, [r0]
bx lr
LMEMCPY_C_PAD
/*
* 0010: dst is 32-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
#ifdef __ARMEB__
mov r2, r2, lsl #16 /* r2 = 01.. */
orr r2, r2, r3, lsr #16 /* r2 = 0123 */
str r2, [r0]
mov r3, r3, lsl #16 /* r3 = 45.. */
orr r3, r3, ip, lsr #16 /* r3 = 4567 */
orr r1, r1, ip, lsl #16 /* r1 = 89AB */
#else
orr r2, r2, r3, lsl #16 /* r2 = 3210 */
str r2, [r0]
mov r3, r3, lsr #16 /* r3 = ..54 */
orr r3, r3, ip, lsl #16 /* r3 = 7654 */
mov r1, r1, lsl #16 /* r1 = BA.. */
orr r1, r1, ip, lsr #16 /* r1 = BA98 */
#endif
str r3, [r0, #0x04]
str r1, [r0, #0x08]
bx lr
LMEMCPY_C_PAD
/*
* 0011: dst is 32-bit aligned, src is 8-bit aligned
*/
ldrb r2, [r1] /* r2 = ...0 */
ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
#ifdef __ARMEB__
mov r2, r2, lsl #24 /* r2 = 0... */
orr r2, r2, r3, lsr #8 /* r2 = 0123 */
str r2, [r0]
mov r3, r3, lsl #24 /* r3 = 4... */
orr r3, r3, ip, lsr #8 /* r3 = 4567 */
mov r1, r1, lsr #8 /* r1 = .9AB */
orr r1, r1, ip, lsl #24 /* r1 = 89AB */
#else
orr r2, r2, r3, lsl #8 /* r2 = 3210 */
str r2, [r0]
mov r3, r3, lsr #24 /* r3 = ...4 */
orr r3, r3, ip, lsl #8 /* r3 = 7654 */
mov r1, r1, lsl #8 /* r1 = BA9. */
orr r1, r1, ip, lsr #24 /* r1 = BA98 */
#endif
str r3, [r0, #0x04]
str r1, [r0, #0x08]
bx lr
LMEMCPY_C_PAD
/*
* 0100: dst is 8-bit aligned (byte 1), src is 32-bit aligned
*/
ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
ldr ip, [r1, #0x08] /* BE:ip = 89AB LE:ip = BA98 */
mov r1, r2, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
strh r1, [r0, #0x01]
#ifdef __ARMEB__
mov r1, r2, lsr #24 /* r1 = ...0 */
strb r1, [r0]
mov r1, r2, lsl #24 /* r1 = 3... */
orr r2, r1, r3, lsr #8 /* r1 = 3456 */
mov r1, r3, lsl #24 /* r1 = 7... */
orr r1, r1, ip, lsr #8 /* r1 = 789A */
#else
strb r2, [r0]
mov r1, r2, lsr #24 /* r1 = ...3 */
orr r2, r1, r3, lsl #8 /* r1 = 6543 */
mov r1, r3, lsr #24 /* r1 = ...7 */
orr r1, r1, ip, lsl #8 /* r1 = A987 */
mov ip, ip, lsr #24 /* ip = ...B */
#endif
str r2, [r0, #0x03]
str r1, [r0, #0x07]
strb ip, [r0, #0x0b]
bx lr
LMEMCPY_C_PAD
/*
* 0101: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 1)
*/
ldrb r2, [r1]
ldrh r3, [r1, #0x01]
ldr ip, [r1, #0x03]
strb r2, [r0]
ldr r2, [r1, #0x07]
ldrb r1, [r1, #0x0b]
strh r3, [r0, #0x01]
str ip, [r0, #0x03]
str r2, [r0, #0x07]
strb r1, [r0, #0x0b]
bx lr
LMEMCPY_C_PAD
/*
* 0110: dst is 8-bit aligned (byte 1), src is 16-bit aligned
*/
ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
#ifdef __ARMEB__
mov r2, r2, ror #8 /* r2 = 1..0 */
strb r2, [r0]
mov r2, r2, lsr #16 /* r2 = ..1. */
orr r2, r2, r3, lsr #24 /* r2 = ..12 */
strh r2, [r0, #0x01]
mov r2, r3, lsl #8 /* r2 = 345. */
orr r3, r2, ip, lsr #24 /* r3 = 3456 */
mov r2, ip, lsl #8 /* r2 = 789. */
orr r2, r2, r1, lsr #8 /* r2 = 789A */
#else
strb r2, [r0]
mov r2, r2, lsr #8 /* r2 = ...1 */
orr r2, r2, r3, lsl #8 /* r2 = 4321 */
strh r2, [r0, #0x01]
mov r2, r3, lsr #8 /* r2 = .543 */
orr r3, r2, ip, lsl #24 /* r3 = 6543 */
mov r2, ip, lsr #8 /* r2 = .987 */
orr r2, r2, r1, lsl #24 /* r2 = A987 */
mov r1, r1, lsr #8 /* r1 = ...B */
#endif
str r3, [r0, #0x03]
str r2, [r0, #0x07]
strb r1, [r0, #0x0b]
bx lr
LMEMCPY_C_PAD
/*
* 0111: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 3)
*/
ldrb r2, [r1]
ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
strb r2, [r0]
#ifdef __ARMEB__
mov r2, r3, lsr #16 /* r2 = ..12 */
strh r2, [r0, #0x01]
mov r3, r3, lsl #16 /* r3 = 34.. */
orr r3, r3, ip, lsr #16 /* r3 = 3456 */
mov ip, ip, lsl #16 /* ip = 78.. */
orr ip, ip, r1, lsr #16 /* ip = 789A */
mov r1, r1, lsr #8 /* r1 = .9AB */
#else
strh r3, [r0, #0x01]
mov r3, r3, lsr #16 /* r3 = ..43 */
orr r3, r3, ip, lsl #16 /* r3 = 6543 */
mov ip, ip, lsr #16 /* ip = ..87 */
orr ip, ip, r1, lsl #16 /* ip = A987 */
mov r1, r1, lsr #16 /* r1 = ..xB */
#endif
str r3, [r0, #0x03]
str ip, [r0, #0x07]
strb r1, [r0, #0x0b]
bx lr
LMEMCPY_C_PAD
/*
* 1000: dst is 16-bit aligned, src is 32-bit aligned
*/
ldr ip, [r1] /* BE:ip = 0123 LE:ip = 3210 */
ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
ldr r2, [r1, #0x08] /* BE:r2 = 89AB LE:r2 = BA98 */
mov r1, ip, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
#ifdef __ARMEB__
strh r1, [r0]
mov r1, ip, lsl #16 /* r1 = 23.. */
orr r1, r1, r3, lsr #16 /* r1 = 2345 */
mov r3, r3, lsl #16 /* r3 = 67.. */
orr r3, r3, r2, lsr #16 /* r3 = 6789 */
#else
strh ip, [r0]
orr r1, r1, r3, lsl #16 /* r1 = 5432 */
mov r3, r3, lsr #16 /* r3 = ..76 */
orr r3, r3, r2, lsl #16 /* r3 = 9876 */
mov r2, r2, lsr #16 /* r2 = ..BA */
#endif
str r1, [r0, #0x02]
str r3, [r0, #0x06]
strh r2, [r0, #0x0a]
bx lr
LMEMCPY_C_PAD
/*
* 1001: dst is 16-bit aligned, src is 8-bit aligned (byte 1)
*/
ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
mov ip, r2, lsr #8 /* BE:ip = .x01 LE:ip = .210 */
strh ip, [r0]
ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
ldrb r1, [r1, #0x0b] /* r1 = ...B */
#ifdef __ARMEB__
mov r2, r2, lsl #24 /* r2 = 2... */
orr r2, r2, r3, lsr #8 /* r2 = 2345 */
mov r3, r3, lsl #24 /* r3 = 6... */
orr r3, r3, ip, lsr #8 /* r3 = 6789 */
orr r1, r1, ip, lsl #8 /* r1 = 89AB */
#else
mov r2, r2, lsr #24 /* r2 = ...2 */
orr r2, r2, r3, lsl #8 /* r2 = 5432 */
mov r3, r3, lsr #24 /* r3 = ...6 */
orr r3, r3, ip, lsl #8 /* r3 = 9876 */
mov r1, r1, lsl #8 /* r1 = ..B. */
orr r1, r1, ip, lsr #24 /* r1 = ..BA */
#endif
str r2, [r0, #0x02]
str r3, [r0, #0x06]
strh r1, [r0, #0x0a]
bx lr
LMEMCPY_C_PAD
/*
* 1010: dst is 16-bit aligned, src is 16-bit aligned
*/
ldrh r2, [r1]
ldr r3, [r1, #0x02]
ldr ip, [r1, #0x06]
ldrh r1, [r1, #0x0a]
strh r2, [r0]
str r3, [r0, #0x02]
str ip, [r0, #0x06]
strh r1, [r0, #0x0a]
bx lr
LMEMCPY_C_PAD
/*
* 1011: dst is 16-bit aligned, src is 8-bit aligned (byte 3)
*/
ldr r2, [r1, #0x09] /* BE:r2 = 9ABx LE:r2 = xBA9 */
ldr r3, [r1, #0x05] /* BE:r3 = 5678 LE:r3 = 8765 */
mov ip, r2, lsr #8 /* BE:ip = .9AB LE:ip = .xBA */
strh ip, [r0, #0x0a]
ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
ldrb r1, [r1] /* r1 = ...0 */
#ifdef __ARMEB__
mov r2, r2, lsr #24 /* r2 = ...9 */
orr r2, r2, r3, lsl #8 /* r2 = 6789 */
mov r3, r3, lsr #24 /* r3 = ...5 */
orr r3, r3, ip, lsl #8 /* r3 = 2345 */
mov r1, r1, lsl #8 /* r1 = ..0. */
orr r1, r1, ip, lsr #24 /* r1 = ..01 */
#else
mov r2, r2, lsl #24 /* r2 = 9... */
orr r2, r2, r3, lsr #8 /* r2 = 9876 */
mov r3, r3, lsl #24 /* r3 = 5... */
orr r3, r3, ip, lsr #8 /* r3 = 5432 */
orr r1, r1, ip, lsl #8 /* r1 = 3210 */
#endif
str r2, [r0, #0x06]
str r3, [r0, #0x02]
strh r1, [r0]
bx lr
LMEMCPY_C_PAD
/*
* 1100: dst is 8-bit aligned (byte 3), src is 32-bit aligned
*/
ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
ldr ip, [r1, #0x04] /* BE:ip = 4567 LE:ip = 7654 */
ldr r1, [r1, #0x08] /* BE:r1 = 89AB LE:r1 = BA98 */
#ifdef __ARMEB__
mov r3, r2, lsr #24 /* r3 = ...0 */
strb r3, [r0]
mov r2, r2, lsl #8 /* r2 = 123. */
orr r2, r2, ip, lsr #24 /* r2 = 1234 */
str r2, [r0, #0x01]
mov r2, ip, lsl #8 /* r2 = 567. */
orr r2, r2, r1, lsr #24 /* r2 = 5678 */
str r2, [r0, #0x05]
mov r2, r1, lsr #8 /* r2 = ..9A */
strh r2, [r0, #0x09]
strb r1, [r0, #0x0b]
#else
strb r2, [r0]
mov r3, r2, lsr #8 /* r3 = .321 */
orr r3, r3, ip, lsl #24 /* r3 = 4321 */
str r3, [r0, #0x01]
mov r3, ip, lsr #8 /* r3 = .765 */
orr r3, r3, r1, lsl #24 /* r3 = 8765 */
str r3, [r0, #0x05]
mov r1, r1, lsr #8 /* r1 = .BA9 */
strh r1, [r0, #0x09]
mov r1, r1, lsr #16 /* r1 = ...B */
strb r1, [r0, #0x0b]
#endif
bx lr
LMEMCPY_C_PAD
/*
* 1101: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 1)
*/
ldrb r2, [r1, #0x0b] /* r2 = ...B */
ldr r3, [r1, #0x07] /* BE:r3 = 789A LE:r3 = A987 */
ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
strb r2, [r0, #0x0b]
#ifdef __ARMEB__
strh r3, [r0, #0x09]
mov r3, r3, lsr #16 /* r3 = ..78 */
orr r3, r3, ip, lsl #16 /* r3 = 5678 */
mov ip, ip, lsr #16 /* ip = ..34 */
orr ip, ip, r1, lsl #16 /* ip = 1234 */
mov r1, r1, lsr #16 /* r1 = ..x0 */
#else
mov r2, r3, lsr #16 /* r2 = ..A9 */
strh r2, [r0, #0x09]
mov r3, r3, lsl #16 /* r3 = 87.. */
orr r3, r3, ip, lsr #16 /* r3 = 8765 */
mov ip, ip, lsl #16 /* ip = 43.. */
orr ip, ip, r1, lsr #16 /* ip = 4321 */
mov r1, r1, lsr #8 /* r1 = .210 */
#endif
str r3, [r0, #0x05]
str ip, [r0, #0x01]
strb r1, [r0]
bx lr
LMEMCPY_C_PAD
/*
* 1110: dst is 8-bit aligned (byte 3), src is 16-bit aligned
*/
#ifdef __ARMEB__
ldrh r2, [r1, #0x0a] /* r2 = ..AB */
ldr ip, [r1, #0x06] /* ip = 6789 */
ldr r3, [r1, #0x02] /* r3 = 2345 */
ldrh r1, [r1] /* r1 = ..01 */
strb r2, [r0, #0x0b]
mov r2, r2, lsr #8 /* r2 = ...A */
orr r2, r2, ip, lsl #8 /* r2 = 789A */
mov ip, ip, lsr #8 /* ip = .678 */
orr ip, ip, r3, lsl #24 /* ip = 5678 */
mov r3, r3, lsr #8 /* r3 = .234 */
orr r3, r3, r1, lsl #24 /* r3 = 1234 */
mov r1, r1, lsr #8 /* r1 = ...0 */
strb r1, [r0]
str r3, [r0, #0x01]
str ip, [r0, #0x05]
strh r2, [r0, #0x09]
#else
ldrh r2, [r1] /* r2 = ..10 */
ldr r3, [r1, #0x02] /* r3 = 5432 */
ldr ip, [r1, #0x06] /* ip = 9876 */
ldrh r1, [r1, #0x0a] /* r1 = ..BA */
strb r2, [r0]
mov r2, r2, lsr #8 /* r2 = ...1 */
orr r2, r2, r3, lsl #8 /* r2 = 4321 */
mov r3, r3, lsr #24 /* r3 = ...5 */
orr r3, r3, ip, lsl #8 /* r3 = 8765 */
mov ip, ip, lsr #24 /* ip = ...9 */
orr ip, ip, r1, lsl #8 /* ip = .BA9 */
mov r1, r1, lsr #8 /* r1 = ...B */
str r2, [r0, #0x01]
str r3, [r0, #0x05]
strh ip, [r0, #0x09]
strb r1, [r0, #0x0b]
#endif
bx lr
LMEMCPY_C_PAD
/*
* 1111: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 3)
*/
ldrb r2, [r1]
ldr r3, [r1, #0x01]
ldr ip, [r1, #0x05]
strb r2, [r0]
ldrh r2, [r1, #0x09]
ldrb r1, [r1, #0x0b]
str r3, [r0, #0x01]
str ip, [r0, #0x05]
strh r2, [r0, #0x09]
strb r1, [r0, #0x0b]
bx lr
#endif /* !_STANDALONE */
|
0xffea/MINIX3
| 3,345
|
common/lib/libc/arch/arm/string/ffs.S
|
/* $NetBSD: ffs.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
/*
* Copyright (c) 2001 Christopher Gilbert
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
RCSID("$NetBSD: ffs.S,v 1.1 2005/12/20 19:28:49 christos Exp $")
/*
* ffs - find first set bit, this algorithm isolates the first set
* bit, then multiplies the number by 0x0450fbaf which leaves the top
* 6 bits as an index into the table. This algorithm should be a win
* over the checking each bit in turn as per the C compiled version.
*
* On ARMv5 we use CLZ (count leading Zero's) and then subtract the result
* from 32.
*
* This is the ffs algorithm devised by d.seal and posted to comp.sys.arm on
* 16 Feb 1994.
*/
ENTRY(ffs)
#ifdef _ARM_ARCH_5
/* (X & -X) gives LSB or zero. */
rsb r1, r0, #0
and r0, r0, r1
clz r0, r0
rsb r0, r0, #32
RET
#else
/* Standard trick to isolate bottom bit in r0 or 0 if r0 = 0 on entry */
rsb r1, r0, #0
ands r0, r0, r1
/*
* now r0 has at most one set bit, call this X
* if X = 0, all further instructions are skipped
*/
adrne r2, .L_ffs_table
orrne r0, r0, r0, lsl #4 /* r0 = X * 0x11 */
orrne r0, r0, r0, lsl #6 /* r0 = X * 0x451 */
rsbne r0, r0, r0, lsl #16 /* r0 = X * 0x0450fbaf */
/* now lookup in table indexed on top 6 bits of r0 */
ldrneb r0, [ r2, r0, lsr #26 ]
RET
.text;
.type .L_ffs_table, _ASM_TYPE_OBJECT;
.L_ffs_table:
/* 0 1 2 3 4 5 6 7 */
.byte 0, 1, 2, 13, 3, 7, 0, 14 /* 0- 7 */
.byte 4, 0, 8, 0, 0, 0, 0, 15 /* 8-15 */
.byte 11, 5, 0, 0, 9, 0, 0, 26 /* 16-23 */
.byte 0, 0, 0, 0, 0, 22, 28, 16 /* 24-31 */
.byte 32, 12, 6, 0, 0, 0, 0, 0 /* 32-39 */
.byte 10, 0, 0, 25, 0, 0, 21, 27 /* 40-47 */
.byte 31, 0, 0, 0, 0, 24, 0, 20 /* 48-55 */
.byte 30, 0, 23, 19, 29, 18, 17, 0 /* 56-63 */
#endif
|
0xffea/MINIX3
| 12,949
|
common/lib/libc/arch/arm/string/memmove.S
|
/* $NetBSD: memmove.S,v 1.3 2008/04/28 20:22:52 martin Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Neil A. Carson and Mark Brinicombe
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#ifndef _BCOPY
/* LINTSTUB: Func: void *memmove(void *, const void *, size_t) */
ENTRY(memmove)
#else
/* bcopy = memcpy/memmove with arguments reversed. */
/* LINTSTUB: Func: void bcopy(void *, void *, size_t) */
ENTRY(bcopy)
/* switch the source and destination registers */
eor r0, r1, r0
eor r1, r0, r1
eor r0, r1, r0
#endif
/* Do the buffers overlap? */
cmp r0, r1
RETc(eq) /* Bail now if src/dst are the same */
subhs r3, r0, r1 /* if (dst > src) r3 = dst - src */
sublo r3, r1, r0 /* if (src > dst) r3 = src - dst */
cmp r3, r2 /* if (r3 >= len) we have an overlap */
bhs PIC_SYM(_C_LABEL(memcpy), PLT)
/* Determine copy direction */
cmp r1, r0
bcc .Lmemmove_backwards
moveq r0, #0 /* Quick abort for len=0 */
RETc(eq)
stmdb sp!, {r0, lr} /* memmove() returns dest addr */
subs r2, r2, #4
blt .Lmemmove_fl4 /* less than 4 bytes */
ands r12, r0, #3
bne .Lmemmove_fdestul /* oh unaligned destination addr */
ands r12, r1, #3
bne .Lmemmove_fsrcul /* oh unaligned source addr */
.Lmemmove_ft8:
/* We have aligned source and destination */
subs r2, r2, #8
blt .Lmemmove_fl12 /* less than 12 bytes (4 from above) */
subs r2, r2, #0x14
blt .Lmemmove_fl32 /* less than 32 bytes (12 from above) */
stmdb sp!, {r4} /* borrow r4 */
/* blat 32 bytes at a time */
/* XXX for really big copies perhaps we should use more registers */
.Lmemmove_floop32:
ldmia r1!, {r3, r4, r12, lr}
stmia r0!, {r3, r4, r12, lr}
ldmia r1!, {r3, r4, r12, lr}
stmia r0!, {r3, r4, r12, lr}
subs r2, r2, #0x20
bge .Lmemmove_floop32
cmn r2, #0x10
ldmgeia r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
stmgeia r0!, {r3, r4, r12, lr}
subge r2, r2, #0x10
ldmia sp!, {r4} /* return r4 */
.Lmemmove_fl32:
adds r2, r2, #0x14
/* blat 12 bytes at a time */
.Lmemmove_floop12:
ldmgeia r1!, {r3, r12, lr}
stmgeia r0!, {r3, r12, lr}
subges r2, r2, #0x0c
bge .Lmemmove_floop12
.Lmemmove_fl12:
adds r2, r2, #8
blt .Lmemmove_fl4
subs r2, r2, #4
ldrlt r3, [r1], #4
strlt r3, [r0], #4
ldmgeia r1!, {r3, r12}
stmgeia r0!, {r3, r12}
subge r2, r2, #4
.Lmemmove_fl4:
/* less than 4 bytes to go */
adds r2, r2, #4
ldmeqia sp!, {r0, pc} /* done */
/* copy the crud byte at a time */
cmp r2, #2
ldrb r3, [r1], #1
strb r3, [r0], #1
ldrgeb r3, [r1], #1
strgeb r3, [r0], #1
ldrgtb r3, [r1], #1
strgtb r3, [r0], #1
ldmia sp!, {r0, pc}
/* erg - unaligned destination */
.Lmemmove_fdestul:
rsb r12, r12, #4
cmp r12, #2
/* align destination with byte copies */
ldrb r3, [r1], #1
strb r3, [r0], #1
ldrgeb r3, [r1], #1
strgeb r3, [r0], #1
ldrgtb r3, [r1], #1
strgtb r3, [r0], #1
subs r2, r2, r12
blt .Lmemmove_fl4 /* less the 4 bytes */
ands r12, r1, #3
beq .Lmemmove_ft8 /* we have an aligned source */
/* erg - unaligned source */
/* This is where it gets nasty ... */
.Lmemmove_fsrcul:
bic r1, r1, #3
ldr lr, [r1], #4
cmp r12, #2
bgt .Lmemmove_fsrcul3
beq .Lmemmove_fsrcul2
cmp r2, #0x0c
blt .Lmemmove_fsrcul1loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
.Lmemmove_fsrcul1loop16:
#ifdef __ARMEB__
mov r3, lr, lsl #8
#else
mov r3, lr, lsr #8
#endif
ldmia r1!, {r4, r5, r12, lr}
#ifdef __ARMEB__
orr r3, r3, r4, lsr #24
mov r4, r4, lsl #8
orr r4, r4, r5, lsr #24
mov r5, r5, lsl #8
orr r5, r5, r12, lsr #24
mov r12, r12, lsl #8
orr r12, r12, lr, lsr #24
#else
orr r3, r3, r4, lsl #24
mov r4, r4, lsr #8
orr r4, r4, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r12, lsl #24
mov r12, r12, lsr #8
orr r12, r12, lr, lsl #24
#endif
stmia r0!, {r3-r5, r12}
subs r2, r2, #0x10
bge .Lmemmove_fsrcul1loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt .Lmemmove_fsrcul1l4
.Lmemmove_fsrcul1loop4:
#ifdef __ARMEB__
mov r12, lr, lsl #8
#else
mov r12, lr, lsr #8
#endif
ldr lr, [r1], #4
#ifdef __ARMEB__
orr r12, r12, lr, lsr #24
#else
orr r12, r12, lr, lsl #24
#endif
str r12, [r0], #4
subs r2, r2, #4
bge .Lmemmove_fsrcul1loop4
.Lmemmove_fsrcul1l4:
sub r1, r1, #3
b .Lmemmove_fl4
.Lmemmove_fsrcul2:
cmp r2, #0x0c
blt .Lmemmove_fsrcul2loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
.Lmemmove_fsrcul2loop16:
#ifdef __ARMEB__
mov r3, lr, lsl #16
#else
mov r3, lr, lsr #16
#endif
ldmia r1!, {r4, r5, r12, lr}
#ifdef __ARMEB__
orr r3, r3, r4, lsr #16
mov r4, r4, lsl #16
orr r4, r4, r5, lsr #16
mov r5, r5, lsl #16
orr r5, r5, r12, lsr #16
mov r12, r12, lsl #16
orr r12, r12, lr, lsr #16
#else
orr r3, r3, r4, lsl #16
mov r4, r4, lsr #16
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r12, lsl #16
mov r12, r12, lsr #16
orr r12, r12, lr, lsl #16
#endif
stmia r0!, {r3-r5, r12}
subs r2, r2, #0x10
bge .Lmemmove_fsrcul2loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt .Lmemmove_fsrcul2l4
.Lmemmove_fsrcul2loop4:
#ifdef __ARMEB__
mov r12, lr, lsl #16
#else
mov r12, lr, lsr #16
#endif
ldr lr, [r1], #4
#ifdef __ARMEB__
orr r12, r12, lr, lsr #16
#else
orr r12, r12, lr, lsl #16
#endif
str r12, [r0], #4
subs r2, r2, #4
bge .Lmemmove_fsrcul2loop4
.Lmemmove_fsrcul2l4:
sub r1, r1, #2
b .Lmemmove_fl4
.Lmemmove_fsrcul3:
cmp r2, #0x0c
blt .Lmemmove_fsrcul3loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
.Lmemmove_fsrcul3loop16:
#ifdef __ARMEB__
mov r3, lr, lsl #24
#else
mov r3, lr, lsr #24
#endif
ldmia r1!, {r4, r5, r12, lr}
#ifdef __ARMEB__
orr r3, r3, r4, lsr #8
mov r4, r4, lsl #24
orr r4, r4, r5, lsr #8
mov r5, r5, lsl #24
orr r5, r5, r12, lsr #8
mov r12, r12, lsl #24
orr r12, r12, lr, lsr #8
#else
orr r3, r3, r4, lsl #8
mov r4, r4, lsr #24
orr r4, r4, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r12, lsl #8
mov r12, r12, lsr #24
orr r12, r12, lr, lsl #8
#endif
stmia r0!, {r3-r5, r12}
subs r2, r2, #0x10
bge .Lmemmove_fsrcul3loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt .Lmemmove_fsrcul3l4
.Lmemmove_fsrcul3loop4:
#ifdef __ARMEB__
mov r12, lr, lsl #24
#else
mov r12, lr, lsr #24
#endif
ldr lr, [r1], #4
#ifdef __ARMEB__
orr r12, r12, lr, lsr #8
#else
orr r12, r12, lr, lsl #8
#endif
str r12, [r0], #4
subs r2, r2, #4
bge .Lmemmove_fsrcul3loop4
.Lmemmove_fsrcul3l4:
sub r1, r1, #1
b .Lmemmove_fl4
.Lmemmove_backwards:
add r1, r1, r2
add r0, r0, r2
subs r2, r2, #4
blt .Lmemmove_bl4 /* less than 4 bytes */
ands r12, r0, #3
bne .Lmemmove_bdestul /* oh unaligned destination addr */
ands r12, r1, #3
bne .Lmemmove_bsrcul /* oh unaligned source addr */
.Lmemmove_bt8:
/* We have aligned source and destination */
subs r2, r2, #8
blt .Lmemmove_bl12 /* less than 12 bytes (4 from above) */
stmdb sp!, {r4, lr}
subs r2, r2, #0x14 /* less than 32 bytes (12 from above) */
blt .Lmemmove_bl32
/* blat 32 bytes at a time */
/* XXX for really big copies perhaps we should use more registers */
.Lmemmove_bloop32:
ldmdb r1!, {r3, r4, r12, lr}
stmdb r0!, {r3, r4, r12, lr}
ldmdb r1!, {r3, r4, r12, lr}
stmdb r0!, {r3, r4, r12, lr}
subs r2, r2, #0x20
bge .Lmemmove_bloop32
.Lmemmove_bl32:
cmn r2, #0x10
ldmgedb r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
stmgedb r0!, {r3, r4, r12, lr}
subge r2, r2, #0x10
adds r2, r2, #0x14
ldmgedb r1!, {r3, r12, lr} /* blat a remaining 12 bytes */
stmgedb r0!, {r3, r12, lr}
subge r2, r2, #0x0c
ldmia sp!, {r4, lr}
.Lmemmove_bl12:
adds r2, r2, #8
blt .Lmemmove_bl4
subs r2, r2, #4
ldrlt r3, [r1, #-4]!
strlt r3, [r0, #-4]!
ldmgedb r1!, {r3, r12}
stmgedb r0!, {r3, r12}
subge r2, r2, #4
.Lmemmove_bl4:
/* less than 4 bytes to go */
adds r2, r2, #4
RETc(eq)
/* copy the crud byte at a time */
cmp r2, #2
ldrb r3, [r1, #-1]!
strb r3, [r0, #-1]!
ldrgeb r3, [r1, #-1]!
strgeb r3, [r0, #-1]!
ldrgtb r3, [r1, #-1]!
strgtb r3, [r0, #-1]!
RET
/* erg - unaligned destination */
.Lmemmove_bdestul:
cmp r12, #2
/* align destination with byte copies */
ldrb r3, [r1, #-1]!
strb r3, [r0, #-1]!
ldrgeb r3, [r1, #-1]!
strgeb r3, [r0, #-1]!
ldrgtb r3, [r1, #-1]!
strgtb r3, [r0, #-1]!
subs r2, r2, r12
blt .Lmemmove_bl4 /* less than 4 bytes to go */
ands r12, r1, #3
beq .Lmemmove_bt8 /* we have an aligned source */
/* erg - unaligned source */
/* This is where it gets nasty ... */
.Lmemmove_bsrcul:
bic r1, r1, #3
ldr r3, [r1, #0]
cmp r12, #2
blt .Lmemmove_bsrcul1
beq .Lmemmove_bsrcul2
cmp r2, #0x0c
blt .Lmemmove_bsrcul3loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5, lr}
.Lmemmove_bsrcul3loop16:
#ifdef __ARMEB__
mov lr, r3, lsr #8
#else
mov lr, r3, lsl #8
#endif
ldmdb r1!, {r3-r5, r12}
#ifdef __ARMEB__
orr lr, lr, r12, lsl #24
mov r12, r12, lsr #8
orr r12, r12, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r4, lsl #24
mov r4, r4, lsr #8
orr r4, r4, r3, lsl #24
#else
orr lr, lr, r12, lsr #24
mov r12, r12, lsl #8
orr r12, r12, r5, lsr #24
mov r5, r5, lsl #8
orr r5, r5, r4, lsr #24
mov r4, r4, lsl #8
orr r4, r4, r3, lsr #24
#endif
stmdb r0!, {r4, r5, r12, lr}
subs r2, r2, #0x10
bge .Lmemmove_bsrcul3loop16
ldmia sp!, {r4, r5, lr}
adds r2, r2, #0x0c
blt .Lmemmove_bsrcul3l4
.Lmemmove_bsrcul3loop4:
#ifdef __ARMEB__
mov r12, r3, lsr #8
#else
mov r12, r3, lsl #8
#endif
ldr r3, [r1, #-4]!
#ifdef __ARMEB__
orr r12, r12, r3, lsl #24
#else
orr r12, r12, r3, lsr #24
#endif
str r12, [r0, #-4]!
subs r2, r2, #4
bge .Lmemmove_bsrcul3loop4
.Lmemmove_bsrcul3l4:
add r1, r1, #3
b .Lmemmove_bl4
.Lmemmove_bsrcul2:
cmp r2, #0x0c
blt .Lmemmove_bsrcul2loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5, lr}
.Lmemmove_bsrcul2loop16:
#ifdef __ARMEB__
mov lr, r3, lsr #16
#else
mov lr, r3, lsl #16
#endif
ldmdb r1!, {r3-r5, r12}
#ifdef __ARMEB__
orr lr, lr, r12, lsl #16
mov r12, r12, lsr #16
orr r12, r12, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r4, lsl #16
mov r4, r4, lsr #16
orr r4, r4, r3, lsl #16
#else
orr lr, lr, r12, lsr #16
mov r12, r12, lsl #16
orr r12, r12, r5, lsr #16
mov r5, r5, lsl #16
orr r5, r5, r4, lsr #16
mov r4, r4, lsl #16
orr r4, r4, r3, lsr #16
#endif
stmdb r0!, {r4, r5, r12, lr}
subs r2, r2, #0x10
bge .Lmemmove_bsrcul2loop16
ldmia sp!, {r4, r5, lr}
adds r2, r2, #0x0c
blt .Lmemmove_bsrcul2l4
.Lmemmove_bsrcul2loop4:
#ifdef __ARMEB__
mov r12, r3, lsr #16
#else
mov r12, r3, lsl #16
#endif
ldr r3, [r1, #-4]!
#ifdef __ARMEB__
orr r12, r12, r3, lsl #16
#else
orr r12, r12, r3, lsr #16
#endif
str r12, [r0, #-4]!
subs r2, r2, #4
bge .Lmemmove_bsrcul2loop4
.Lmemmove_bsrcul2l4:
add r1, r1, #2
b .Lmemmove_bl4
.Lmemmove_bsrcul1:
cmp r2, #0x0c
blt .Lmemmove_bsrcul1loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5, lr}
.Lmemmove_bsrcul1loop32:
#ifdef __ARMEB__
mov lr, r3, lsr #24
#else
mov lr, r3, lsl #24
#endif
ldmdb r1!, {r3-r5, r12}
#ifdef __ARMEB__
orr lr, lr, r12, lsl #8
mov r12, r12, lsr #24
orr r12, r12, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r4, lsl #8
mov r4, r4, lsr #24
orr r4, r4, r3, lsl #8
#else
orr lr, lr, r12, lsr #8
mov r12, r12, lsl #24
orr r12, r12, r5, lsr #8
mov r5, r5, lsl #24
orr r5, r5, r4, lsr #8
mov r4, r4, lsl #24
orr r4, r4, r3, lsr #8
#endif
stmdb r0!, {r4, r5, r12, lr}
subs r2, r2, #0x10
bge .Lmemmove_bsrcul1loop32
ldmia sp!, {r4, r5, lr}
adds r2, r2, #0x0c
blt .Lmemmove_bsrcul1l4
.Lmemmove_bsrcul1loop4:
#ifdef __ARMEB__
mov r12, r3, lsr #24
#else
mov r12, r3, lsl #24
#endif
ldr r3, [r1, #-4]!
#ifdef __ARMEB__
orr r12, r12, r3, lsl #8
#else
orr r12, r12, r3, lsr #8
#endif
str r12, [r0, #-4]!
subs r2, r2, #4
bge .Lmemmove_bsrcul1loop4
.Lmemmove_bsrcul1l4:
add r1, r1, #1
b .Lmemmove_bl4
|
0xffea/MINIX3
| 1,890
|
common/lib/libc/arch/arm/string/strncmp.S
|
/* $NetBSD: strncmp.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
/*
* Copyright (c) 2002 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
RCSID("$NetBSD: strncmp.S,v 1.1 2005/12/20 19:28:49 christos Exp $")
ENTRY(strncmp)
/* if ((len - 1) < 0) return 0 */
subs r2, r2, #1
movmi r0, #0
RETc(mi)
/* ip == last src address to compare */
add ip, r0, r2
1:
ldrb r2, [r0], #1
ldrb r3, [r1], #1
cmp ip, r0
cmpcs r2, #1
cmpcs r2, r3
beq 1b
sub r0, r2, r3
RET
|
0xffea/MINIX3
| 7,458
|
common/lib/libc/arch/arm/string/memset.S
|
/* $NetBSD: memset.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Steve C. Woodford for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1995 Mark Brinicombe.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Mark Brinicombe.
* 4. The name of the company nor the name of the author may be used to
* endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
/*
* memset: Sets a block of memory to the specified value
*
* On entry:
* r0 - dest address
* r1 - byte to write
* r2 - number of bytes to write
*
* On exit:
* r0 - dest address
*/
#ifdef _BZERO
/* LINTSTUB: Func: void bzero(void *, size_t) */
ENTRY(bzero)
mov r3, #0x00
#else
/* LINTSTUB: Func: void *memset(void *, int, size_t) */
ENTRY(memset)
and r3, r1, #0xff /* We deal with bytes */
mov r1, r2
#endif
cmp r1, #0x04 /* Do we have less than 4 bytes */
mov ip, r0
blt .Lmemset_lessthanfour
/* Ok first we will word align the address */
ands r2, ip, #0x03 /* Get the bottom two bits */
bne .Lmemset_wordunaligned /* The address is not word aligned */
/* We are now word aligned */
.Lmemset_wordaligned:
#ifndef _BZERO
orr r3, r3, r3, lsl #8 /* Extend value to 16-bits */
#endif
#ifdef __XSCALE__
tst ip, #0x04 /* Quad-align for Xscale */
#else
cmp r1, #0x10
#endif
#ifndef _BZERO
orr r3, r3, r3, lsl #16 /* Extend value to 32-bits */
#endif
#ifdef __XSCALE__
subne r1, r1, #0x04 /* Quad-align if necessary */
strne r3, [ip], #0x04
cmp r1, #0x10
#endif
blt .Lmemset_loop4 /* If less than 16 then use words */
mov r2, r3 /* Duplicate data */
cmp r1, #0x80 /* If < 128 then skip the big loop */
blt .Lmemset_loop32
/* Do 128 bytes at a time */
.Lmemset_loop128:
subs r1, r1, #0x80
#ifdef __XSCALE__
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
#else
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
#endif
bgt .Lmemset_loop128
RETc(eq) /* Zero length so just exit */
add r1, r1, #0x80 /* Adjust for extra sub */
/* Do 32 bytes at a time */
.Lmemset_loop32:
subs r1, r1, #0x20
#ifdef __XSCALE__
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
strged r2, [ip], #0x08
#else
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
#endif
bgt .Lmemset_loop32
RETc(eq) /* Zero length so just exit */
adds r1, r1, #0x10 /* Partially adjust for extra sub */
/* Deal with 16 bytes or more */
#ifdef __XSCALE__
strged r2, [ip], #0x08
strged r2, [ip], #0x08
#else
stmgeia ip!, {r2-r3}
stmgeia ip!, {r2-r3}
#endif
RETc(eq) /* Zero length so just exit */
addlt r1, r1, #0x10 /* Possibly adjust for extra sub */
/* We have at least 4 bytes so copy as words */
.Lmemset_loop4:
subs r1, r1, #0x04
strge r3, [ip], #0x04
bgt .Lmemset_loop4
RETc(eq) /* Zero length so just exit */
#ifdef __XSCALE__
/* Compensate for 64-bit alignment check */
adds r1, r1, #0x04
RETc(eq)
cmp r1, #2
#else
cmp r1, #-2
#endif
strb r3, [ip], #0x01 /* Set 1 byte */
strgeb r3, [ip], #0x01 /* Set another byte */
strgtb r3, [ip] /* and a third */
RET /* Exit */
.Lmemset_wordunaligned:
rsb r2, r2, #0x004
strb r3, [ip], #0x01 /* Set 1 byte */
cmp r2, #0x02
strgeb r3, [ip], #0x01 /* Set another byte */
sub r1, r1, r2
strgtb r3, [ip], #0x01 /* and a third */
cmp r1, #0x04 /* More than 4 bytes left? */
bge .Lmemset_wordaligned /* Yup */
.Lmemset_lessthanfour:
cmp r1, #0x00
RETc(eq) /* Zero length so exit */
strb r3, [ip], #0x01 /* Set 1 byte */
cmp r1, #0x02
strgeb r3, [ip], #0x01 /* Set another byte */
strgtb r3, [ip] /* and a third */
RET /* Exit */
|
0xffea/MINIX3
| 8,228
|
common/lib/libc/arch/arm/string/memcpy_arm.S
|
/* $NetBSD: memcpy_arm.S,v 1.2 2008/04/28 20:22:52 martin Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Neil A. Carson and Mark Brinicombe
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
/*
* This is one fun bit of code ...
* Some easy listening music is suggested while trying to understand this
* code e.g. Iron Maiden
*
* For anyone attempting to understand it :
*
* The core code is implemented here with simple stubs for memcpy().
*
* All local labels are prefixed with Lmemcpy_
* Following the prefix a label starting f is used in the forward copy code
* while a label using b is used in the backwards copy code
* The source and destination addresses determine whether a forward or
* backward copy is performed.
* Separate bits of code are used to deal with the following situations
* for both the forward and backwards copy.
* unaligned source address
* unaligned destination address
* Separate copy routines are used to produce an optimised result for each
* of these cases.
* The copy code will use LDM/STM instructions to copy up to 32 bytes at
* a time where possible.
*
* Note: r12 (aka ip) can be trashed during the function along with
* r0-r3 although r0-r2 have defined uses i.e. src, dest, len through out.
* Additional registers are preserved prior to use i.e. r4, r5 & lr
*
* Apologies for the state of the comments ;-)
*/
/* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
ENTRY(memcpy)
/* save leaf functions having to store this away */
stmdb sp!, {r0, lr} /* memcpy() returns dest addr */
subs r2, r2, #4
blt .Lmemcpy_l4 /* less than 4 bytes */
ands r12, r0, #3
bne .Lmemcpy_destul /* oh unaligned destination addr */
ands r12, r1, #3
bne .Lmemcpy_srcul /* oh unaligned source addr */
.Lmemcpy_t8:
/* We have aligned source and destination */
subs r2, r2, #8
blt .Lmemcpy_l12 /* less than 12 bytes (4 from above) */
subs r2, r2, #0x14
blt .Lmemcpy_l32 /* less than 32 bytes (12 from above) */
stmdb sp!, {r4} /* borrow r4 */
/* blat 32 bytes at a time */
/* XXX for really big copies perhaps we should use more registers */
.Lmemcpy_loop32:
ldmia r1!, {r3, r4, r12, lr}
stmia r0!, {r3, r4, r12, lr}
ldmia r1!, {r3, r4, r12, lr}
stmia r0!, {r3, r4, r12, lr}
subs r2, r2, #0x20
bge .Lmemcpy_loop32
cmn r2, #0x10
ldmgeia r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
stmgeia r0!, {r3, r4, r12, lr}
subge r2, r2, #0x10
ldmia sp!, {r4} /* return r4 */
.Lmemcpy_l32:
adds r2, r2, #0x14
/* blat 12 bytes at a time */
.Lmemcpy_loop12:
ldmgeia r1!, {r3, r12, lr}
stmgeia r0!, {r3, r12, lr}
subges r2, r2, #0x0c
bge .Lmemcpy_loop12
.Lmemcpy_l12:
adds r2, r2, #8
blt .Lmemcpy_l4
subs r2, r2, #4
ldrlt r3, [r1], #4
strlt r3, [r0], #4
ldmgeia r1!, {r3, r12}
stmgeia r0!, {r3, r12}
subge r2, r2, #4
.Lmemcpy_l4:
/* less than 4 bytes to go */
adds r2, r2, #4
#ifdef __APCS_26_
ldmeqia sp!, {r0, pc}^ /* done */
#else
ldmeqia sp!, {r0, pc} /* done */
#endif
/* copy the crud byte at a time */
cmp r2, #2
ldrb r3, [r1], #1
strb r3, [r0], #1
ldrgeb r3, [r1], #1
strgeb r3, [r0], #1
ldrgtb r3, [r1], #1
strgtb r3, [r0], #1
ldmia sp!, {r0, pc}
/* erg - unaligned destination */
.Lmemcpy_destul:
rsb r12, r12, #4
cmp r12, #2
/* align destination with byte copies */
ldrb r3, [r1], #1
strb r3, [r0], #1
ldrgeb r3, [r1], #1
strgeb r3, [r0], #1
ldrgtb r3, [r1], #1
strgtb r3, [r0], #1
subs r2, r2, r12
blt .Lmemcpy_l4 /* less the 4 bytes */
ands r12, r1, #3
beq .Lmemcpy_t8 /* we have an aligned source */
/* erg - unaligned source */
/* This is where it gets nasty ... */
.Lmemcpy_srcul:
bic r1, r1, #3
ldr lr, [r1], #4
cmp r12, #2
bgt .Lmemcpy_srcul3
beq .Lmemcpy_srcul2
cmp r2, #0x0c
blt .Lmemcpy_srcul1loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
.Lmemcpy_srcul1loop16:
#ifdef __ARMEB__
mov r3, lr, lsl #8
#else
mov r3, lr, lsr #8
#endif
ldmia r1!, {r4, r5, r12, lr}
#ifdef __ARMEB__
orr r3, r3, r4, lsr #24
mov r4, r4, lsl #8
orr r4, r4, r5, lsr #24
mov r5, r5, lsl #8
orr r5, r5, r12, lsr #24
mov r12, r12, lsl #8
orr r12, r12, lr, lsr #24
#else
orr r3, r3, r4, lsl #24
mov r4, r4, lsr #8
orr r4, r4, r5, lsl #24
mov r5, r5, lsr #8
orr r5, r5, r12, lsl #24
mov r12, r12, lsr #8
orr r12, r12, lr, lsl #24
#endif
stmia r0!, {r3-r5, r12}
subs r2, r2, #0x10
bge .Lmemcpy_srcul1loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt .Lmemcpy_srcul1l4
.Lmemcpy_srcul1loop4:
#ifdef __ARMEB__
mov r12, lr, lsl #8
#else
mov r12, lr, lsr #8
#endif
ldr lr, [r1], #4
#ifdef __ARMEB__
orr r12, r12, lr, lsr #24
#else
orr r12, r12, lr, lsl #24
#endif
str r12, [r0], #4
subs r2, r2, #4
bge .Lmemcpy_srcul1loop4
.Lmemcpy_srcul1l4:
sub r1, r1, #3
b .Lmemcpy_l4
.Lmemcpy_srcul2:
cmp r2, #0x0c
blt .Lmemcpy_srcul2loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
.Lmemcpy_srcul2loop16:
#ifdef __ARMEB__
mov r3, lr, lsl #16
#else
mov r3, lr, lsr #16
#endif
ldmia r1!, {r4, r5, r12, lr}
#ifdef __ARMEB__
orr r3, r3, r4, lsr #16
mov r4, r4, lsl #16
orr r4, r4, r5, lsr #16
mov r5, r5, lsl #16
orr r5, r5, r12, lsr #16
mov r12, r12, lsl #16
orr r12, r12, lr, lsr #16
#else
orr r3, r3, r4, lsl #16
mov r4, r4, lsr #16
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r12, lsl #16
mov r12, r12, lsr #16
orr r12, r12, lr, lsl #16
#endif
stmia r0!, {r3-r5, r12}
subs r2, r2, #0x10
bge .Lmemcpy_srcul2loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt .Lmemcpy_srcul2l4
.Lmemcpy_srcul2loop4:
#ifdef __ARMEB__
mov r12, lr, lsl #16
#else
mov r12, lr, lsr #16
#endif
ldr lr, [r1], #4
#ifdef __ARMEB__
orr r12, r12, lr, lsr #16
#else
orr r12, r12, lr, lsl #16
#endif
str r12, [r0], #4
subs r2, r2, #4
bge .Lmemcpy_srcul2loop4
.Lmemcpy_srcul2l4:
sub r1, r1, #2
b .Lmemcpy_l4
.Lmemcpy_srcul3:
cmp r2, #0x0c
blt .Lmemcpy_srcul3loop4
sub r2, r2, #0x0c
stmdb sp!, {r4, r5}
.Lmemcpy_srcul3loop16:
#ifdef __ARMEB__
mov r3, lr, lsl #24
#else
mov r3, lr, lsr #24
#endif
ldmia r1!, {r4, r5, r12, lr}
#ifdef __ARMEB__
orr r3, r3, r4, lsr #8
mov r4, r4, lsl #24
orr r4, r4, r5, lsr #8
mov r5, r5, lsl #24
orr r5, r5, r12, lsr #8
mov r12, r12, lsl #24
orr r12, r12, lr, lsr #8
#else
orr r3, r3, r4, lsl #8
mov r4, r4, lsr #24
orr r4, r4, r5, lsl #8
mov r5, r5, lsr #24
orr r5, r5, r12, lsl #8
mov r12, r12, lsr #24
orr r12, r12, lr, lsl #8
#endif
stmia r0!, {r3-r5, r12}
subs r2, r2, #0x10
bge .Lmemcpy_srcul3loop16
ldmia sp!, {r4, r5}
adds r2, r2, #0x0c
blt .Lmemcpy_srcul3l4
.Lmemcpy_srcul3loop4:
#ifdef __ARMEB__
mov r12, lr, lsl #24
#else
mov r12, lr, lsr #24
#endif
ldr lr, [r1], #4
#ifdef __ARMEB__
orr r12, r12, lr, lsr #8
#else
orr r12, r12, lr, lsl #8
#endif
str r12, [r0], #4
subs r2, r2, #4
bge .Lmemcpy_srcul3loop4
.Lmemcpy_srcul3l4:
sub r1, r1, #1
b .Lmemcpy_l4
|
0xffea/MINIX3
| 6,247
|
common/lib/libc/arch/arm/string/memcmp.S
|
/* $NetBSD: memcmp.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
/*
* Copyright 2003 Wasabi Systems, Inc.
* All rights reserved.
*
* Written by Steve C. Woodford for Wasabi Systems, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the NetBSD Project by
* Wasabi Systems, Inc.
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
* or promote products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2002 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
RCSID("$NetBSD: memcmp.S,v 1.1 2005/12/20 19:28:49 christos Exp $")
ENTRY(memcmp)
mov ip, r0
#if defined(_KERNEL) && !defined(_STANDALONE)
cmp r2, #0x06
beq .Lmemcmp_6bytes
#endif
mov r0, #0x00
/* Are both addresses aligned the same way? */
cmp r2, #0x00
eornes r3, ip, r1
RETc(eq) /* len == 0, or same addresses! */
tst r3, #0x03
subne r2, r2, #0x01
bne .Lmemcmp_bytewise2 /* Badly aligned. Do it the slow way */
/* Word-align the addresses, if necessary */
sub r3, r1, #0x05
ands r3, r3, #0x03
add r3, r3, r3, lsl #1
addne pc, pc, r3, lsl #3
nop
/* Compare up to 3 bytes */
ldrb r0, [ip], #0x01
ldrb r3, [r1], #0x01
subs r0, r0, r3
RETc(ne)
subs r2, r2, #0x01
RETc(eq)
/* Compare up to 2 bytes */
ldrb r0, [ip], #0x01
ldrb r3, [r1], #0x01
subs r0, r0, r3
RETc(ne)
subs r2, r2, #0x01
RETc(eq)
/* Compare 1 byte */
ldrb r0, [ip], #0x01
ldrb r3, [r1], #0x01
subs r0, r0, r3
RETc(ne)
subs r2, r2, #0x01
RETc(eq)
/* Compare 4 bytes at a time, if possible */
subs r2, r2, #0x04
bcc .Lmemcmp_bytewise
.Lmemcmp_word_aligned:
ldr r0, [ip], #0x04
ldr r3, [r1], #0x04
subs r2, r2, #0x04
cmpcs r0, r3
beq .Lmemcmp_word_aligned
sub r0, r0, r3
/* Correct for extra subtraction, and check if done */
adds r2, r2, #0x04
cmpeq r0, #0x00 /* If done, did all bytes match? */
RETc(eq) /* Yup. Just return */
/* Re-do the final word byte-wise */
sub ip, ip, #0x04
sub r1, r1, #0x04
.Lmemcmp_bytewise:
add r2, r2, #0x03
.Lmemcmp_bytewise2:
ldrb r0, [ip], #0x01
ldrb r3, [r1], #0x01
subs r2, r2, #0x01
cmpcs r0, r3
beq .Lmemcmp_bytewise2
sub r0, r0, r3
RET
#if defined(_KERNEL) && !defined(_STANDALONE)
/*
* 6 byte compares are very common, thanks to the network stack.
* This code is hand-scheduled to reduce the number of stalls for
* load results. Everything else being equal, this will be ~32%
* faster than a byte-wise memcmp.
*/
.align 5
.Lmemcmp_6bytes:
ldrb r3, [r1, #0x00] /* r3 = b2#0 */
ldrb r0, [ip, #0x00] /* r0 = b1#0 */
ldrb r2, [r1, #0x01] /* r2 = b2#1 */
subs r0, r0, r3 /* r0 = b1#0 - b2#0 */
ldreqb r3, [ip, #0x01] /* r3 = b1#1 */
RETc(ne) /* Return if mismatch on #0 */
subs r0, r3, r2 /* r0 = b1#1 - b2#1 */
ldreqb r3, [r1, #0x02] /* r3 = b2#2 */
ldreqb r0, [ip, #0x02] /* r0 = b1#2 */
RETc(ne) /* Return if mismatch on #1 */
ldrb r2, [r1, #0x03] /* r2 = b2#3 */
subs r0, r0, r3 /* r0 = b1#2 - b2#2 */
ldreqb r3, [ip, #0x03] /* r3 = b1#3 */
RETc(ne) /* Return if mismatch on #2 */
subs r0, r3, r2 /* r0 = b1#3 - b2#3 */
ldreqb r3, [r1, #0x04] /* r3 = b2#4 */
ldreqb r0, [ip, #0x04] /* r0 = b1#4 */
RETc(ne) /* Return if mismatch on #3 */
ldrb r2, [r1, #0x05] /* r2 = b2#5 */
subs r0, r0, r3 /* r0 = b1#4 - b2#4 */
ldreqb r3, [ip, #0x05] /* r3 = b1#5 */
RETc(ne) /* Return if mismatch on #4 */
sub r0, r3, r2 /* r0 = b1#5 - b2#5 */
RET
#endif
|
0xffea/MINIX3
| 1,741
|
common/lib/libc/arch/arm/string/strcmp.S
|
/* $NetBSD: strcmp.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
/*
* Copyright (c) 2002 ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
RCSID("$NetBSD: strcmp.S,v 1.1 2005/12/20 19:28:49 christos Exp $")
ENTRY(strcmp)
1:
ldrb r2, [r0], #1
ldrb r3, [r1], #1
cmp r2, #1
cmpcs r2, r3
beq 1b
sub r0, r2, r3
RET
|
0xffea/MINIX3
| 9,713
|
common/lib/libc/arch/x86_64/atomic/atomic.S
|
/* $NetBSD: atomic.S,v 1.16 2011/01/12 23:12:10 joerg Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe, and by Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/param.h>
#include <machine/asm.h>
#ifdef _KERNEL
#define ALIAS(f, t) STRONG_ALIAS(f,t)
#else
#define ALIAS(f, t) WEAK_ALIAS(f,t)
#endif
#ifdef _HARDKERNEL
#define LOCK(n) .Lpatch ## n: lock
#define ENDLABEL(a) _ALIGN_TEXT; LABEL(a)
#else
#define LOCK(n) lock
#define ENDLABEL(a) /* nothing */
#endif
.text
/* 32-bit */
ENTRY(_atomic_add_32)
LOCK(1)
addl %esi, (%rdi)
ret
ENTRY(_atomic_add_32_nv)
movl %esi, %eax
LOCK(2)
xaddl %eax, (%rdi)
addl %esi, %eax
ret
ENTRY(_atomic_and_32)
LOCK(3)
andl %esi, (%rdi)
ret
ENTRY(_atomic_and_32_nv)
movl (%rdi), %eax
1:
movl %eax, %ecx
andl %esi, %ecx
LOCK(4)
cmpxchgl %ecx, (%rdi)
jnz 1b
movl %ecx, %eax
ret
ENTRY(_atomic_dec_32)
LOCK(5)
decl (%rdi)
ret
ENTRY(_atomic_dec_32_nv)
movl $-1, %eax
LOCK(6)
xaddl %eax, (%rdi)
decl %eax
ret
ENTRY(_atomic_inc_32)
LOCK(7)
incl (%rdi)
ret
ENTRY(_atomic_inc_32_nv)
movl $1, %eax
LOCK(8)
xaddl %eax, (%rdi)
incl %eax
ret
ENTRY(_atomic_or_32)
LOCK(9)
orl %esi, (%rdi)
ret
ENTRY(_atomic_or_32_nv)
movl (%rdi), %eax
1:
movl %eax, %ecx
orl %esi, %ecx
LOCK(10)
cmpxchgl %ecx, (%rdi)
jnz 1b
movl %ecx, %eax
ret
ENTRY(_atomic_swap_32)
movl %esi, %eax
xchgl %eax, (%rdi)
ret
ENTRY(_atomic_cas_32)
movl %esi, %eax
LOCK(12)
cmpxchgl %edx, (%rdi)
/* %eax now contains the old value */
ret
ENTRY(_atomic_cas_32_ni)
movl %esi, %eax
cmpxchgl %edx, (%rdi)
/* %eax now contains the old value */
ret
/* 64-bit */
ENTRY(_atomic_add_64)
LOCK(13)
addq %rsi, (%rdi)
ret
ENTRY(_atomic_add_64_nv)
movq %rsi, %rax
LOCK(14)
xaddq %rax, (%rdi)
addq %rsi, %rax
ret
ENTRY(_atomic_and_64)
LOCK(15)
andq %rsi, (%rdi)
ret
ENTRY(_atomic_and_64_nv)
movq (%rdi), %rax
1:
movq %rax, %rcx
andq %rsi, %rcx
LOCK(16)
cmpxchgq %rcx, (%rdi)
jnz 1b
movq %rcx, %rax
ret
ENTRY(_atomic_dec_64)
LOCK(17)
decq (%rdi)
ret
ENTRY(_atomic_dec_64_nv)
movq $-1, %rax
LOCK(18)
xaddq %rax, (%rdi)
decq %rax
ret
ENTRY(_atomic_inc_64)
LOCK(19)
incq (%rdi)
ret
ENTRY(_atomic_inc_64_nv)
movq $1, %rax
LOCK(20)
xaddq %rax, (%rdi)
incq %rax
ret
ENTRY(_atomic_or_64)
LOCK(21)
orq %rsi, (%rdi)
ret
ENTRY(_atomic_or_64_nv)
movq (%rdi), %rax
1:
movq %rax, %rcx
orq %rsi, %rcx
LOCK(22)
cmpxchgq %rcx, (%rdi)
jnz 1b
movq %rcx, %rax
ret
ENTRY(_atomic_swap_64)
movq %rsi, %rax
xchgq %rax, (%rdi)
ret
ENTRY(_atomic_cas_64)
movq %rsi, %rax
LOCK(24)
cmpxchgq %rdx, (%rdi)
/* %eax now contains the old value */
ret
ENTRY(_atomic_cas_64_ni)
movq %rsi, %rax
cmpxchgq %rdx, (%rdi)
/* %eax now contains the old value */
ret
/* memory barriers */
ENTRY(_membar_consumer)
LOCK(25)
addq $0, -8(%rsp)
ret
ENDLABEL(membar_consumer_end)
ENTRY(_membar_producer)
/* A store is enough */
movq $0, -8(%rsp)
ret
ENDLABEL(membar_producer_end)
ENTRY(_membar_sync)
LOCK(26)
addq $0, -8(%rsp)
ret
ENDLABEL(membar_sync_end)
#ifdef _HARDKERNEL
ENTRY(sse2_lfence)
lfence
ret
ENDLABEL(sse2_lfence_end)
ENTRY(sse2_mfence)
mfence
ret
ENDLABEL(sse2_mfence_end)
atomic_lockpatch:
.globl atomic_lockpatch
.quad .Lpatch1, .Lpatch2, .Lpatch3, .Lpatch4, .Lpatch5
.quad .Lpatch6, .Lpatch7, .Lpatch8, .Lpatch9, .Lpatch10
.quad .Lpatch12, .Lpatch13, .Lpatch14, .Lpatch15
.quad .Lpatch16, .Lpatch17, .Lpatch18, .Lpatch19, .Lpatch20
.quad .Lpatch21, .Lpatch22, .Lpatch24, .Lpatch25
.quad .Lpatch26, 0
#endif /* _HARDKERNEL */
ALIAS(atomic_add_32,_atomic_add_32)
ALIAS(atomic_add_64,_atomic_add_64)
ALIAS(atomic_add_int,_atomic_add_32)
ALIAS(atomic_add_long,_atomic_add_64)
ALIAS(atomic_add_ptr,_atomic_add_64)
ALIAS(atomic_add_32_nv,_atomic_add_32_nv)
ALIAS(atomic_add_64_nv,_atomic_add_64_nv)
ALIAS(atomic_add_int_nv,_atomic_add_32_nv)
ALIAS(atomic_add_long_nv,_atomic_add_64_nv)
ALIAS(atomic_add_ptr_nv,_atomic_add_64_nv)
ALIAS(atomic_and_32,_atomic_and_32)
ALIAS(atomic_and_64,_atomic_and_64)
ALIAS(atomic_and_uint,_atomic_and_32)
ALIAS(atomic_and_ulong,_atomic_and_64)
ALIAS(atomic_and_ptr,_atomic_and_64)
ALIAS(atomic_and_32_nv,_atomic_and_32_nv)
ALIAS(atomic_and_64_nv,_atomic_and_64_nv)
ALIAS(atomic_and_uint_nv,_atomic_and_32_nv)
ALIAS(atomic_and_ulong_nv,_atomic_and_64_nv)
ALIAS(atomic_and_ptr_nv,_atomic_and_64_nv)
ALIAS(atomic_dec_32,_atomic_dec_32)
ALIAS(atomic_dec_64,_atomic_dec_64)
ALIAS(atomic_dec_uint,_atomic_dec_32)
ALIAS(atomic_dec_ulong,_atomic_dec_64)
ALIAS(atomic_dec_ptr,_atomic_dec_64)
ALIAS(atomic_dec_32_nv,_atomic_dec_32_nv)
ALIAS(atomic_dec_64_nv,_atomic_dec_64_nv)
ALIAS(atomic_dec_uint_nv,_atomic_dec_32_nv)
ALIAS(atomic_dec_ulong_nv,_atomic_dec_64_nv)
ALIAS(atomic_dec_ptr_nv,_atomic_dec_64_nv)
ALIAS(atomic_inc_32,_atomic_inc_32)
ALIAS(atomic_inc_64,_atomic_inc_64)
ALIAS(atomic_inc_uint,_atomic_inc_32)
ALIAS(atomic_inc_ulong,_atomic_inc_64)
ALIAS(atomic_inc_ptr,_atomic_inc_64)
ALIAS(atomic_inc_32_nv,_atomic_inc_32_nv)
ALIAS(atomic_inc_64_nv,_atomic_inc_64_nv)
ALIAS(atomic_inc_uint_nv,_atomic_inc_32_nv)
ALIAS(atomic_inc_ulong_nv,_atomic_inc_64_nv)
ALIAS(atomic_inc_ptr_nv,_atomic_inc_64_nv)
ALIAS(atomic_or_32,_atomic_or_32)
ALIAS(atomic_or_uint,_atomic_or_32)
ALIAS(atomic_or_ulong,_atomic_or_64)
ALIAS(atomic_or_ptr,_atomic_or_64)
ALIAS(atomic_or_32_nv,_atomic_or_32_nv)
ALIAS(atomic_or_64_nv,_atomic_or_64_nv)
ALIAS(atomic_or_uint_nv,_atomic_or_32_nv)
ALIAS(atomic_or_ulong_nv,_atomic_or_64_nv)
ALIAS(atomic_or_ptr_nv,_atomic_or_64_nv)
ALIAS(atomic_swap_32,_atomic_swap_32)
ALIAS(atomic_swap_64,_atomic_swap_64)
ALIAS(atomic_swap_uint,_atomic_swap_32)
ALIAS(atomic_swap_ulong,_atomic_swap_64)
ALIAS(atomic_swap_ptr,_atomic_swap_64)
ALIAS(atomic_cas_32,_atomic_cas_32)
ALIAS(atomic_cas_64,_atomic_cas_64)
ALIAS(atomic_cas_uint,_atomic_cas_32)
ALIAS(atomic_cas_ulong,_atomic_cas_64)
ALIAS(atomic_cas_ptr,_atomic_cas_64)
ALIAS(atomic_cas_32_ni,_atomic_cas_32_ni)
ALIAS(atomic_cas_64_ni,_atomic_cas_64_ni)
ALIAS(atomic_cas_uint_ni,_atomic_cas_32_ni)
ALIAS(atomic_cas_ulong_ni,_atomic_cas_64_ni)
ALIAS(atomic_cas_ptr_ni,_atomic_cas_64_ni)
ALIAS(membar_consumer,_membar_consumer)
ALIAS(membar_producer,_membar_producer)
ALIAS(membar_enter,_membar_consumer)
ALIAS(membar_exit,_membar_producer)
ALIAS(membar_sync,_membar_sync)
STRONG_ALIAS(_atomic_add_int,_atomic_add_32)
STRONG_ALIAS(_atomic_add_long,_atomic_add_64)
STRONG_ALIAS(_atomic_add_ptr,_atomic_add_64)
STRONG_ALIAS(_atomic_add_int_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_long_nv,_atomic_add_64_nv)
STRONG_ALIAS(_atomic_add_ptr_nv,_atomic_add_64_nv)
STRONG_ALIAS(_atomic_and_uint,_atomic_and_32)
STRONG_ALIAS(_atomic_and_ulong,_atomic_and_64)
STRONG_ALIAS(_atomic_and_ptr,_atomic_and_64)
STRONG_ALIAS(_atomic_and_uint_nv,_atomic_and_32_nv)
STRONG_ALIAS(_atomic_and_ulong_nv,_atomic_and_64_nv)
STRONG_ALIAS(_atomic_and_ptr_nv,_atomic_and_64_nv)
STRONG_ALIAS(_atomic_dec_uint,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ulong,_atomic_dec_64)
STRONG_ALIAS(_atomic_dec_ptr,_atomic_dec_64)
STRONG_ALIAS(_atomic_dec_uint_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ulong_nv,_atomic_dec_64_nv)
STRONG_ALIAS(_atomic_dec_ptr_nv,_atomic_dec_64_nv)
STRONG_ALIAS(_atomic_inc_uint,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ulong,_atomic_inc_64)
STRONG_ALIAS(_atomic_inc_ptr,_atomic_inc_64)
STRONG_ALIAS(_atomic_inc_uint_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ulong_nv,_atomic_inc_64_nv)
STRONG_ALIAS(_atomic_inc_ptr_nv,_atomic_inc_64_nv)
STRONG_ALIAS(_atomic_or_uint,_atomic_or_32)
STRONG_ALIAS(_atomic_or_ulong,_atomic_or_64)
STRONG_ALIAS(_atomic_or_ptr,_atomic_or_64)
STRONG_ALIAS(_atomic_or_uint_nv,_atomic_or_32_nv)
STRONG_ALIAS(_atomic_or_ulong_nv,_atomic_or_64_nv)
STRONG_ALIAS(_atomic_or_ptr_nv,_atomic_or_64_nv)
STRONG_ALIAS(_atomic_swap_uint,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ulong,_atomic_swap_64)
STRONG_ALIAS(_atomic_swap_ptr,_atomic_swap_64)
STRONG_ALIAS(_atomic_cas_uint,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ptr,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_uint_ni,_atomic_cas_32_ni)
STRONG_ALIAS(_atomic_cas_ulong_ni,_atomic_cas_64_ni)
STRONG_ALIAS(_atomic_cas_ptr_ni,_atomic_cas_64_ni)
STRONG_ALIAS(_membar_enter,_membar_consumer)
STRONG_ALIAS(_membar_exit,_membar_producer)
|
0xffea/MINIX3
| 2,101
|
common/lib/libc/arch/x86_64/string/strcpy.S
|
/*
* Written by J.T. Conklin <[email protected]>
* Public domain.
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS)
RCSID("$NetBSD: strcpy.S,v 1.1 2005/12/20 19:28:51 christos Exp $")
#endif
/*
* This strcpy implementation copies a byte at a time until the
* source pointer is aligned to a word boundary, it then copies by
* words until it finds a word containing a zero byte, and finally
* copies by bytes until the end of the string is reached.
*
* While this may result in unaligned stores if the source and
* destination pointers are unaligned with respect to each other,
* it is still faster than either byte copies or the overhead of
* an implementation suitable for machines with strict alignment
* requirements.
*/
ENTRY(strcpy)
movq %rdi,%rax
movabsq $0x0101010101010101,%r8
movabsq $0x8080808080808080,%r9
/*
* Align source to a word boundary.
* Consider unrolling loop?
*/
_ALIGN_TEXT
.Lalign:
testb $7,%sil
je .Lword_aligned
movb (%rsi),%dl
incq %rsi
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl
jne .Lalign
ret
_ALIGN_TEXT
.Lloop:
movq %rdx,(%rdi)
addq $8,%rdi
.Lword_aligned:
movq (%rsi),%rdx
movq %rdx,%rcx
addq $8,%rsi
subq %r8,%rcx
testq %r9,%rcx
je .Lloop
/*
* In rare cases, the above loop may exit prematurely. We must
* return to the loop if none of the bytes in the word equal 0.
*/
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 1st byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 2nd byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 3rd byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 4th byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 5th byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 6th byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 7th byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 8th byte == 0? */
jne .Lword_aligned
.Ldone:
ret
|
0xffea/MINIX3
| 2,924
|
common/lib/libc/arch/x86_64/string/memset.S
|
/* $NetBSD: memset.S,v 1.3 2009/08/01 20:35:45 dsl Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by David Laight.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS)
RCSID("$NetBSD: memset.S,v 1.3 2009/08/01 20:35:45 dsl Exp $")
#endif
#ifndef _KERNEL
/* bzero, %rdi is buffer, %rsi length */
ENTRY(bzero)
mov %rsi,%rdx /* length */
xor %eax,%eax /* value to write */
jmp 1f
#endif
/* memset, %rdi is buffer, %rsi char to fill, %rdx length */
ENTRY(memset)
movzbq %sil,%rax /* byte value to fill */
mov %rdx,%rsi /* copy of length */
mov $0x0101010101010101,%r9
imul %r9,%rax /* fill value in all bytes */
1:
mov %rdi,%r9 /* Need to return buffer address */
or %edi,%edx /* address | length */
mov %rsi,%rcx
cmp $7,%rsi
jbe 10f /* jump if short fill */
test $7,%dl /* check for misaligned fill */
jnz 20f /* jump if misaligned */
/* Target aligned and length multiple of 8 */
2:
shr $3,%rcx
rep stosq
mov %r9,%rax
ret
/*
* Short transfer, any faffing here will generate mispredicted branches.
* So we keep it simple.
*/
10: rep stosb
mov %r9,%rax
ret
/*
* Buffer or length misaligned.
* Write pattern to first and last word of buffer, then fill middle.
* (This writes to some bytes more than once - possibly three times!.)
*/
20:
mov %rax,(%rdi)
movzbq %dil,%rdx /* low address for alignment */
mov %rax,-8(%rcx,%rdi)
and $7,%dl /* offset in word */
sub %rdx,%rcx /* adjust length ... */
add %rdx,%rdi /* ... and target */
jmp 2b
|
0xffea/MINIX3
| 2,221
|
common/lib/libc/arch/x86_64/string/strrchr.S
|
/*
* Written by J.T. Conklin <[email protected]>
* Public domain.
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS)
RCSID("$NetBSD: strrchr.S,v 1.2 2009/07/17 19:37:57 dsl Exp $")
#endif
ENTRY(strrchr)
movzbq %sil,%rcx
/* zero return value */
xorq %rax,%rax
/*
* Align to word boundary.
* Consider unrolling loop?
*/
.Lalign:
testb $7,%dil
je .Lword_aligned
movb (%rdi),%dl
cmpb %cl,%dl
cmoveq %rdi,%rax
incq %rdi
testb %dl,%dl
jne .Lalign
jmp .Ldone
.Lword_aligned:
/* copy char to all bytes in word */
movb %cl,%ch
movq %rcx,%rdx
salq $16,%rcx
orq %rdx,%rcx
movq %rcx,%rdx
salq $32,%rcx
orq %rdx,%rcx
movabsq $0x0101010101010101,%r8
movabsq $0x8080808080808080,%r9
/* Check whether any byte in the word is equal to ch or 0. */
_ALIGN_TEXT
.Lloop:
movq (%rdi),%rdx
addq $8,%rdi
movq %rdx,%rsi
subq %r8,%rdx
xorq %rcx,%rsi
subq %r8,%rsi
orq %rsi,%rdx
testq %r9,%rdx
je .Lloop
/*
* In rare cases, the above loop may exit prematurely. We must
* return to the loop if none of the bytes in the word match
* ch or are equal to 0.
*/
movb -8(%rdi),%dl
cmpb %cl,%dl /* 1st byte == ch? */
jne 1f
leaq -8(%rdi),%rax
1: testb %dl,%dl /* 1st byte == 0? */
je .Ldone
movb -7(%rdi),%dl
cmpb %cl,%dl /* 2nd byte == ch? */
jne 1f
leaq -7(%rdi),%rax
1: testb %dl,%dl /* 2nd byte == 0? */
je .Ldone
movb -6(%rdi),%dl
cmpb %cl,%dl /* 3rd byte == ch? */
jne 1f
leaq -6(%rdi),%rax
1: testb %dl,%dl /* 3rd byte == 0? */
je .Ldone
movb -5(%rdi),%dl
cmpb %cl,%dl /* 4th byte == ch? */
jne 1f
leaq -5(%rdi),%rax
1: testb %dl,%dl /* 4th byte == 0? */
je .Ldone
movb -4(%rdi),%dl
cmpb %cl,%dl /* 5th byte == ch? */
jne 1f
leaq -4(%rdi),%rax
1: testb %dl,%dl /* 5th byte == 0? */
je .Ldone
movb -3(%rdi),%dl
cmpb %cl,%dl /* 6th byte == ch? */
jne 1f
leaq -3(%rdi),%rax
1: testb %dl,%dl /* 6th byte == 0? */
je .Ldone
movb -2(%rdi),%dl
cmpb %cl,%dl /* 7th byte == ch? */
jne 1f
leaq -2(%rdi),%rax
1: testb %dl,%dl /* 7th byte == 0? */
je .Ldone
movb -1(%rdi),%dl
cmpb %cl,%dl /* 8th byte == ch? */
jne 1f
leaq -1(%rdi),%rax
1: testb %dl,%dl /* 8th byte == 0? */
jne .Lloop
.Ldone:
ret
STRONG_ALIAS(rindex,strrchr)
|
0xffea/MINIX3
| 4,494
|
common/lib/libc/arch/x86_64/string/bcopy.S
|
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
*
* This code is derived from locore.s.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS)
RCSID("$NetBSD: bcopy.S,v 1.4 2009/11/22 17:25:47 dsl Exp $")
#endif
/*
* (ov)bcopy (src,dst,cnt)
* [email protected] (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
*
* Hacked about by [email protected]
*/
#ifdef MEMCOPY
ENTRY(memcpy)
#define NO_OVERLAP
#else
#ifdef MEMMOVE
ENTRY(memmove)
#else
ENTRY(bcopy)
#endif
#endif
movq %rdx,%rcx
#if defined(MEMCOPY) || defined(MEMMOVE)
movq %rdi,%rax /* must return destination address */
mov %rdi,%r11 /* for misaligned check */
#else
mov %rsi,%r11 /* for misaligned check */
xchgq %rdi,%rsi /* bcopy() has arg order reversed */
#endif
#if !defined(NO_OVERLAP)
movq %rdi,%r8
subq %rsi,%r8
#endif
shrq $3,%rcx /* count for copy by words */
jz 8f /* j if less than 8 bytes */
lea -8(%rdi,%rdx),%r9 /* target address of last 8 */
mov -8(%rsi,%rdx),%r10 /* get last word */
#if !defined(NO_OVERLAP)
cmpq %rdx,%r8 /* overlapping? */
jb 10f
#endif
/*
* Non-overlaping, copy forwards.
* Newer Intel cpus (Nehalem) will do 16byte read/write transfers
* if %ecx is more than 76.
* AMD might do something similar some day.
*/
and $7,%r11 /* destination misaligned ? */
jnz 2f
rep
movsq
mov %r10,(%r9) /* write last word */
ret
/*
* Destination misaligned
* AMD say it is better to align the destination (not the source).
* This will also re-align copies if the source and dest are both
* misaligned by the same amount)
* (I think Nehalem will use its accelerated copy if the source
* and destination have the same alignment.)
*/
2:
lea -9(%r11,%rdx),%rcx /* post re-alignment count */
neg %r11 /* now -1 .. -7 */
mov (%rsi),%rdx /* get first word */
mov %rdi,%r8 /* target for first word */
lea 8(%rsi,%r11),%rsi
lea 8(%rdi,%r11),%rdi
shr $3,%rcx
rep
movsq
mov %rdx,(%r8) /* write first word */
mov %r10,(%r9) /* write last word */
ret
#if !defined(NO_OVERLAP)
/* Must copy backwards.
* Reverse copy is probably easy to code faster than 'rep movds'
* since that requires (IIRC) an extra clock every 3 iterations (AMD).
* However I don't suppose anything cares that much!
* The big cost is the std/cld pair - reputedly 50+ cycles on Netburst P4.
* The copy is aligned with the buffer start (more likely to
* be a multiple of 8 than the end).
*/
10:
lea -8(%rsi,%rcx,8),%rsi
lea -8(%rdi,%rcx,8),%rdi
std
rep
movsq
cld
mov %r10,(%r9) /* write last bytes */
ret
#endif
/* Less than 8 bytes to copy, copy by bytes */
/* Intel Nehalem optimise 'rep movsb' for <= 7 bytes (9-15 clocks).
* For longer transfers it is 50+ !
*/
8: mov %rdx,%rcx
#if !defined(NO_OVERLAP)
cmpq %rdx,%r8 /* overlapping? */
jb 81f
#endif
/* nope, copy forwards. */
rep
movsb
ret
#if !defined(NO_OVERLAP)
/* Must copy backwards */
81:
lea -1(%rsi,%rcx),%rsi
lea -1(%rdi,%rcx),%rdi
std
rep
movsb
cld
ret
#endif
|
0xffea/MINIX3
| 3,707
|
common/lib/libc/arch/x86_64/string/memchr.S
|
/* $NetBSD: memchr.S,v 1.5 2009/08/01 20:47:02 dsl Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by David Laight.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS)
RCSID("$NetBSD: memchr.S,v 1.5 2009/08/01 20:47:02 dsl Exp $")
#endif
/*
* The instruction sequences used try to avoid data dependencies
* between adjacent instructions (to allow parallel execution).
* The 'imul' for %r9 could be put into the delay following the
* memory read (ie inside the loop) at no obvious cost - except
* that the loop is currently exactly 32 bytes - 2 fetch blocks!.
*
* I don't think aligning any of the other branch targets is useful.
*/
ENTRY(memchr)
movabsq $0x0101010101010101,%r8
lea (%rdi,%rdx),%r10 /* limit of buffer to scan */
movzbq %sil,%rsi /* mask high bits! */
/* 'directpath' imuls can execute 3 at a time ... (amd) */
imul %r8,%rsi /* search byte replicated in word */
imul $0x80,%r8,%r9 /* 0x8080808080808080 */
test $7,%dil
jnz 20f /* jump if misaligned */
jmp 1f /* jump to avoid 4 nops (13 bytes) in gap */
_ALIGN_TEXT /* entire loop now in 32 aligned bytes */
1:
cmpq %r10,%rdi /* end of buffer ? */
jae 30f /* jump if so */
movq (%rdi),%rax /* value to check */
addq $8,%rdi
xorq %rsi,%rax /* now looking for zeros */
2:
mov %rax,%rcx
subq %r8,%rax /* x - 0x01 */
not %rcx
andq %r9,%rax /* (x - 0x01) & 0x80 */
andq %rcx,%rax /* ((x - 0x01) & 0x80) & ~x */
je 1b /* jump if not found */
/* Found byte in word, get its address */
bsf %rax,%rax
shr $3,%eax
lea -8(%rax,%rdi),%rax
cmpq %r10,%rax /* need to check not beyond buffer */
jae 30f
rep
ret /* amd - no ret after jmp */
/* Input misaligned, read aligned and make low bytes invalid */
20:
mov %dil,%cl /* misalignment amount 1..7 (+high bits )*/
and $~7,%dil /* %rdi now start of word */
test %rdx,%rdx /* zero length, don't read */
jz 30f
neg %cl /* 7..1 (+high bits) */
mov (%rdi),%rax /* word containing first byte */
addq $8,%rdi
and $7,%cl /* 7..1 */
mov %r8,%r11 /* any value with bits in each byte */
shl $3,%cl /* 56..8 */
xorq %rsi,%rax /* now looking for zeros */
/* Set low bytes non-zero */
shr %cl,%r11 /* non-zero in unwanted bytes */
or %r11,%rax /* low bytes now set */
jmp 2b
/* Not found */
30: xorq %rax,%rax
ret
|
0xffea/MINIX3
| 5,908
|
common/lib/libc/arch/x86_64/string/strlen.S
|
/* $NetBSD: strlen.S,v 1.5 2009/07/12 21:24:21 dsl Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by David Laight.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Inspired by a version written by J.T. Conklin <[email protected]>
* (Only the long comment really remains his work!)
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS)
RCSID("$NetBSD: strlen.S,v 1.5 2009/07/12 21:24:21 dsl Exp $")
#endif
/*
* There are many well known branch-free sequences which are used
* for determining whether a zero-byte is contained within a word.
* These sequences are generally much more efficent than loading
* and comparing each byte individually.
*
* The expression [1,2]:
*
* (1) ~(((x & 0x7f....7f) + 0x7f....7f) | (x | 0x7f....7f))
*
* evaluates to a non-zero value if any of the bytes in the
* original word is zero.
*
* It also has the useful property that bytes in the result word
* that correspond to non-zero bytes in the original word have
* the value 0x00, while bytes corresponding to zero bytes have
* the value 0x80. This allows calculation of the first (and
* last) occurrence of a zero byte within the word (useful for C's
* str* primitives) by counting the number of leading (or
* trailing) zeros and dividing the result by 8. On machines
* without (or with slow) clz() / ctz() instructions, testing
* each byte in the result word for zero is necessary.
*
* This typically takes 4 instructions (5 on machines without
* "not-or") not including those needed to load the constant.
*
*
* The expression:
*
* (2) ((x - 0x01....01) & 0x80....80 & ~x)
*
* evaluates to a non-zero value if any of the bytes in the
* original word is zero.
*
* On little endian machines, the first byte in the result word
* that corresponds to a zero byte in the original byte is 0x80,
* so clz() can be used as above. On big endian machines, and
* little endian machines without (or with a slow) clz() insn,
* testing each byte in the original for zero is necessary.
*
* This typically takes 3 instructions (4 on machines without
* "and with complement") not including those needed to load
* constants.
*
*
* The expression:
*
* (3) ((x - 0x01....01) & 0x80....80)
*
* always evaluates to a non-zero value if any of the bytes in
* the original word is zero or has the top bit set.
* For strings that are likely to only contain 7-bit ascii these
* false positives will be rare.
*
* To account for possible false positives, each byte of the
* original word must be checked when the expression evaluates to
* a non-zero value. However, because it is simpler than those
* presented above, code that uses it will be faster as long as
* the rate of false positives is low.
*
* This is likely, because the the false positive can only occur
* if the most siginificant bit of a byte within the word is set.
* The expression will never fail for typical 7-bit ASCII strings.
*
* This typically takes 2 instructions not including those needed
* to load constants.
*
*
* [1] Henry S. Warren Jr., "Hacker's Delight", Addison-Westley 2003
*
* [2] International Business Machines, "The PowerPC Compiler Writer's
* Guide", Warthman Associates, 1996
*/
#ifdef TEST_STRLEN
ENTRY(test_strlen)
#else
ENTRY(strlen)
#endif
movabsq $0x0101010101010101,%r8
test $7,%dil
movq %rdi,%rax /* Buffer, %rdi unchanged */
movabsq $0x8080808080808080,%r9
jnz 10f /* Jump if misaligned */
_ALIGN_TEXT
1:
movq (%rax),%rdx /* get bytes to check */
2:
addq $8,%rax
mov %rdx,%rcx /* save for later check */
subq %r8,%rdx /* alg (3) above first */
not %rcx /* Invert of data */
andq %r9,%rdx
je 1b /* jump if all 0x01-0x80 */
/* Do check from alg (2) above - loops for 0x81..0xff bytes */
andq %rcx,%rdx
je 1b
/* Since we are LE, use bit scan for first 0x80 byte */
sub %rdi,%rax /* length to next word */
bsf %rdx,%rdx /* 7, 15, 23 ... 63 */
shr $3,%rdx /* 0, 1, 2 ... 7 */
lea -8(%rax,%rdx),%rax
ret
/* Misaligned, read aligned word and make low bytes non-zero */
_ALIGN_TEXT
10:
mov %al,%cl
mov $1,%rsi
and $7,%cl /* offset into word 1..7 */
and $~7,%al /* start of word with buffer */
shl $3,%cl /* bit count 8, 16 .. 56 */
movq (%rax),%rdx /* first data in high bytes */
shl %cl,%rsi
dec %rsi
or %rsi,%rdx /* low bytes now non-zero */
jmp 2b
#ifdef TEST_STRLEN
/* trivial implementation when testing above! */
ENTRY(strlen)
mov %rdi,%rax
1:
cmpb $0,(%rax)
jz 2f
inc %rax
jmp 1b
2: sub %rdi,%rax
ret
#endif
|
0xffea/MINIX3
| 2,627
|
common/lib/libc/arch/x86_64/string/strcat.S
|
/*
* Written by J.T. Conklin <[email protected]>
* Public domain.
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS)
RCSID("$NetBSD: strcat.S,v 1.1 2005/12/20 19:28:51 christos Exp $")
#endif
ENTRY(strcat)
movq %rdi,%rax
movabsq $0x0101010101010101,%r8
movabsq $0x8080808080808080,%r9
/*
* Align destination to word boundary.
* Consider unrolling loop?
*/
.Lscan:
.Lscan_align:
testb $7,%dil
je .Lscan_aligned
cmpb $0,(%rdi)
je .Lcopy
incq %rdi
jmp .Lscan_align
_ALIGN_TEXT
.Lscan_aligned:
.Lscan_loop:
movq (%rdi),%rdx
addq $8,%rdi
subq %r8,%rdx
testq %r9,%rdx
je .Lscan_loop
/*
* In rare cases, the above loop may exit prematurely. We must
* return to the loop if none of the bytes in the word equal 0.
*/
cmpb $0,-8(%rdi) /* 1st byte == 0? */
jne 1f
subq $8,%rdi
jmp .Lcopy
1: cmpb $0,-7(%rdi) /* 2nd byte == 0? */
jne 1f
subq $7,%rdi
jmp .Lcopy
1: cmpb $0,-6(%rdi) /* 3rd byte == 0? */
jne 1f
subq $6,%rdi
jmp .Lcopy
1: cmpb $0,-5(%rdi) /* 4th byte == 0? */
jne 1f
subq $5,%rdi
jmp .Lcopy
1: cmpb $0,-4(%rdi) /* 5th byte == 0? */
jne 1f
subq $4,%rdi
jmp .Lcopy
1: cmpb $0,-3(%rdi) /* 6th byte == 0? */
jne 1f
subq $3,%rdi
jmp .Lcopy
1: cmpb $0,-2(%rdi) /* 7th byte == 0? */
jne 1f
subq $2,%rdi
jmp .Lcopy
1: cmpb $0,-1(%rdi) /* 8th byte == 0? */
jne .Lscan_loop
subq $1,%rdi
/*
* Align source to a word boundary.
* Consider unrolling loop?
*/
.Lcopy:
.Lcopy_align:
testb $7,%sil
je .Lcopy_aligned
movb (%rsi),%dl
incq %rsi
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl
jne .Lcopy_align
ret
_ALIGN_TEXT
.Lcopy_loop:
movq %rdx,(%rdi)
addq $8,%rdi
.Lcopy_aligned:
movq (%rsi),%rdx
movq %rdx,%rcx
addq $8,%rsi
subq %r8,%rcx
testq %r9,%rcx
je .Lcopy_loop
/*
* In rare cases, the above loop may exit prematurely. We must
* return to the loop if none of the bytes in the word equal 0.
*/
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 1st byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 2nd byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 3rd byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 4th byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 5th byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 6th byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 7th byte == 0? */
je .Ldone
shrq $8,%rdx
movb %dl,(%rdi)
incq %rdi
testb %dl,%dl /* 8th byte == 0? */
jne .Lcopy_aligned
.Ldone:
ret
|
0xffea/MINIX3
| 4,979
|
common/lib/libc/arch/x86_64/string/strchr.S
|
/* $NetBSD: strchr.S,v 1.6 2009/07/20 15:21:00 christos Exp $ */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by David Laight.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* See comments in strlen.S about checking words for byte values */
#include <machine/asm.h>
#if defined(LIBC_SCCS)
RCSID("$NetBSD: strchr.S,v 1.6 2009/07/20 15:21:00 christos Exp $")
#endif
/*
* On entry %rdi is the buffer and the low byte of %rsi (%sil) the
* character to search for.
*
* Registers %rdx, %rcx, %r8-%r11 and %rax are also usable
*/
/* Uncomment below to get regression test to run this version but
* have everything else use the trivial one below. */
/* #define TEST_STRCHR */
#ifdef TEST_STRCHR
ENTRY(test_strchr)
#else
ENTRY(strchr)
#endif
movabsq $0x0101010101010101,%r8
movzbq %sil,%rdx /* value to search for (c) */
/* These imul are 'directpath' on athlons, so are fast */
imul $0x80,%r8,%r9 /* 0x8080808080808080 */
imul %r8,%rdx /* (c) copied to all bytes */
test $7,%dil
jnz 20f /* jump if misaligned */
_ALIGN_TEXT /* one byte nop */
1:
movq (%rdi),%rax /* bytes to check (x) */
2:
addq $8,%rdi
mov %rax,%r10
mov %rax,%r11 /* for 'char' check */
not %r10 /* invert of data (~x) */
xorq %rdx,%r11 /* convert 'char' test to one for NUL */
subq %r8,%rax /* x - 0x10 */
movq %r10,%rsi /* ~x */
subq %r8,%r11 /* (x ^ c) - 0x10 */
/*
* Here we could check ((x - 0x10) | ((x ^ c) - 0x10)) & 0x80
* and short-circuit the case where no top bits are set, and
* we continue the loop.
* However it needs 3 more clocks that are difficult to interleave
* in the existing dependency chain ...
*/
andq %r9,%rax /* (x - 0x10) & 0x80 */
xorq %rdx,%rsi /* c ^ ~x == ~(c ^ x) */
andq %r9,%r11 /* ((x ^ c) - 0x10) & 0x80 */
andq %r10,%rax /* (x - 0x10) & 0x80 & ~x */
jne 10f /* jump if string ends */
andq %rsi,%r11 /* ((x ^ c) - 0x10) & 0x80 & ~(x ^ c) */
je 1b /* jump if no match */
/* Found char, since LE can use bit scan */
bsf %r11,%r11 /* 7, 15, 23 ... 63 */
8: shr $3,%r11 /* 0, 1, 2 .. 7 */
lea -8(%r11,%rdi),%rax
ret
/* End of string, check whether char is before NUL */
_ALIGN_TEXT /* adds three byte nop */
10:
bsf %rax,%rax /* count to NUL */
andq %rsi,%r11 /* check for char in last 8 bytes */
je 11f
bsf %r11,%r11 /* NUL and char - see which was first */
cmp %r11,%rax
jae 8b /* return 'found' if same - searching for NUL */
11: xor %eax,%eax /* char not found */
ret
/* Source misaligned: read aligned word and make low bytes invalid */
/* I (dsl) think a _ALIGN_TEXT here will slow things down! */
20:
xor %rcx,%rcx
sub %dil,%cl /* Convert low address values 1..7 ... */
sbb %rsi,%rsi /* carry was set, so %rsi now ~0u! */
and $7,%cl /* ... to 7..1 */
and $~7,%dil /* move address to start of word */
shl $3,%cl /* now 56, 48 ... 16, 8 */
movq (%rdi),%rax /* aligned word containing first data */
xor %rdx,%rsi /* invert of search pattern (~c) */
je 22f /* searching for 0xff */
21: shr %cl,%rsi /* ~c in low bytes */
or %rsi,%rax /* set some bits making low bytes invalid */
jmp 2b
/* We are searching for 0xff, so can't use ~pattern for invalid value */
22:
mov %r8,%r10 /* 0x01 pattern */
lea (%r8,%r8),%rsi /* 0x02 - bits gets set (above) */
not %r10 /* now 0xfe */
sar %cl,%r10 /* top bytes 0xff */
and %r10,%rax /* clear lsb from unwanted low bytes */
jmp 21b
#ifdef TEST_STRCHR
/* Trivial version for bug-fixing above */
ENTRY(strchr)
movq %rsi,%rdx
movq %rdi,%rsi
1:
lodsb
cmp %al,%dl
je 2f
test %al,%al
jne 1b
xor %eax,%eax
ret
2: lea -1(%rsi),%rax
ret
#endif
STRONG_ALIAS(index,strchr)
|
0xffea/MINIX3
| 1,136
|
common/lib/libc/arch/x86_64/string/strcmp.S
|
/*
* Written by J.T. Conklin <[email protected]>
* Public domain.
*/
#include <machine/asm.h>
#if defined(LIBC_SCCS)
RCSID("$NetBSD: strcmp.S,v 1.1 2005/12/20 19:28:51 christos Exp $")
#endif
ENTRY(strcmp)
/*
* Align s1 to word boundary.
* Consider unrolling loop?
*/
.Ls1align:
testb $7,%dil
je .Ls1aligned
movb (%rdi),%al
incq %rdi
movb (%rsi),%dl
incq %rsi
testb %al,%al
je .Ldone
cmpb %al,%dl
je .Ls1align
jmp .Ldone
/*
* Check whether s2 is aligned to a word boundary. If it is, we
* can compare by words. Otherwise we have to compare by bytes.
*/
.Ls1aligned:
testb $7,%sil
jne .Lbyte_loop
movabsq $0x0101010101010101,%r8
subq $8,%rdi
movabsq $0x8080808080808080,%r9
subq $8,%rsi
_ALIGN_TEXT
.Lword_loop:
movq 8(%rdi),%rax
addq $8,%rdi
movq 8(%rsi),%rdx
addq $8,%rsi
cmpq %rax,%rdx
jne .Lbyte_loop
subq %r8,%rdx
notq %rax
andq %rax,%rdx
testq %r9,%rdx
je .Lword_loop
_ALIGN_TEXT
.Lbyte_loop:
movb (%rdi),%al
incq %rdi
movb (%rsi),%dl
incq %rsi
testb %al,%al
je .Ldone
cmpb %al,%dl
je .Lbyte_loop
.Ldone:
movzbq %al,%rax
movzbq %dl,%rdx
subq %rdx,%rax
ret
|
0xffea/MINIX3
| 1,639
|
common/lib/libc/arch/alpha/gen/byte_swap_2.S
|
/* $NetBSD: byte_swap_2.S,v 1.3 2008/02/16 17:37:13 apb Exp $ */
/*
* Copyright (c) 1996 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or [email protected]
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
#include <machine/asm.h>
/*
* Byte-swap a 2-byte quantity. (Convert 0x0123 to 0x2301.)
*
* Argument is an unsigned 2-byte integer (uint16_t).
*/
#if defined(_KERNEL) || defined(_STANDALONE)
#define BSWAP16 bswap16
#else /* defined(_KERNEL) || defined(_STANDALONE) */
#define BSWAP16 __bswap16
#endif /* defined(_KERNEL) || defined(_STANDALONE) */
LEAF(BSWAP16, 1) /* a0 contains 0x0123 */
XLEAF(htons, 1)
XLEAF(ntohs, 1)
insbl a0, 1, t0 /* t0 = 0x23 */
extbl a0, 1, t1 /* t1 = 0x 01 */
or t0, t1, v0 /* v0 = 0x2301 */
RET
END(BSWAP16)
|
0xffea/MINIX3
| 1,890
|
common/lib/libc/arch/alpha/gen/byte_swap_4.S
|
/* $NetBSD: byte_swap_4.S,v 1.3 2008/02/16 17:37:13 apb Exp $ */
/*
* Copyright (c) 1996 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or [email protected]
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
#include <machine/asm.h>
/*
* Byte-swap a 4-byte quantity. (Convert 0x01234567 to 0x67452301.)
*
* Argument is an unsigned 4-byte integer (uint32_t).
*/
#if defined(_KERNEL) || defined(_STANDALONE)
#define BSWAP32 bswap32
#else /* defined(_KERNEL) || defined(_STANDALONE) */
#define BSWAP32 __bswap32
#endif /* defined(_KERNEL) || defined(_STANDALONE) */
LEAF(BSWAP32, 1) /* a0 contains 0x01234567 */
XLEAF(htonl, 1)
XLEAF(ntohl, 1)
insbl a0, 3, t0 /* t0 = 0x67 */
extbl a0, 1, t1 /* t1 = 0x 45 */
extbl a0, 2, t2 /* t2 = 0x 23 */
extbl a0, 3, t3 /* t3 = 0x 01 */
sll t1, 16, t1 /* t1 = 0x 45 */
sll t2, 8, t2 /* t2 = 0x 23 */
or t3, t0, v0 /* v0 = 0x67 01 */
or t1, t2, t1 /* t1 = 0x 4523 */
or t1, v0, v0 /* v0 = 0x67452301 */
RET
END(BSWAP32)
|
0xffea/MINIX3
| 3,317
|
common/lib/libc/arch/alpha/gmon/_mcount.S
|
/* $NetBSD: _mcount.S,v 1.2 2005/12/21 18:11:11 christos Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Chris G. Demetriou
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or [email protected]
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
#if defined(GPROF) && !defined(_STANDALONE)
#include <machine/asm.h>
#include <machine/profile.h>
#define OFFSET_AT (0 * 8)
#define OFFSET_V0 (1 * 8)
#define OFFSET_T0 (2 * 8)
#define OFFSET_T1 (3 * 8)
#define OFFSET_T2 (4 * 8)
#define OFFSET_T3 (5 * 8)
#define OFFSET_T4 (6 * 8)
#define OFFSET_T5 (7 * 8)
#define OFFSET_T6 (8 * 8)
#define OFFSET_T7 (9 * 8)
#define OFFSET_S6 (10 * 8)
#define OFFSET_A0 (11 * 8)
#define OFFSET_A1 (12 * 8)
#define OFFSET_A2 (13 * 8)
#define OFFSET_A3 (14 * 8)
#define OFFSET_A4 (15 * 8)
#define OFFSET_A5 (16 * 8)
#define OFFSET_T8 (17 * 8)
#define OFFSET_T9 (18 * 8)
#define OFFSET_T10 (19 * 8)
#define OFFSET_T11 (20 * 8)
#define OFFSET_RA (21 * 8)
#define OFFSET_T12 (22 * 8)
#define OFFSET_GP (23 * 8)
#define FRAME_SIZE (24 * 8)
LEAF_NOPROFILE(_mcount,0) /* XXX */
.set noat
.set noreorder
lda sp, -FRAME_SIZE(sp)
stq at_reg, OFFSET_AT(sp)
stq v0, OFFSET_V0(sp)
stq t0, OFFSET_T0(sp)
stq t1, OFFSET_T1(sp)
stq t2, OFFSET_T2(sp)
stq t3, OFFSET_T3(sp)
stq t4, OFFSET_T4(sp)
stq t5, OFFSET_T5(sp)
stq t6, OFFSET_T6(sp)
stq t7, OFFSET_T7(sp)
stq s6, OFFSET_S6(sp) /* XXX because run _after_ prologue. */
stq a0, OFFSET_A0(sp)
stq a1, OFFSET_A1(sp)
stq a2, OFFSET_A2(sp)
stq a3, OFFSET_A3(sp)
stq a4, OFFSET_A4(sp)
stq a5, OFFSET_A5(sp)
stq t8, OFFSET_T8(sp)
stq t9, OFFSET_T9(sp)
stq t10, OFFSET_T10(sp)
stq t11, OFFSET_T11(sp)
stq ra, OFFSET_RA(sp)
stq t12, OFFSET_T12(sp)
stq gp, OFFSET_GP(sp)
br pv, 1f
1: LDGP(pv)
mov ra, a0
mov at_reg, a1
CALL(_MCOUNT_FUNC)
ldq v0, OFFSET_V0(sp)
ldq t0, OFFSET_T0(sp)
ldq t1, OFFSET_T1(sp)
ldq t2, OFFSET_T2(sp)
ldq t3, OFFSET_T3(sp)
ldq t4, OFFSET_T4(sp)
ldq t5, OFFSET_T5(sp)
ldq t6, OFFSET_T6(sp)
ldq t7, OFFSET_T7(sp)
ldq s6, OFFSET_S6(sp) /* XXX because run _after_ prologue. */
ldq a0, OFFSET_A0(sp)
ldq a1, OFFSET_A1(sp)
ldq a2, OFFSET_A2(sp)
ldq a3, OFFSET_A3(sp)
ldq a4, OFFSET_A4(sp)
ldq a5, OFFSET_A5(sp)
ldq t8, OFFSET_T8(sp)
ldq t9, OFFSET_T9(sp)
ldq t10, OFFSET_T10(sp)
ldq t11, OFFSET_T11(sp)
ldq ra, OFFSET_RA(sp)
ldq t12, OFFSET_T12(sp)
ldq gp, OFFSET_GP(sp)
ldq at_reg, OFFSET_AT(sp)
lda sp, FRAME_SIZE(sp)
ret zero, (at_reg), 1
END(_mcount)
#endif
|
0xffea/MINIX3
| 2,233
|
common/lib/libc/arch/alpha/atomic/atomic_swap.S
|
/* $NetBSD: atomic_swap.S,v 1.4 2008/05/25 15:56:11 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
LEAF(_atomic_swap_32, 2)
1: ldl_l v0, 0(a0)
mov a1, t2
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_32,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_uint,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_uint,_atomic_swap_32)
LEAF(_atomic_swap_64, 2)
1: ldq_l v0, 0(a0)
mov a1, t2
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_swap_64)
ATOMIC_OP_ALIAS(atomic_swap_64,_atomic_swap_64)
ATOMIC_OP_ALIAS(atomic_swap_ulong,_atomic_swap_64)
STRONG_ALIAS(_atomic_swap_ulong,_atomic_swap_64)
ATOMIC_OP_ALIAS(atomic_swap_ptr,_atomic_swap_64)
STRONG_ALIAS(_atomic_swap_ptr,_atomic_swap_64)
|
0xffea/MINIX3
| 2,730
|
common/lib/libc/arch/alpha/atomic/atomic_and.S
|
/* $NetBSD: atomic_and.S,v 1.4 2008/05/25 15:56:11 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
LEAF(_atomic_and_32, 2)
1: ldl_l t1, 0(a0)
and t1, a1, t2
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_32,_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_uint,_atomic_and_32)
STRONG_ALIAS(_atomic_and_uint,_atomic_and_32)
LEAF(_atomic_and_32_nv, 2)
1: ldl_l t1, 0(a0)
and t1, a1, t2
mov t2, v0
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_32_nv,_atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_uint_nv,_atomic_and_32_nv)
STRONG_ALIAS(_atomic_and_uint_nv,_atomic_and_32_nv)
LEAF(_atomic_and_64, 2)
1: ldq_l t1, 0(a0)
and t1, a1, t2
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_and_64)
ATOMIC_OP_ALIAS(atomic_and_64,_atomic_and_64)
ATOMIC_OP_ALIAS(atomic_and_ulong,_atomic_and_64)
STRONG_ALIAS(_atomic_and_ulong,_atomic_and_64)
LEAF(_atomic_and_64_nv, 2)
1: ldq_l t1, 0(a0)
and t1, a1, t2
mov t2, v0
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_and_64_nv)
ATOMIC_OP_ALIAS(atomic_and_64_nv,_atomic_and_64_nv)
ATOMIC_OP_ALIAS(atomic_and_ulong_nv,_atomic_and_64_nv)
STRONG_ALIAS(_atomic_and_ulong_nv,_atomic_and_64_nv)
|
0xffea/MINIX3
| 2,630
|
common/lib/libc/arch/alpha/atomic/membar_ops.S
|
/* $NetBSD: membar_ops.S,v 1.6 2008/05/25 15:56:11 chs Exp $ */
/*-
* Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
#ifdef _KERNEL
/*
* We start out with no-op versions that do nothing. We hot-patch when
* we detect a MP system.
*/
LEAF(_membar_producer, 0)
RET
nop
END(_membar_producer)
EXPORT(_membar_producer_end)
LEAF(_membar_sync, 0)
RET
nop
END(_membar_sync)
EXPORT(_membar_sync_end)
LEAF(_membar_producer_mp, 0)
wmb
RET
END(_membar_producer_mp)
EXPORT(_membar_producer_mp_end)
LEAF(_membar_sync_mp, 0)
mb
RET
END(_membar_sync_mp)
EXPORT(_membar_sync_mp_end)
#else /* _KERNEL */
LEAF(_membar_producer, 0)
mb
RET
END(_membar_producer)
EXPORT(_membar_producer_end)
LEAF(_membar_sync, 0)
mb
RET
END(_membar_sync)
EXPORT(_membar_sync_end)
#endif /* _KERNEL */
ATOMIC_OP_ALIAS(membar_producer,_membar_producer)
ATOMIC_OP_ALIAS(membar_sync,_membar_sync)
ATOMIC_OP_ALIAS(membar_enter,_membar_sync)
STRONG_ALIAS(_membar_enter,_membar_sync)
ATOMIC_OP_ALIAS(membar_exit,_membar_sync)
STRONG_ALIAS(_membar_exit,_membar_sync)
ATOMIC_OP_ALIAS(membar_consumer,_membar_sync)
STRONG_ALIAS(_membar_consumer,_membar_sync)
|
0xffea/MINIX3
| 2,926
|
common/lib/libc/arch/alpha/atomic/atomic_inc.S
|
/* $NetBSD: atomic_inc.S,v 1.4 2008/05/25 15:56:11 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
LEAF(_atomic_inc_32, 1)
1: ldl_l t1, 0(a0)
addl t1, 1, t2
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_32,_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_uint,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_uint,_atomic_inc_32)
LEAF(_atomic_inc_32_nv, 1)
1: ldl_l t1, 0(a0)
addl t1, 1, t2
mov t2, v0
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_32_nv,_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_uint_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_uint_nv,_atomic_inc_32_nv)
LEAF(_atomic_inc_64, 1)
1: ldq_l t1, 0(a0)
addq t1, 1, t2
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_inc_64)
ATOMIC_OP_ALIAS(atomic_inc_64,_atomic_inc_64)
ATOMIC_OP_ALIAS(atomic_inc_ulong,_atomic_inc_64)
STRONG_ALIAS(_atomic_inc_ulong,_atomic_inc_64)
ATOMIC_OP_ALIAS(atomic_inc_ptr,_atomic_inc_64)
STRONG_ALIAS(_atomic_inc_ptr,_atomic_inc_64)
LEAF(_atomic_inc_64_nv, 1)
1: ldq_l t1, 0(a0)
addq t1, 1, t2
mov t2, v0
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_inc_64_nv)
ATOMIC_OP_ALIAS(atomic_inc_64_nv,_atomic_inc_64_nv)
ATOMIC_OP_ALIAS(atomic_inc_ulong_nv,_atomic_inc_64_nv)
STRONG_ALIAS(_atomic_inc_ulong_nv,_atomic_inc_64_nv)
ATOMIC_OP_ALIAS(atomic_inc_ptr_nv,_atomic_inc_64_nv)
STRONG_ALIAS(_atomic_inc_ptr_nv,_atomic_inc_64_nv)
|
0xffea/MINIX3
| 2,926
|
common/lib/libc/arch/alpha/atomic/atomic_dec.S
|
/* $NetBSD: atomic_dec.S,v 1.4 2008/05/25 15:56:11 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
LEAF(_atomic_dec_32, 1)
1: ldl_l t1, 0(a0)
subl t1, 1, t2
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_32,_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_uint,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_uint,_atomic_dec_32)
LEAF(_atomic_dec_32_nv, 1)
1: ldl_l t1, 0(a0)
subl t1, 1, t2
mov t2, v0
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_32_nv,_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_uint_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_uint_nv,_atomic_dec_32_nv)
LEAF(_atomic_dec_64, 1)
1: ldq_l t1, 0(a0)
subq t1, 1, t2
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_dec_64)
ATOMIC_OP_ALIAS(atomic_dec_64,_atomic_dec_64)
ATOMIC_OP_ALIAS(atomic_dec_ulong,_atomic_dec_64)
STRONG_ALIAS(_atomic_dec_ulong,_atomic_dec_64)
ATOMIC_OP_ALIAS(atomic_dec_ptr,_atomic_dec_64)
STRONG_ALIAS(_atomic_dec_ptr,_atomic_dec_64)
LEAF(_atomic_dec_64_nv, 1)
1: ldq_l t1, 0(a0)
subq t1, 1, t2
mov t2, v0
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_dec_64_nv)
ATOMIC_OP_ALIAS(atomic_dec_64_nv,_atomic_dec_64_nv)
ATOMIC_OP_ALIAS(atomic_dec_ulong_nv,_atomic_dec_64_nv)
STRONG_ALIAS(_atomic_dec_ulong_nv,_atomic_dec_64_nv)
ATOMIC_OP_ALIAS(atomic_dec_ptr_nv,_atomic_dec_64_nv)
STRONG_ALIAS(_atomic_dec_ptr_nv,_atomic_dec_64_nv)
|
0xffea/MINIX3
| 2,922
|
common/lib/libc/arch/alpha/atomic/atomic_add.S
|
/* $NetBSD: atomic_add.S,v 1.4 2008/05/25 15:56:11 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
LEAF(_atomic_add_32, 2)
1: ldl_l t1, 0(a0)
addl t1, a1, t2
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_32,_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_int,_atomic_add_32)
STRONG_ALIAS(_atomic_add_int,_atomic_add_32)
LEAF(_atomic_add_32_nv, 2)
1: ldl_l t1, 0(a0)
addl t1, a1, t2
mov t2, v0
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_32_nv,_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_int_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_int_nv,_atomic_add_32_nv)
LEAF(_atomic_add_64, 2)
1: ldq_l t1, 0(a0)
addq t1, a1, t2
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_add_64)
ATOMIC_OP_ALIAS(atomic_add_64,_atomic_add_64)
ATOMIC_OP_ALIAS(atomic_add_long,_atomic_add_64)
STRONG_ALIAS(_atomic_add_long,_atomic_add_64)
ATOMIC_OP_ALIAS(atomic_add_ptr,_atomic_add_64)
STRONG_ALIAS(_atomic_add_ptr,_atomic_add_64)
LEAF(_atomic_add_64_nv, 2)
1: ldq_l t1, 0(a0)
addq t1, a1, t2
mov t2, v0
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_add_64_nv)
ATOMIC_OP_ALIAS(atomic_add_64_nv,_atomic_add_64_nv)
ATOMIC_OP_ALIAS(atomic_add_long_nv,_atomic_add_64_nv)
STRONG_ALIAS(_atomic_add_long_nv,_atomic_add_64_nv)
ATOMIC_OP_ALIAS(atomic_add_ptr_nv,_atomic_add_64_nv)
STRONG_ALIAS(_atomic_add_ptr_nv,_atomic_add_64_nv)
|
0xffea/MINIX3
| 2,778
|
common/lib/libc/arch/alpha/atomic/atomic_cas.S
|
/* $NetBSD: atomic_cas.S,v 1.5 2008/05/25 15:56:11 chs Exp $ */
/*-
* Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
LEAF(_atomic_cas_32, 3)
1: mov a2, t2
ldl_l v0, 0(a0)
cmpeq v0, a1, t1
beq t1, 2f
stl_c t2, 0(a0)
beq t2, 3f
2: RET
3: br 1b
END(_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_32,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_uint,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_32_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_32_ni,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_uint_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint_ni,_atomic_cas_32)
LEAF(_atomic_cas_64, 3)
1: mov a2, t2
ldq_l v0, 0(a0)
cmpeq v0, a1, t1
beq t1, 2f
stq_c t2, 0(a0)
beq t2, 3f
2: RET
3: br 1b
END(_atomic_cas_64)
ATOMIC_OP_ALIAS(atomic_cas_64,_atomic_cas_64)
ATOMIC_OP_ALIAS(atomic_cas_ulong,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ulong,_atomic_cas_64)
ATOMIC_OP_ALIAS(atomic_cas_ptr,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ptr,_atomic_cas_64)
ATOMIC_OP_ALIAS(atomic_cas_64_ni,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_64_ni,_atomic_cas_64)
ATOMIC_OP_ALIAS(atomic_cas_ulong_ni,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ulong_ni,_atomic_cas_64)
ATOMIC_OP_ALIAS(atomic_cas_ptr_ni,_atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ptr_ni,_atomic_cas_64)
|
0xffea/MINIX3
| 2,697
|
common/lib/libc/arch/alpha/atomic/atomic_or.S
|
/* $NetBSD: atomic_or.S,v 1.4 2008/05/25 15:56:11 chs Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
.text
LEAF(_atomic_or_32, 2)
1: ldl_l t1, 0(a0)
bis t1, a1, t2
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_32,_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_uint,_atomic_or_32)
STRONG_ALIAS(_atomic_or_uint,_atomic_or_32)
LEAF(_atomic_or_32_nv, 2)
1: ldl_l t1, 0(a0)
bis t1, a1, t2
mov t2, v0
stl_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_32_nv,_atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_uint_nv,_atomic_or_32_nv)
STRONG_ALIAS(_atomic_or_uint_nv,_atomic_or_32_nv)
LEAF(_atomic_or_64, 2)
1: ldq_l t1, 0(a0)
bis t1, a1, t2
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_or_64)
ATOMIC_OP_ALIAS(atomic_or_64,_atomic_or_64)
ATOMIC_OP_ALIAS(atomic_or_ulong,_atomic_or_64)
STRONG_ALIAS(_atomic_or_ulong,_atomic_or_64)
LEAF(_atomic_or_64_nv, 2)
1: ldq_l t1, 0(a0)
bis t1, a1, t2
mov t2, v0
stq_c t2, 0(a0)
beq t2, 2f
RET
2: br 1b
END(_atomic_or_64_nv)
ATOMIC_OP_ALIAS(atomic_or_64_nv,_atomic_or_64_nv)
ATOMIC_OP_ALIAS(atomic_or_ulong_nv,_atomic_or_64_nv)
STRONG_ALIAS(_atomic_or_ulong_nv,_atomic_or_64_nv)
|
0xffea/MINIX3
| 2,916
|
common/lib/libc/arch/alpha/string/ffs.S
|
/* $NetBSD: ffs.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
/*
* Copyright (c) 1995 Christopher G. Demetriou
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed for the
* NetBSD Project. See http://www.NetBSD.org/ for
* information about NetBSD.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* <<Id: LICENSE,v 1.2 2000/06/14 15:57:33 cgd Exp>>
*/
#include <machine/asm.h>
LEAF(ffs, 1)
addl a0, 0, t0
beq t0, Lallzero
/*
* Initialize return value (v0), and set up t1 so that it
* contains the mask with only the lowest bit set.
*/
subl zero, t0, t1
ldil v0, 1
and t0, t1, t1
and t1, 0xff, t2
bne t2, Ldo8
/*
* If lower 16 bits empty, add 16 to result and use upper 16.
*/
zapnot t1, 0x03, t3
bne t3, Ldo16
sra t1, 16, t1
addl v0, 16, v0
Ldo16:
/*
* If lower 8 bits empty, add 8 to result and use upper 8.
*/
and t1, 0xff, t4
bne t4, Ldo8
sra t1, 8, t1
addl v0, 8, v0
Ldo8:
and t1, 0x0f, t5 /* lower 4 of 8 empty? */
and t1, 0x33, t6 /* lower 2 of each 4 empty? */
and t1, 0x55, t7 /* lower 1 of each 2 empty? */
/* If lower 4 bits empty, add 4 to result. */
bne t5, Ldo4
addl v0, 4, v0
Ldo4: /* If lower 2 bits of each 4 empty, add 2 to result. */
bne t6, Ldo2
addl v0, 2, v0
Ldo2: /* If lower bit of each 2 empty, add 1 to result. */
bne t7, Ldone
addl v0, 1, v0
Ldone:
RET
Lallzero:
bis zero, zero, v0
RET
END(ffs)
|
0xffea/MINIX3
| 2,799
|
common/lib/libc/arch/alpha/string/bzero.S
|
/* $NetBSD: bzero.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
/*
* Copyright (c) 1995 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Trevor Blackwell
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or [email protected]
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
#include <machine/asm.h>
LEAF(bzero,2)
ble a1,bzero_done
bic a1,63,t3 /* t3 is # bytes to do 64 bytes at a time */
/* If nothing in first word, ignore it */
subq zero,a0,t0
and t0,7,t0 /* t0 = (0-size)%8 */
beq t0,bzero_nostart1
cmpult a1,t0,t1 /* if size > size%8 goto noshort */
beq t1,bzero_noshort
/*
* The whole thing is less than a word.
* Mask off 1..7 bytes, and finish.
*/
ldq_u t2,0(a0)
lda t0,-1(zero) /* t0=-1 */
mskql t0,a1,t0 /* Get ff in bytes (a0%8)..((a0+a1-1)%8) */
insql t0,a0,t0
bic t2,t0,t2 /* zero those bytes in word */
stq_u t2,0(a0)
RET
bzero_noshort:
/* Handle the first partial word */
ldq_u t2,0(a0)
subq a1,t0,a1
mskql t2,a0,t2 /* zero bytes (a0%8)..7 in word */
stq_u t2,0(a0)
addq a0,t0,a0 /* round a0 up to next word */
bic a1,63,t3 /* recalc t3 (# bytes to do 64 bytes at a
time) */
bzero_nostart1:
/*
* Loop, zeroing 64 bytes at a time
*/
beq t3,bzero_lp_done
bzero_lp:
stq zero,0(a0)
stq zero,8(a0)
stq zero,16(a0)
stq zero,24(a0)
subq t3,64,t3
stq zero,32(a0)
stq zero,40(a0)
stq zero,48(a0)
stq zero,56(a0)
addq a0,64,a0
bne t3,bzero_lp
bzero_lp_done:
/*
* Handle the last 0..7 words.
* We mask off the low bits, so we don't need an extra
* compare instruction for the loop (just a bne. heh-heh)
*/
and a1,0x38,t4
beq t4,bzero_finish_lp_done
bzero_finish_lp:
stq zero,0(a0)
subq t4,8,t4
addq a0,8,a0
bne t4,bzero_finish_lp
/* Do the last partial word */
bzero_finish_lp_done:
and a1,7,t5 /* 0..7 bytes left */
beq t5,bzero_done /* mskqh won't change t0 if t5==0, but I
don't want to touch, say, a new VM page */
ldq t0,0(a0)
mskqh t0,t5,t0
stq t0,0(a0)
bzero_done:
RET
END(bzero)
|
0xffea/MINIX3
| 5,964
|
common/lib/libc/arch/alpha/string/bcopy.S
|
/* $NetBSD: bcopy.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
/*
* Copyright (c) 1995 Carnegie-Mellon University.
* All rights reserved.
*
* Author: Trevor Blackwell. Support for use as memcpy() and memmove()
* added by Chris Demetriou.
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or [email protected]
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
#include <machine/asm.h>
#if defined(MEMCOPY) || defined(MEMMOVE)
#ifdef MEMCOPY
#define FUNCTION memcpy
#else
#define FUNCTION memmove
#endif
#define SRCREG a1
#define DSTREG a0
#else /* !(defined(MEMCOPY) || defined(MEMMOVE)) */
#define FUNCTION bcopy
#define SRCREG a0
#define DSTREG a1
#endif /* !(defined(MEMCOPY) || defined(MEMMOVE)) */
#define SIZEREG a2
/*
* Copy bytes.
*
* void bcopy(char *from, char *to, size_t len);
* char *memcpy(void *to, const void *from, size_t len);
* char *memmove(void *to, const void *from, size_t len);
*
* No matter how invoked, the source and destination registers
* for calculation. There's no point in copying them to "working"
* registers, since the code uses their values "in place," and
* copying them would be slower.
*/
LEAF(FUNCTION,3)
#if defined(MEMCOPY) || defined(MEMMOVE)
/* set up return value, while we still can */
mov DSTREG,v0
#endif
/* Check for negative length */
ble SIZEREG,bcopy_done
/* Check for overlap */
subq DSTREG,SRCREG,t5
cmpult t5,SIZEREG,t5
bne t5,bcopy_overlap
/* a3 = end address */
addq SRCREG,SIZEREG,a3
/* Get the first word */
ldq_u t2,0(SRCREG)
/* Do they have the same alignment? */
xor SRCREG,DSTREG,t0
and t0,7,t0
and DSTREG,7,t1
bne t0,bcopy_different_alignment
/* src & dst have same alignment */
beq t1,bcopy_all_aligned
ldq_u t3,0(DSTREG)
addq SIZEREG,t1,SIZEREG
mskqh t2,SRCREG,t2
mskql t3,SRCREG,t3
or t2,t3,t2
/* Dst is 8-byte aligned */
bcopy_all_aligned:
/* If less than 8 bytes,skip loop */
subq SIZEREG,1,t0
and SIZEREG,7,SIZEREG
bic t0,7,t0
beq t0,bcopy_samealign_lp_end
bcopy_samealign_lp:
stq_u t2,0(DSTREG)
addq DSTREG,8,DSTREG
ldq_u t2,8(SRCREG)
subq t0,8,t0
addq SRCREG,8,SRCREG
bne t0,bcopy_samealign_lp
bcopy_samealign_lp_end:
/* If we're done, exit */
bne SIZEREG,bcopy_small_left
stq_u t2,0(DSTREG)
RET
bcopy_small_left:
mskql t2,SIZEREG,t4
ldq_u t3,0(DSTREG)
mskqh t3,SIZEREG,t3
or t4,t3,t4
stq_u t4,0(DSTREG)
RET
bcopy_different_alignment:
/*
* this is the fun part
*/
addq SRCREG,SIZEREG,a3
cmpule SIZEREG,8,t0
bne t0,bcopy_da_finish
beq t1,bcopy_da_noentry
/* Do the initial partial word */
subq zero,DSTREG,t0
and t0,7,t0
ldq_u t3,7(SRCREG)
extql t2,SRCREG,t2
extqh t3,SRCREG,t3
or t2,t3,t5
insql t5,DSTREG,t5
ldq_u t6,0(DSTREG)
mskql t6,DSTREG,t6
or t5,t6,t5
stq_u t5,0(DSTREG)
addq SRCREG,t0,SRCREG
addq DSTREG,t0,DSTREG
subq SIZEREG,t0,SIZEREG
ldq_u t2,0(SRCREG)
bcopy_da_noentry:
subq SIZEREG,1,t0
bic t0,7,t0
and SIZEREG,7,SIZEREG
beq t0,bcopy_da_finish2
bcopy_da_lp:
ldq_u t3,7(SRCREG)
addq SRCREG,8,SRCREG
extql t2,SRCREG,t4
extqh t3,SRCREG,t5
subq t0,8,t0
or t4,t5,t5
stq t5,0(DSTREG)
addq DSTREG,8,DSTREG
beq t0,bcopy_da_finish1
ldq_u t2,7(SRCREG)
addq SRCREG,8,SRCREG
extql t3,SRCREG,t4
extqh t2,SRCREG,t5
subq t0,8,t0
or t4,t5,t5
stq t5,0(DSTREG)
addq DSTREG,8,DSTREG
bne t0,bcopy_da_lp
bcopy_da_finish2:
/* Do the last new word */
mov t2,t3
bcopy_da_finish1:
/* Do the last partial word */
ldq_u t2,-1(a3)
extql t3,SRCREG,t3
extqh t2,SRCREG,t2
or t2,t3,t2
br zero,bcopy_samealign_lp_end
bcopy_da_finish:
/* Do the last word in the next source word */
ldq_u t3,-1(a3)
extql t2,SRCREG,t2
extqh t3,SRCREG,t3
or t2,t3,t2
insqh t2,DSTREG,t3
insql t2,DSTREG,t2
lda t4,-1(zero)
mskql t4,SIZEREG,t5
cmovne t5,t5,t4
insqh t4,DSTREG,t5
insql t4,DSTREG,t4
addq DSTREG,SIZEREG,a4
ldq_u t6,0(DSTREG)
ldq_u t7,-1(a4)
bic t6,t4,t6
bic t7,t5,t7
and t2,t4,t2
and t3,t5,t3
or t2,t6,t2
or t3,t7,t3
stq_u t3,-1(a4)
stq_u t2,0(DSTREG)
RET
bcopy_overlap:
/*
* Basically equivalent to previous case, only backwards.
* Not quite as highly optimized
*/
addq SRCREG,SIZEREG,a3
addq DSTREG,SIZEREG,a4
/* less than 8 bytes - don't worry about overlap */
cmpule SIZEREG,8,t0
bne t0,bcopy_ov_short
/* Possibly do a partial first word */
and a4,7,t4
beq t4,bcopy_ov_nostart2
subq a3,t4,a3
subq a4,t4,a4
ldq_u t1,0(a3)
subq SIZEREG,t4,SIZEREG
ldq_u t2,7(a3)
ldq t3,0(a4)
extql t1,a3,t1
extqh t2,a3,t2
or t1,t2,t1
mskqh t3,t4,t3
mskql t1,t4,t1
or t1,t3,t1
stq t1,0(a4)
bcopy_ov_nostart2:
bic SIZEREG,7,t4
and SIZEREG,7,SIZEREG
beq t4,bcopy_ov_lp_end
bcopy_ov_lp:
/* This could be more pipelined, but it doesn't seem worth it */
ldq_u t0,-8(a3)
subq a4,8,a4
ldq_u t1,-1(a3)
subq a3,8,a3
extql t0,a3,t0
extqh t1,a3,t1
subq t4,8,t4
or t0,t1,t0
stq t0,0(a4)
bne t4,bcopy_ov_lp
bcopy_ov_lp_end:
beq SIZEREG,bcopy_done
ldq_u t0,0(SRCREG)
ldq_u t1,7(SRCREG)
ldq_u t2,0(DSTREG)
extql t0,SRCREG,t0
extqh t1,SRCREG,t1
or t0,t1,t0
insql t0,DSTREG,t0
mskql t2,DSTREG,t2
or t2,t0,t2
stq_u t2,0(DSTREG)
bcopy_done:
RET
bcopy_ov_short:
ldq_u t2,0(SRCREG)
br zero,bcopy_da_finish
END(FUNCTION)
|
0xffea/MINIX3
| 4,582
|
common/lib/libc/arch/mips/gen/byte_swap_8.S
|
/* $NetBSD: byte_swap_8.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
RCSID("$NetBSD: byte_swap_8.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
#endif /* LIBC_SCCS and not lint */
#undef _LOCORE
#define _LOCORE /* XXX not really, just assembly-code source */
#include <machine/endian.h>
NLEAF(bswap64) # a0 = 0xffeeddccbbaa9988 return 0x8899aabbccddeeff
#if (__mips == 32 || __mips == 64) && __mips_isa_rev == 2
#if !defined(__mips_o32)
/*
* If we are on MIPS32r2 or MIPS64r2 use the new instructions.
*/
dsbh v0, a0 # dwords swap bytes within halfwords
dshd v0, v0 # dwords swap halwords within dwords
j ra
#else /* defined(__mips_o32) */
/*
* If we are on MIPS32r2 or MIPS64r2 use the new instructions.
* (except we must use the 32bit versions)
*/
wsbh v1, a0 # word swap bytes within halfwords
wsbh v0, a1 # word swap bytes within halfwords
rotr v1, v1, 16 # rotate word 16bits and swap word
rotr v0, v0, 16 # rotate word 16bits and swap word
j ra
#endif /* defined(__mips_o32) */
#elif !defined(__mips_o32)
# a0 = 0xffeeddccbbaa9988
li t0, 0xffff # t0 = 0x000000000000ffff
dsll t1, t0, 32 # t1 = 0x0000ffff00000000
or t0, t1 # t0 = 0x0000ffff0000ffff
dsll t2, t0, 8 # t2 = 0x00ffff0000ffff00
xor t2, t0 # t2 = 0x00ff00ff00ff00ff
/*
* We could swap by halfword, but that would be one instruction longer.
*/
dsrl ta0, a0, 32 # ta0 = 0x00000000ffeeddcc
dsll ta1, a0, 32 # ta1 = 0xbbaa998800000000
or a1, ta0, ta1 # a1 = 0xbbaa9988ffeeddcc
# words swapped
and ta0, a1, t0 # ta0 = 0x000099880000ddcc
dsrl ta1, a1, 16 # ta1 = 0x0000bbaa9988ffee
and ta1, t0 # ta1 = 0x0000bbaa0000ffee
dsll a2, ta0, 16 # a2 = 0x99880000ddcc0000
or a2, ta1 # a2 = 0x9988bbaaddccffee
# halfwords swapped
and ta0, a2, t2 # ta0 = 0x008800aa00cc00ee
dsrl ta1, a2, 8 # ta1 = 0x009988bbaaddccff
and ta1, t2 # ta1 = 0x009900bb00dd00ff
dsll v0, ta0, 8 # v0 = 0x8800aa00cc00ee00
or v0, ta1 # v0 = 0x8899aabbccddeeff
# bytes swapped
j ra
#else /* defined(__mips_o32) */
/*
* 32bit ABI.
*/
# a0 = 0xccddeeff
# a1 = 0x8899aabb
srl t0, a0, 24 # t0 = 0x000000cc
srl t1, a1, 24 # t1 = 0x00000088
sll ta0, a0, 24 # ta0 = 0xff000000
sll ta1, a1, 24 # ta1 = 0xbb000000
or ta0, ta0, t0 # ta0 = 0xff0000cc
or ta1, ta1, t1 # ta1 = 0xbb000088
and t0, a0, 0xff00 # t0 = 0x0000ee00
and t1, a1, 0xff00 # t1 = 0x0000aa00
sll t0, t0, 8 # t0 = 0x00ee0000
sll t1, t1, 8 # t1 = 0x00aa0000
or ta0, ta0, t0 # ta0 = 0xffee00cc
or ta1, ta1, t1 # ta1 = 0xbbaa0088
srl t0, a0, 8 # t0 = 0x00ccddee
srl t1, a1, 8 # t1 = 0x008899aa
and t0, t0, 0xff00 # t0 = 0x0000dd00
and t1, t1, 0xff00 # t1 = 0x00009900
or v1, ta0, t0 # v1 = 0xffeeddcc
or v0, ta1, t1 # v0 = 0xbbaa9988
j ra
#endif /* defined(__mips_o32) */
END(bswap64)
|
0xffea/MINIX3
| 2,745
|
common/lib/libc/arch/mips/gen/byte_swap_2.S
|
/* $NetBSD: byte_swap_2.S,v 1.3 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
RCSID("from: @(#)htons.s 8.1 (Berkeley) 6/4/93")
#else
RCSID("$NetBSD: byte_swap_2.S,v 1.3 2009/12/14 00:39:00 matt Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
#undef _LOCORE
#define _LOCORE /* XXX not really, just assembly-code source */
#include <machine/endian.h>
#if defined(_KERNEL) || defined(_STANDALONE)
#define BSWAP16_NAME bswap16
#else
#define BSWAP16_NAME __bswap16
#endif
NLEAF(BSWAP16_NAME)
#if BYTE_ORDER == LITTLE_ENDIAN
ALEAF(htons)
ALEAF(ntohs)
#endif
#if (__mips == 32 || __mips == 64) && __mips_isa_rev == 2
/*
* If we are on MIPS32r2 or MIPS64r2, use the new instructions
*/
wsbh a0, a0 # word swap bytes within halfwords
and v0, a0, 0xffff # bound it to 16bits
j ra
#else
srl v0, a0, 8
and v0, v0, 0xff
sll v1, a0, 8
and v1, v1, 0xff00
or v0, v0, v1
j ra
#endif
END(BSWAP16_NAME)
#if BYTE_ORDER == BIG_ENDIAN
NLEAF(htons)
ALEAF(ntohs)
move v0, a0
j ra
END(htons)
#endif
|
0xffea/MINIX3
| 2,992
|
common/lib/libc/arch/mips/gen/byte_swap_4.S
|
/* $NetBSD: byte_swap_4.S,v 1.3 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
RCSID("from: @(#)htonl.s 8.1 (Berkeley) 6/4/93")
#else
RCSID("$NetBSD: byte_swap_4.S,v 1.3 2009/12/14 00:39:00 matt Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
#undef _LOCORE
#define _LOCORE /* XXX not really, just assembly-code source */
#include <machine/endian.h>
#if defined(_KERNEL) || defined(_STANDALONE)
#define BSWAP32_NAME bswap32
#else
#define BSWAP32_NAME __bswap32
#endif
NLEAF(BSWAP32_NAME) # a0 = 0x11223344, return 0x44332211
#if BYTE_ORDER == LITTLE_ENDIAN
ALEAF(htonl) # a0 = 0x11223344, return 0x44332211
ALEAF(ntohl)
#endif
#if (__mips == 32 || __mips == 64) && __mips_isa_rev == 2
/*
* If we are on MIPS32R2 or MIPS64R2 it's much easier
*/
wsbh a0, a0 # word swap bytes within halfwords
rotr v0, a0, 16 # rotate word 16bits
j ra
#else
srl v1, a0, 24 # v1 = 0x00000011
sll v0, a0, 24 # v0 = 0x44000000
or v0, v0, v1
and v1, a0, 0xff00
sll v1, v1, 8 # v1 = 0x00330000
or v0, v0, v1
srl v1, a0, 8
and v1, v1, 0xff00 # v1 = 0x00002200
or v0, v0, v1
j ra
#endif
END(BSWAP32_NAME)
#if BYTE_ORDER == BIG_ENDIAN
NLEAF(htonl) # a0 = 0x11223344, return 0x44332211
ALEAF(ntohl)
move v0, a0
j ra
END(htonl)
#endif
|
0xffea/MINIX3
| 2,515
|
common/lib/libc/arch/mips/atomic/atomic_swap.S
|
/* $NetBSD: atomic_swap.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_swap.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
LEAF(_atomic_swap_32)
1: INT_LL v0, 0(a0)
nop
move t0, a1
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
2:
j ra
nop
END(_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_32, _atomic_swap_32)
#if !defined(__mips_o32)
LEAF(_atomic_swap_64)
1: REG_LL v0, 0(a0)
nop
move t0, a1
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
2:
j ra
nop
END(_atomic_swap_64)
ATOMIC_OP_ALIAS(atomic_swap_64, _atomic_swap_64)
#endif
#ifdef _LP64
STRONG_ALIAS(_atomic_swap_ptr, _atomic_swap_64)
STRONG_ALIAS(_atomic_swap_ulong, _atomic_swap_64)
#else
STRONG_ALIAS(_atomic_swap_ptr, _atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ulong, _atomic_swap_32)
#endif
STRONG_ALIAS(_atomic_swap_uint, _atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_ptr, _atomic_swap_ptr)
ATOMIC_OP_ALIAS(atomic_swap_uint, _atomic_swap_uint)
ATOMIC_OP_ALIAS(atomic_swap_ulong, _atomic_swap_ulong)
|
0xffea/MINIX3
| 1,800
|
common/lib/libc/arch/mips/atomic/atomic_cas_up.S
|
/* $NetBSD: atomic_cas_up.S,v 1.2 2008/05/25 15:56:12 chs Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/ras.h>
#include "atomic_op_asm.h"
.text
.set noat
.set noreorder
LEAF(_atomic_cas_up)
.hidden _C_LABEL(_atomic_cas_up)
RAS_START_ASM_HIDDEN(_atomic_cas)
lw t0, (a0)
nop
bne t0, a1, 1f
nop
sw a2, (a0)
RAS_END_ASM_HIDDEN(_atomic_cas)
j ra
addu v0, zero, a1
1:
j ra
addu v0, zero, t0
END(_atomic_cas_up)
|
0xffea/MINIX3
| 3,043
|
common/lib/libc/arch/mips/atomic/atomic_and.S
|
/* $NetBSD: atomic_and.S,v 1.2 2009/12/14 00:38:59 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_and.S,v 1.2 2009/12/14 00:38:59 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
LEAF(_atomic_and_32)
1: INT_LL t0, 0(a0)
nop
and t0, a1
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_32, _atomic_and_32)
LEAF(_atomic_and_32_nv)
1: INT_LL v0, 0(a0)
nop
and v0, a1
move t0, v0
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_32_nv, _atomic_and_32_nv)
#if !defined(__mips_o32)
LEAF(_atomic_and_64)
1: REG_LL t0, 0(a0)
nop
and t0, a1
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_and_64)
ATOMIC_OP_ALIAS(atomic_and_64, _atomic_and_64)
LEAF(_atomic_and_64_nv)
1: REG_LL v0, 0(a0)
nop
and v0, a1
move t0, v0
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_and_64_nv)
ATOMIC_OP_ALIAS(atomic_and_64_nv, _atomic_and_64_nv)
#endif
#ifdef _LP64
STRONG_ALIAS(_atomic_and_ulong, _atomic_and_64)
STRONG_ALIAS(_atomic_and_ulong_nv, _atomic_and_64_nv)
#else
STRONG_ALIAS(_atomic_and_ulong, _atomic_and_32)
STRONG_ALIAS(_atomic_and_ulong_nv, _atomic_and_32_nv)
#endif
STRONG_ALIAS(_atomic_and_uint, _atomic_and_32)
STRONG_ALIAS(_atomic_and_uint_nv, _atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_uint, _atomic_and_uint)
ATOMIC_OP_ALIAS(atomic_and_uint_nv, _atomic_and_uint_nv)
ATOMIC_OP_ALIAS(atomic_and_ulong, _atomic_and_ulong)
ATOMIC_OP_ALIAS(atomic_and_ulong_nv, _atomic_and_ulong_nv)
|
0xffea/MINIX3
| 2,547
|
common/lib/libc/arch/mips/atomic/membar_ops.S
|
/* $NetBSD: membar_ops.S,v 1.4 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe, and by Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
#if defined(_KERNEL)
#ifdef _KERNEL_OPT
#include "opt_cputype.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#endif
#include <machine/cpu.h>
#if (MIPS_HAS_LLSC != 0 && defined(MULTIPROCESSOR)) || !defined(__mips_o32)
#define SYNC sync
#endif
#elif !defined(__mips_o32)
#define SYNC sync
#endif
.text
LEAF(_membar_sync)
j ra
#ifdef SYNC
SYNC
#else
nop
#endif
END(_membar_sync)
#ifdef _KERNEL
STRONG_ALIAS(mb_read, _membar_sync)
STRONG_ALIAS(mb_write, _membar_sync)
STRONG_ALIAS(mb_memory, _membar_sync)
#endif
ATOMIC_OP_ALIAS(membar_sync,_membar_sync)
ATOMIC_OP_ALIAS(membar_enter,_membar_sync)
STRONG_ALIAS(_membar_enter,_membar_sync)
ATOMIC_OP_ALIAS(membar_exit,_membar_sync)
STRONG_ALIAS(_membar_exit,_membar_sync)
ATOMIC_OP_ALIAS(membar_producer,_membar_sync)
STRONG_ALIAS(_membar_producer,_membar_sync)
ATOMIC_OP_ALIAS(membar_consumer,_membar_sync)
STRONG_ALIAS(_membar_consumer,_membar_sync)
|
0xffea/MINIX3
| 3,358
|
common/lib/libc/arch/mips/atomic/atomic_inc.S
|
/* $NetBSD: atomic_inc.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_inc.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
LEAF(_atomic_inc_32)
1: INT_LL t0, 0(a0)
nop
INT_ADDU t0, 1
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_32, _atomic_inc_32)
LEAF(_atomic_inc_32_nv)
1: INT_LL v0, 0(a0)
nop
INT_ADDU v0, 1
move t0, v0
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_32_nv, _atomic_inc_32_nv)
#if !defined(__mips_o32)
LEAF(_atomic_inc_64)
1: REG_LL t0, 0(a0)
nop
REG_ADDU t0, 1
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_inc_64)
ATOMIC_OP_ALIAS(atomic_inc_64, _atomic_inc_64)
LEAF(_atomic_inc_64_nv)
1: REG_LL v0, 0(a0)
nop
REG_ADDU v0, 1
move t0, v0
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_inc_64_nv)
ATOMIC_OP_ALIAS(atomic_inc_64_nv, _atomic_inc_64_nv)
#endif
#ifdef _LP64
STRONG_ALIAS(_atomic_inc_ptr, _atomic_inc_64)
STRONG_ALIAS(_atomic_inc_ptr_nv, _atomic_inc_64_nv)
STRONG_ALIAS(_atomic_inc_ulong, _atomic_inc_64)
STRONG_ALIAS(_atomic_inc_ulong_nv, _atomic_inc_64_nv)
#else
STRONG_ALIAS(_atomic_inc_ptr, _atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ptr_nv, _atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ulong, _atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ulong_nv, _atomic_inc_32_nv)
#endif
STRONG_ALIAS(_atomic_inc_uint, _atomic_inc_32)
STRONG_ALIAS(_atomic_inc_uint_nv, _atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_ptr, _atomic_inc_ptr)
ATOMIC_OP_ALIAS(atomic_inc_ptr_nv, _atomic_inc_ptr_nv)
ATOMIC_OP_ALIAS(atomic_inc_uint, _atomic_inc_uint)
ATOMIC_OP_ALIAS(atomic_inc_uint_nv, _atomic_inc_uint_nv)
ATOMIC_OP_ALIAS(atomic_inc_ulong, _atomic_inc_ulong)
ATOMIC_OP_ALIAS(atomic_inc_ulong_nv, _atomic_inc_ulong_nv)
|
0xffea/MINIX3
| 3,362
|
common/lib/libc/arch/mips/atomic/atomic_dec.S
|
/* $NetBSD: atomic_dec.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_dec.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
LEAF(_atomic_dec_32)
1: INT_LL t0, 0(a0)
nop
INT_ADDU t0, -1
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_32, _atomic_dec_32)
LEAF(_atomic_dec_32_nv)
1: INT_LL v0, 0(a0)
nop
INT_ADDU v0, -1
move t0, v0
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_32_nv, _atomic_dec_32_nv)
#if !defined(__mips_o32)
LEAF(_atomic_dec_64)
1: REG_LL t0, 0(a0)
nop
REG_ADDU t0, -1
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_dec_64)
ATOMIC_OP_ALIAS(atomic_dec_64, _atomic_dec_64)
LEAF(_atomic_dec_64_nv)
1: REG_LL v0, 0(a0)
nop
REG_ADDU v0, -1
move t0, v0
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_dec_64_nv)
ATOMIC_OP_ALIAS(atomic_dec_64_nv, _atomic_dec_64_nv)
#endif
#ifdef _LP64
STRONG_ALIAS(_atomic_dec_ptr, _atomic_dec_64)
STRONG_ALIAS(_atomic_dec_ptr_nv, _atomic_dec_64_nv)
STRONG_ALIAS(_atomic_dec_ulong, _atomic_dec_64)
STRONG_ALIAS(_atomic_dec_ulong_nv, _atomic_dec_64_nv)
#else
STRONG_ALIAS(_atomic_dec_ptr, _atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ptr_nv, _atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ulong, _atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ulong_nv, _atomic_dec_32_nv)
#endif
STRONG_ALIAS(_atomic_dec_uint, _atomic_dec_32)
STRONG_ALIAS(_atomic_dec_uint_nv, _atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_ptr, _atomic_dec_ptr)
ATOMIC_OP_ALIAS(atomic_dec_ptr_nv, _atomic_dec_ptr_nv)
ATOMIC_OP_ALIAS(atomic_dec_uint, _atomic_dec_uint)
ATOMIC_OP_ALIAS(atomic_dec_uint_nv, _atomic_dec_uint_nv)
ATOMIC_OP_ALIAS(atomic_dec_ulong, _atomic_dec_ulong)
ATOMIC_OP_ALIAS(atomic_dec_ulong_nv, _atomic_dec_ulong_nv)
|
0xffea/MINIX3
| 3,349
|
common/lib/libc/arch/mips/atomic/atomic_add.S
|
/* $NetBSD: atomic_add.S,v 1.2 2009/12/14 00:38:59 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_add.S,v 1.2 2009/12/14 00:38:59 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
LEAF(_atomic_add_32)
1: INT_LL t0, 0(a0)
nop
INT_ADDU t0, a1
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_32, _atomic_add_32)
LEAF(_atomic_add_32_nv)
1: INT_LL v0, 0(a0)
nop
INT_ADDU v0, a1
move t0, v0
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_32_nv, _atomic_add_32_nv)
#if !defined(__mips_o32)
LEAF(_atomic_add_64)
1: REG_LL t0, 0(a0)
nop
REG_ADDU t0, a1
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_add_64)
ATOMIC_OP_ALIAS(atomic_add_64, _atomic_add_64)
LEAF(_atomic_add_64_nv)
1: REG_LL v0, 0(a0)
nop
REG_ADDU v0, a1
move t0, v0
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_add_64_nv)
ATOMIC_OP_ALIAS(atomic_add_64_nv, _atomic_add_64_nv)
#endif
#ifdef _LP64
STRONG_ALIAS(_atomic_add_long, _atomic_add_64)
STRONG_ALIAS(_atomic_add_long_nv, _atomic_add_64_nv)
STRONG_ALIAS(_atomic_add_ptr, _atomic_add_64)
STRONG_ALIAS(_atomic_add_ptr_nv, _atomic_add_64_nv)
#else
STRONG_ALIAS(_atomic_add_long, _atomic_add_32)
STRONG_ALIAS(_atomic_add_long_nv, _atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_ptr, _atomic_add_32)
STRONG_ALIAS(_atomic_add_ptr_nv, _atomic_add_32_nv)
#endif
STRONG_ALIAS(_atomic_add_int, _atomic_add_32)
STRONG_ALIAS(_atomic_add_int_nv, _atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_int, _atomic_add_int)
ATOMIC_OP_ALIAS(atomic_add_int_nv, _atomic_add_int_nv)
ATOMIC_OP_ALIAS(atomic_add_ptr, _atomic_add_ptr)
ATOMIC_OP_ALIAS(atomic_add_ptr_nv, _atomic_add_ptr_nv)
ATOMIC_OP_ALIAS(atomic_add_long, _atomic_add_long)
ATOMIC_OP_ALIAS(atomic_add_long_nv, _atomic_add_long_nv)
|
0xffea/MINIX3
| 2,987
|
common/lib/libc/arch/mips/atomic/atomic_cas.S
|
/* $NetBSD: atomic_cas.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_cas.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
LEAF(_atomic_cas_32)
1: INT_LL v0, 0(a0)
nop
bne v0, a1, 2f
nop
move t0, a2
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
move v0, a1
2:
j ra
nop
END(_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_32, _atomic_cas_32)
#if !defined(__mips_o32)
LEAF(_atomic_cas_64)
1: REG_LL v0, 0(a0)
nop
bne v0, a1, 2f
nop
move t0, a2
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
move v0, a1
2:
j ra
nop
END(_atomic_cas_64)
ATOMIC_OP_ALIAS(atomic_cas_64, _atomic_cas_64)
#endif
#ifdef _LP64
STRONG_ALIAS(_atomic_cas_ptr, _atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ptr_ni, _atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ulong, _atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ulong_ni, _atomic_cas_64)
#else
STRONG_ALIAS(_atomic_cas_ptr, _atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ptr_ni, _atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong, _atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong_ni, _atomic_cas_32)
#endif
STRONG_ALIAS(_atomic_cas_uint, _atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint_ni, _atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_ptr, _atomic_cas_ptr)
ATOMIC_OP_ALIAS(atomic_cas_ptr_ni, _atomic_cas_ptr_ni)
ATOMIC_OP_ALIAS(atomic_cas_uint, _atomic_cas_uint)
ATOMIC_OP_ALIAS(atomic_cas_uint_ni, _atomic_cas_uint_ni)
ATOMIC_OP_ALIAS(atomic_cas_ulong, _atomic_cas_ulong)
ATOMIC_OP_ALIAS(atomic_cas_ulong_ni, _atomic_cas_ulong_ni)
|
0xffea/MINIX3
| 2,934
|
common/lib/libc/arch/mips/atomic/atomic_or.S
|
/* $NetBSD: atomic_or.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
.text
.set noat
.set noreorder
.set nomacro
LEAF(_atomic_or_32)
1: INT_LL t0, 0(a0)
nop
or t0, a1
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_32, _atomic_or_32)
LEAF(_atomic_or_32_nv)
1: INT_LL v0, 0(a0)
nop
or v0, a1
move t0, v0
INT_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_32_nv, _atomic_or_32_nv)
#if !defined(__mips_o32)
LEAF(_atomic_or_64)
1: REG_LL t0, 0(a0)
nop
or t0, a1
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_or_64)
ATOMIC_OP_ALIAS(atomic_or_64, _atomic_or_64)
LEAF(_atomic_or_64_nv)
1: REG_LL v0, 0(a0)
nop
or v0, a1
move t0, v0
REG_SC t0, 0(a0)
beq t0, zero, 1b
nop
j ra
nop
END(_atomic_or_64_nv)
ATOMIC_OP_ALIAS(atomic_or_64_nv, _atomic_or_64_nv)
#endif
#ifdef _LP64
STRONG_ALIAS(_atomic_or_ulong, _atomic_or_64)
STRONG_ALIAS(_atomic_or_ulong_nv, _atomic_or_64_nv)
#else
STRONG_ALIAS(_atomic_or_ulong, _atomic_or_32)
STRONG_ALIAS(_atomic_or_ulong_nv, _atomic_or_32_nv)
#endif
STRONG_ALIAS(_atomic_or_uint, _atomic_or_32)
STRONG_ALIAS(_atomic_or_uint_nv, _atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_uint, _atomic_or_uint)
ATOMIC_OP_ALIAS(atomic_or_uint_nv, _atomic_or_uint_nv)
ATOMIC_OP_ALIAS(atomic_or_ulong, _atomic_or_ulong)
ATOMIC_OP_ALIAS(atomic_or_ulong_nv, _atomic_or_ulong_nv)
|
0xffea/MINIX3
| 2,144
|
common/lib/libc/arch/mips/string/ffs.S
|
/* $NetBSD: ffs.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
/* RCSID("from: @(#)ffs.s 8.1 (Berkeley) 6/4/93") */
RCSID("$NetBSD: ffs.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
#endif /* LIBC_SCCS and not lint */
/* bit = ffs(value) */
LEAF(ffs)
move v0, zero
beq a0, zero, done
1:
and v1, a0, 1 # bit set?
addu v0, v0, 1
srl a0, a0, 1
beq v1, zero, 1b # no, continue
done:
j ra
END(ffs)
|
0xffea/MINIX3
| 2,253
|
common/lib/libc/arch/mips/string/strrchr.S
|
/* $NetBSD: strrchr.S,v 1.2 2011/01/02 02:58:52 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
ASMSTR("from: @(#)rindex.s 8.1 (Berkeley) 6/4/93")
ASMSTR("$NetBSD: strrchr.S,v 1.2 2011/01/02 02:58:52 matt Exp $")
#endif /* LIBC_SCCS and not lint */
#ifdef __ABICALLS__
.abicalls
#endif
LEAF(strrchr)
XLEAF(rindex)
move v0, zero # default if not found
1:
lbu a3, 0(a0) # get a byte
PTR_ADDU a0, a0, 1
bne a3, a1, 2f
PTR_SUBU v0, a0, 1 # save address of last match
2:
bne a3, zero, 1b # continue if not end
j ra
END(strrchr)
|
0xffea/MINIX3
| 6,802
|
common/lib/libc/arch/mips/string/bcopy.S
|
/* $NetBSD: bcopy.S,v 1.3 2009/12/14 00:39:00 matt Exp $ */
/*
* Mach Operating System
* Copyright (c) 1993 Carnegie Mellon University
* All Rights Reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or [email protected]
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
/*
* File: mips_bcopy.s
* Author: Chris Maeda
* Date: June 1993
*
* Fast copy routine. Derived from aligned_block_copy.
*/
#include <mips/asm.h>
#ifndef _LOCORE
#define _LOCORE /* XXX not really, just assembly-code source */
#endif
#include <machine/endian.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
RCSID("from: @(#)mips_bcopy.s 2.2 CMU 18/06/93")
#else
RCSID("$NetBSD: bcopy.S,v 1.3 2009/12/14 00:39:00 matt Exp $")
#endif
#endif /* LIBC_SCCS and not lint */
/*
* bcopy(caddr_t src, caddr_t dst, unsigned int len)
*
* a0 src address
* a1 dst address
* a2 length
*/
#if defined(MEMCOPY) || defined(MEMMOVE)
#ifdef MEMCOPY
#define FUNCTION memcpy
#else
#define FUNCTION memmove
#endif
#define SRCREG a1
#define DSTREG a0
#else
#define FUNCTION bcopy
#define SRCREG a0
#define DSTREG a1
#endif
#define SIZEREG a2
LEAF(FUNCTION)
.set noat
.set noreorder
#if defined(MEMCOPY) || defined(MEMMOVE)
/* set up return value, while we still can */
move v0,DSTREG
#endif
/*
* Make sure we can copy forwards.
*/
sltu t0,SRCREG,DSTREG # t0 == SRCREG < DSTREG
bne t0,zero,6f # copy backwards
/*
* There are four alignment cases (with frequency)
* (Based on measurements taken with a DECstation 5000/200
* inside a Mach kernel.)
*
* aligned -> aligned (mostly)
* unaligned -> aligned (sometimes)
* aligned,unaligned -> unaligned (almost never)
*
* Note that we could add another case that checks if
* the destination and source are unaligned but the
* copy is alignable. eg if src and dest are both
* on a halfword boundary.
*/
andi t1,DSTREG,(SZREG-1) # get last bits of dest
bne t1,zero,3f # dest unaligned
andi t0,SRCREG,(SZREG-1) # get last bits of src
bne t0,zero,5f
/*
* Forward aligned->aligned copy, 8 words at a time.
*/
98:
li AT,-(SZREG*8)
and t0,SIZEREG,AT # count truncated to multiples
PTR_ADDU a3,SRCREG,t0 # run fast loop up to this addr
sltu AT,SRCREG,a3 # any work to do?
beq AT,zero,2f
PTR_SUBU SIZEREG,t0
/*
* loop body
*/
1: # cp
REG_L t3,(0*SZREG)(SRCREG)
REG_L v1,(1*SZREG)(SRCREG)
REG_L t0,(2*SZREG)(SRCREG)
REG_L t1,(3*SZREG)(SRCREG)
PTR_ADDU SRCREG,SZREG*8
REG_S t3,(0*SZREG)(DSTREG)
REG_S v1,(1*SZREG)(DSTREG)
REG_S t0,(2*SZREG)(DSTREG)
REG_S t1,(3*SZREG)(DSTREG)
REG_L t1,(-1*SZREG)(SRCREG)
REG_L t0,(-2*SZREG)(SRCREG)
REG_L v1,(-3*SZREG)(SRCREG)
REG_L t3,(-4*SZREG)(SRCREG)
PTR_ADDU DSTREG,SZREG*8
REG_S t1,(-1*SZREG)(DSTREG)
REG_S t0,(-2*SZREG)(DSTREG)
REG_S v1,(-3*SZREG)(DSTREG)
bne SRCREG,a3,1b
REG_S t3,(-4*SZREG)(DSTREG)
/*
* Copy a word at a time, no loop unrolling.
*/
2: # wordcopy
andi t2,SIZEREG,(SZREG-1) # get byte count / SZREG
PTR_SUBU t2,SIZEREG,t2 # t2 = words to copy * SZREG
beq t2,zero,3f
PTR_ADDU t0,SRCREG,t2 # stop at t0
PTR_SUBU SIZEREG,SIZEREG,t2
1:
REG_L t3,0(SRCREG)
PTR_ADDU SRCREG,SZREG
REG_S t3,0(DSTREG)
bne SRCREG,t0,1b
PTR_ADDU DSTREG,SZREG
3: # bytecopy
beq SIZEREG,zero,4f # nothing left to do?
nop
1:
lb t3,0(SRCREG)
PTR_ADDU SRCREG,1
sb t3,0(DSTREG)
PTR_SUBU SIZEREG,1
bgtz SIZEREG,1b
PTR_ADDU DSTREG,1
4: # copydone
j ra
nop
/*
* Copy from unaligned source to aligned dest.
*/
5: # destaligned
andi t0,SIZEREG,(SZREG-1) # t0 = bytecount mod SZREG
PTR_SUBU a3,SIZEREG,t0 # number of words to transfer
beq a3,zero,3b
nop
move SIZEREG,t0 # this many to do after we are done
PTR_ADDU a3,SRCREG,a3 # stop point
1:
REG_LHI t3,0(SRCREG)
REG_LLO t3,SZREG-1(SRCREG)
PTR_ADDI SRCREG,SZREG
REG_S t3,0(DSTREG)
bne SRCREG,a3,1b
PTR_ADDI DSTREG,SZREG
b 3b
nop
6: # backcopy -- based on above
PTR_ADDU SRCREG,SIZEREG
PTR_ADDU DSTREG,SIZEREG
andi t1,DSTREG,SZREG-1 # get last 3 bits of dest
bne t1,zero,3f
andi t0,SRCREG,SZREG-1 # get last 3 bits of src
bne t0,zero,5f
/*
* Forward aligned->aligned copy, 8*4 bytes at a time.
*/
li AT,(-8*SZREG)
and t0,SIZEREG,AT # count truncated to multiple of 32
beq t0,zero,2f # any work to do?
PTR_SUBU SIZEREG,t0
PTR_SUBU a3,SRCREG,t0
/*
* loop body
*/
1: # cp
REG_L t3,(-4*SZREG)(SRCREG)
REG_L v1,(-3*SZREG)(SRCREG)
REG_L t0,(-2*SZREG)(SRCREG)
REG_L t1,(-1*SZREG)(SRCREG)
PTR_SUBU SRCREG,8*SZREG
REG_S t3,(-4*SZREG)(DSTREG)
REG_S v1,(-3*SZREG)(DSTREG)
REG_S t0,(-2*SZREG)(DSTREG)
REG_S t1,(-1*SZREG)(DSTREG)
REG_L t1,(3*SZREG)(SRCREG)
REG_L t0,(2*SZREG)(SRCREG)
REG_L v1,(1*SZREG)(SRCREG)
REG_L t3,(0*SZREG)(SRCREG)
PTR_SUBU DSTREG,8*SZREG
REG_S t1,(3*SZREG)(DSTREG)
REG_S t0,(2*SZREG)(DSTREG)
REG_S v1,(1*SZREG)(DSTREG)
bne SRCREG,a3,1b
REG_S t3,(0*SZREG)(DSTREG)
/*
* Copy a word at a time, no loop unrolling.
*/
2: # wordcopy
andi t2,SIZEREG,SZREG-1 # get byte count / 4
PTR_SUBU t2,SIZEREG,t2 # t2 = number of words to copy
beq t2,zero,3f
PTR_SUBU t0,SRCREG,t2 # stop at t0
PTR_SUBU SIZEREG,SIZEREG,t2
1:
REG_L t3,-SZREG(SRCREG)
PTR_SUBU SRCREG,SZREG
REG_S t3,-SZREG(DSTREG)
bne SRCREG,t0,1b
PTR_SUBU DSTREG,SZREG
3: # bytecopy
beq SIZEREG,zero,4f # nothing left to do?
nop
1:
lb t3,-1(SRCREG)
PTR_SUBU SRCREG,1
sb t3,-1(DSTREG)
PTR_SUBU SIZEREG,1
bgtz SIZEREG,1b
PTR_SUBU DSTREG,1
4: # copydone
j ra
nop
/*
* Copy from unaligned source to aligned dest.
*/
5: # destaligned
andi t0,SIZEREG,SZREG-1 # t0 = bytecount mod 4
PTR_SUBU a3,SIZEREG,t0 # number of words to transfer
beq a3,zero,3b
nop
move SIZEREG,t0 # this many to do after we are done
PTR_SUBU a3,SRCREG,a3 # stop point
1:
REG_LHI t3,-SZREG(SRCREG)
REG_LLO t3,-1(SRCREG)
PTR_SUBU SRCREG,SZREG
REG_S t3,-SZREG(DSTREG)
bne SRCREG,a3,1b
PTR_SUBU DSTREG,SZREG
b 3b
nop
.set reorder
.set at
END(FUNCTION)
|
0xffea/MINIX3
| 2,199
|
common/lib/libc/arch/mips/string/strlen.S
|
/* $NetBSD: strlen.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
/* RCSID("from: @(#)strlen.s 8.1 (Berkeley) 6/4/93") */
RCSID("$NetBSD: strlen.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
#endif /* LIBC_SCCS and not lint */
LEAF(strlen)
PTR_ADDU v1, a0, 1
1:
lb v0, 0(a0) # get byte from string
PTR_ADDU a0, a0, 1 # increment pointer
bne v0, zero, 1b # continue if not end
PTR_SUBU v0, a0, v1 # compute length - 1 for '\0' char
j ra
END(strlen)
|
0xffea/MINIX3
| 2,183
|
common/lib/libc/arch/mips/string/strchr.S
|
/* $NetBSD: strchr.S,v 1.3 2011/01/02 02:58:52 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
ASMSTR("from: @(#)index.s 8.1 (Berkeley) 6/4/93")
ASMSTR("$NetBSD: strchr.S,v 1.3 2011/01/02 02:58:52 matt Exp $")
#endif /* LIBC_SCCS and not lint */
#ifdef __ABICALLS__
.abicalls
#endif
LEAF(strchr)
XLEAF(index)
1:
lbu a2, 0(a0) # get a byte
PTR_ADDU a0, 1
beq a2, a1, fnd
bne a2, zero, 1b
notfnd:
move v0, zero
j ra
fnd:
PTR_SUBU v0, a0, 1
j ra
END(strchr)
|
0xffea/MINIX3
| 2,419
|
common/lib/libc/arch/mips/string/strcmp.S
|
/* $NetBSD: strcmp.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
/* RCSID("from: @(#)strcmp.s 8.1 (Berkeley) 6/4/93") */
RCSID("$NetBSD: strcmp.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
#endif /* LIBC_SCCS and not lint */
/*
* NOTE: this version assumes unsigned chars in order to be "8 bit clean".
*/
LEAF(strcmp)
1:
lbu t0, 0(a0) # get two bytes and compare them
lbu t1, 0(a1)
beq t0, zero, LessOrEq # end of first string?
bne t0, t1, NotEq
lbu t0, 1(a0) # unroll loop
lbu t1, 1(a1)
PTR_ADD a0, a0, 2
beq t0, zero, LessOrEq # end of first string?
PTR_ADD a1, a1, 2
beq t0, t1, 1b
NotEq:
subu v0, t0, t1
j ra
LessOrEq:
subu v0, zero, t1
j ra
END(strcmp)
|
0xffea/MINIX3
| 2,150
|
common/lib/libc/arch/powerpc/atomic/atomic_swap.S
|
/* $NetBSD: atomic_swap.S,v 1.6 2011/01/15 07:31:11 matt Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
__RCSID("$NetBSD: atomic_swap.S,v 1.6 2011/01/15 07:31:11 matt Exp $")
.text
ENTRY(_atomic_swap_32)
1: lwarx %r10,0,%r3
stwcx. %r4,0,%r3
bne- 1b
mr %r3,%r10
blr
END(_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_32,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_uint,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_uint,_atomic_swap_32)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_swap_ulong,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ulong,_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_ptr,_atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ptr,_atomic_swap_32)
#endif
|
0xffea/MINIX3
| 2,474
|
common/lib/libc/arch/powerpc/atomic/atomic_and.S
|
/* $NetBSD: atomic_and.S,v 1.6 2011/01/15 07:31:11 matt Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
__RCSID("$NetBSD: atomic_and.S,v 1.6 2011/01/15 07:31:11 matt Exp $")
.text
ENTRY(_atomic_and_32)
1: lwarx %r10,0,%r3
and %r10,%r10,%r4
stwcx. %r10,0,%r3
bne- 1b
blr
END(_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_32,_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_uint,_atomic_and_32)
STRONG_ALIAS(_atomic_and_uint,_atomic_and_32)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_and_ulong,_atomic_and_32)
STRONG_ALIAS(_atomic_and_ulong,_atomic_and_32)
#endif
ENTRY(_atomic_and_32_nv)
1: lwarx %r10,0,%r3
and %r10,%r10,%r4
stwcx. %r10,0,%r3
bne- 1b
mr %r3,%r10
blr
END(_atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_32_nv,_atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_uint_nv,_atomic_and_32_nv)
STRONG_ALIAS(_atomic_and_uint_nv,_atomic_and_32_nv)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_and_ulong_nv,_atomic_and_32_nv)
STRONG_ALIAS(_atomic_and_ulong_nv,_atomic_and_32_nv)
#endif
|
0xffea/MINIX3
| 2,224
|
common/lib/libc/arch/powerpc/atomic/membar_ops.S
|
/* $NetBSD: membar_ops.S,v 1.4 2011/01/15 07:31:11 matt Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe, and by Andrew Doran.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
__RCSID("$NetBSD: membar_ops.S,v 1.4 2011/01/15 07:31:11 matt Exp $")
.text
/* These assume Total Store Order (TSO) */
ENTRY(_membar_consumer)
isync
blr
END(_membar_consumer)
ENTRY(_membar_producer)
sync
blr
END(_membar_producer)
ATOMIC_OP_ALIAS(membar_producer,_membar_producer)
ATOMIC_OP_ALIAS(membar_consumer,_membar_consumer)
ATOMIC_OP_ALIAS(membar_enter,_membar_consumer)
STRONG_ALIAS(_membar_enter,_membar_consumer)
ATOMIC_OP_ALIAS(membar_exit,_membar_producer)
STRONG_ALIAS(_membar_exit,_membar_producer)
ATOMIC_OP_ALIAS(membar_sync,_membar_producer)
STRONG_ALIAS(_membar_sync,_membar_producer)
|
0xffea/MINIX3
| 2,668
|
common/lib/libc/arch/powerpc/atomic/atomic_inc.S
|
/* $NetBSD: atomic_inc.S,v 1.6 2011/01/15 07:31:11 matt Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
__RCSID("$NetBSD: atomic_inc.S,v 1.6 2011/01/15 07:31:11 matt Exp $")
.text
ENTRY(_atomic_inc_32)
1: lwarx %r10,0,%r3
addi %r10,%r10,1
stwcx. %r10,0,%r3
bne- 1b
blr
END(_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_32,_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_uint,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_uint,_atomic_inc_32)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_inc_ulong,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ulong,_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_ptr,_atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ptr,_atomic_inc_32)
#endif
ENTRY(_atomic_inc_32_nv)
1: lwarx %r10,0,%r3
addi %r10,%r10,1
stwcx. %r10,0,%r3
bne- 1b
mr %r3,%r10
blr
END(_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_32_nv,_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_uint_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_uint_nv,_atomic_inc_32_nv)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_inc_ulong_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ulong_nv,_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_ptr_nv,_atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ptr_nv,_atomic_inc_32_nv)
#endif
|
0xffea/MINIX3
| 2,670
|
common/lib/libc/arch/powerpc/atomic/atomic_dec.S
|
/* $NetBSD: atomic_dec.S,v 1.6 2011/01/15 07:31:11 matt Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
__RCSID("$NetBSD: atomic_dec.S,v 1.6 2011/01/15 07:31:11 matt Exp $")
.text
ENTRY(_atomic_dec_32)
1: lwarx %r10,0,%r3
addi %r10,%r10,-1
stwcx. %r10,0,%r3
bne- 1b
blr
END(_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_32,_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_uint,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_uint,_atomic_dec_32)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_dec_ulong,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ulong,_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_ptr,_atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ptr,_atomic_dec_32)
#endif
ENTRY(_atomic_dec_32_nv)
1: lwarx %r10,0,%r3
addi %r10,%r10,-1
stwcx. %r10,0,%r3
bne- 1b
mr %r3,%r10
blr
END(_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_32_nv,_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_uint_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_uint_nv,_atomic_dec_32_nv)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_dec_ulong_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ulong_nv,_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_ptr_nv,_atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ptr_nv,_atomic_dec_32_nv)
#endif
|
0xffea/MINIX3
| 2,662
|
common/lib/libc/arch/powerpc/atomic/atomic_add.S
|
/* $NetBSD: atomic_add.S,v 1.7 2011/01/15 07:31:11 matt Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
__RCSID("$NetBSD: atomic_add.S,v 1.7 2011/01/15 07:31:11 matt Exp $")
.text
ENTRY(_atomic_add_32)
1: lwarx %r10,0,%r3
add %r10,%r10,%r4
stwcx. %r10,0,%r3
bne- 1b
blr
END(_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_32,_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_int,_atomic_add_32)
STRONG_ALIAS(_atomic_add_int,_atomic_add_32)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_add_long,_atomic_add_32)
STRONG_ALIAS(_atomic_add_long,_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_ptr,_atomic_add_32)
STRONG_ALIAS(_atomic_add_ptr,_atomic_add_32)
#endif
ENTRY(_atomic_add_32_nv)
1: lwarx %r10,0,%r3
add %r10,%r10,%r4
stwcx. %r10,0,%r3
bne- 1b
mr %r3,%r10
blr
END(_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_32_nv,_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_int_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_int_nv,_atomic_add_32_nv)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_add_long_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_long_nv,_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_ptr_nv,_atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_ptr_nv,_atomic_add_32_nv)
#endif
|
0xffea/MINIX3
| 2,589
|
common/lib/libc/arch/powerpc/atomic/atomic_cas.S
|
/* $NetBSD: atomic_cas.S,v 1.7 2011/01/15 07:31:11 matt Exp $ */
/*-
* Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
__RCSID("$NetBSD: atomic_cas.S,v 1.7 2011/01/15 07:31:11 matt Exp $")
.text
ENTRY(_atomic_cas_32)
1: lwarx %r10,0,%r3
cmpw %r10,%r4
bne- 2f
stwcx. %r5,0,%r3
bne- 1b
2: mr %r3,%r10
blr
END(_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_32,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_uint,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint,_atomic_cas_32)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_cas_ulong,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_ptr,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ptr,_atomic_cas_32)
#endif
ATOMIC_OP_ALIAS(atomic_cas_32_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_32_ni,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_uint_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint_ni,_atomic_cas_32)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_cas_ulong_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong_ni,_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_ptr_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ptr_ni,_atomic_cas_32)
#endif
|
0xffea/MINIX3
| 2,446
|
common/lib/libc/arch/powerpc/atomic/atomic_or.S
|
/* $NetBSD: atomic_or.S,v 1.6 2011/01/15 07:31:11 matt Exp $ */
/*-
* Copyright (c) 2007 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Jason R. Thorpe.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "atomic_op_asm.h"
__RCSID("$NetBSD: atomic_or.S,v 1.6 2011/01/15 07:31:11 matt Exp $")
.text
ENTRY(_atomic_or_32)
1: lwarx %r10,0,%r3
or %r10,%r10,%r4
stwcx. %r10,0,%r3
bne- 1b
blr
END(_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_32,_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_uint,_atomic_or_32)
STRONG_ALIAS(_atomic_or_uint,_atomic_or_32)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_or_ulong,_atomic_or_32)
STRONG_ALIAS(_atomic_or_ulong,_atomic_or_32)
#endif
ENTRY(_atomic_or_32_nv)
1: lwarx %r10,0,%r3
or %r10,%r10,%r4
stwcx. %r10,0,%r3
bne- 1b
mr %r3,%r10
blr
END(_atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_32_nv,_atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_uint_nv,_atomic_or_32_nv)
STRONG_ALIAS(_atomic_or_uint_nv,_atomic_or_32_nv)
#if !defined(_LP64)
ATOMIC_OP_ALIAS(atomic_or_ulong_nv,_atomic_or_32_nv)
STRONG_ALIAS(_atomic_or_ulong_nv,_atomic_or_32_nv)
#endif
|
0xffea/MINIX3
| 3,700
|
common/lib/libc/arch/powerpc/string/memcpy.S
|
/* $NetBSD: memcpy.S,v 1.3 2011/01/15 07:31:12 matt Exp $ */
/* stropt/memcpy_440.S, pl_string_common, pl_linux 10/11/04 11:45:36
* ==========================================================================
* Optimized memcpy implementation for IBM PowerPC 440.
*
* Copyright (c) 2003, IBM Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ==========================================================================
*
* Function: Copy n bytes of the source to the destination. Behavior is
* undefined for objects that overlap.
*
*
* void *memcpy(void * dest, const void * src, int n)
*
* Input: r3 - destination address
* r4 - source address
* r5 - byte count
* Output: r3 - destination address
*
* ==========================================================================
*/
#include <machine/asm.h>
.text
.align 4
/* LINTSTUB: Func: void *memcpy(void *, const void *, size_t) */
ENTRY(memcpy)
/*
* Check count passed in R5. If zero, return; otherwise continue.
*/
cmpwi %r5,0
beqlr-
mr %r8, %r3 /* Copy dst (return value) */
addi %r4, %r4, -4 /* Prepare for main loop's auto */
addi %r8, %r8, -4 /* update */
srwi. %r9,%r5,2 /* Word count -> r9 */
beq- last1 /* Partial copy if <4 bytes */
mtctr %r9 /* Word cnt in CTR for loop */
lwzu %r7, 4(%r4) /* Preload for main loop */
b g1
g0: /* Main loop */
lwzu %r7, 4(%r4) /* Load a new word */
stwu %r6, 4(%r8) /* Store previous word */
g1:
bdz- last /* Dec ctr and exit loop if no */
/* more words */
lwzu %r6, 4(%r4) /* Load another word */
stwu %r7, 4(%r8) /* Store previous word */
bdnz+ g0 /* Dec ctr and continue loop if */
/* more words */
mr %r7, %r6
last:
stwu %r7, 4(%r8) /* Store last word */
last1: /* Byte-by-byte copy */
clrlwi. %r5,%r5,30
beqlr
mtctr %r5
lbzu %r6, 4(%r4) /* 1st byte: update by word */
stbu %r6, 4(%r8)
bdzlr-
last2:
lbzu %r6, 1(%r4) /* Handle the rest */
stbu %r6, 1(%r8)
bdnz+ last2
blr
END(memcpy)
|
0xffea/MINIX3
| 1,909
|
common/lib/libc/arch/powerpc/string/ffs.S
|
/* $NetBSD: ffs.S,v 1.5 2011/01/15 07:31:12 matt Exp $ */
/*-
* Copyright (C) 2001 Martin J. Laubach <[email protected]>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*----------------------------------------------------------------------*/
#include <machine/asm.h>
__RCSID("$NetBSD: ffs.S,v 1.5 2011/01/15 07:31:12 matt Exp $")
.align 4
ENTRY(ffs)
neg %r4, %r3
and %r3, %r4, %r3
cntlzw %r3, %r3
li %r0, 32
subf %r3, %r3, %r0
blr
END(ffs)
/*----------------------------------------------------------------------*/
|
0xffea/MINIX3
| 5,418
|
common/lib/libc/arch/powerpc/string/memmove.S
|
/* $NetBSD: memmove.S,v 1.3 2011/01/15 07:31:12 matt Exp $ */
/* stropt/memmove.S, pl_string_common, pl_linux 10/11/04 11:45:37
* ==========================================================================
* Optimized memmove implementation for IBM PowerPC 405/440.
*
* Copyright (c) 2003, IBM Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ==========================================================================
*
* Function: Move memory area (handles overlapping regions)
*
* void *memmove(void * dest, const void * src, int n)
*
* Input: r3 - destination address
* r4 - source address
* r5 - byte count
* Output: r3 - destination address
*
* ==========================================================================
*/
#include <machine/asm.h>
.text
.align 4
#ifdef _BCOPY
/* bcopy = memcpy/memmove with arguments reversed. */
/* LINTSTUB: Func: void bcopy(void *, void *, size_t) */
ENTRY(bcopy)
mr %r6, %r3 /* swap src/dst */
mr %r3, %r4
mr %r4, %r6
#else
/* LINTSTUB: Func: void *memmove(void *, const void *, size_t) */
ENTRY(memmove)
#endif
mr %r8, %r3 /* Save dst (return value) */
cmpw %r4, %r8 /* Branch to reverse if */
blt reverse /* src < dest. Don't want to */
/* overwrite end of src with */
/* start of dest */
addi %r4, %r4, -4 /* Back up src and dst pointers */
addi %r8, %r8, -4 /* due to auto-update of 'load' */
srwi. %r9,%r5,2 /* How many words in total cnt */
beq- last1 /* Handle byte by byte if < 4 */
/* bytes total */
mtctr %r9 /* Count of words for loop */
lwzu %r7, 4(%r4) /* Preload first word */
b g1
g0: /* Main loop */
lwzu %r7, 4(%r4) /* Load a new word */
stwu %r6, 4(%r8) /* Store previous word */
g1:
bdz- last /* Dec cnt, and branch if just */
/* one word to store */
lwzu %r6, 4(%r4) /* Load another word */
stwu %r7, 4(%r8) /* Store previous word */
bdnz+ g0 /* Dec cnt, and loop again if */
/* more words */
mr %r7, %r6 /* If word count -> 0, then... */
last:
stwu %r7, 4(%r8) /* ... store last word */
last1: /* Byte-by-byte copy */
clrlwi. %r5,%r5,30 /* If count -> 0, then ... */
beqlr /* we're done */
mtctr %r5 /* else load count for loop */
lbzu %r6, 4(%r4) /* 1st byte: update addr by 4 */
stbu %r6, 4(%r8) /* since we pre-adjusted by 4 */
bdzlr- /* in anticipation of main loop */
last2:
lbzu %r6, 1(%r4) /* But handle the rest by */
stbu %r6, 1(%r8) /* updating addr by 1 */
bdnz+ last2
blr
/* We're here since src < dest. Don't want to overwrite end of */
/* src with start of dest */
reverse:
add %r4, %r4, %r5 /* Work from end to beginning */
add %r8, %r8, %r5 /* so add count to string ptrs */
srwi. %r9,%r5,2 /* Words in total count */
beq- rlast1 /* Handle byte by byte if < 4 */
/* bytes total */
mtctr %r9 /* Count of words for loop */
lwzu %r7, -4(%r4) /* Preload first word */
b rg1
rg0: /* Main loop */
lwzu %r7, -4(%r4) /* Load a new word */
stwu %r6, -4(%r8) /* Store previous word */
rg1:
bdz- rlast /* Dec cnt, and branch if just */
/* one word to store */
lwzu %r6, -4(%r4) /* Load another word */
stwu %r7, -4(%r8) /* Store previous word */
bdnz+ rg0 /* Dec cnt, and loop again if */
/* more words */
mr %r7, %r6 /* If word count -> 0, then... */
rlast:
stwu %r7, -4(%r8) /* ... store last word */
rlast1: /* Byte-by-byte copy */
clrlwi. %r5,%r5,30 /* If count -> 0, then... */
beqlr /* ... we're done */
mtctr %r5 /* else load count for loop */
rlast2:
lbzu %r6, -1(%r4) /* Handle the rest, byte by */
stbu %r6, -1(%r8) /* byte */
bdnz+ rlast2 /* Dec ctr, and branch if more */
/* bytes left */
blr
#ifdef _BCOPY
END(bcopy)
#else
END(memmove)
#endif
|
0xffea/MINIX3
| 7,458
|
common/lib/libc/arch/powerpc/string/memcmp.S
|
/* $NetBSD: memcmp.S,v 1.3 2011/01/15 07:31:12 matt Exp $ */
/* stropt/memcmp.S, pl_string_common, pl_linux 10/11/04 11:45:35
* ==========================================================================
* Optimized memcmp implementation for IBM PowerPC 405/440.
*
* Copyright (c) 2003, IBM Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of IBM nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ==========================================================================
*
* Function: Compare two character strings (up to n characters)
*
* int memcmp(const char *s1, const char *s2, int n)
*
* Input: r3 - buffer 1 address
* r4 - buffer 2 address
* r5 - maximum characters to compare
* Output: r3 <0 (less), 0 (equal), >0 (greater)
*
* ==========================================================================
*/
#include <machine/asm.h>
.text
.align 4
/* LINTSTUB: Func: void *memcmp(const void *, const void *, size_t) */
ENTRY(memcmp)
/*
* Check count passed in R5. If zero, return 0; otherwise continue.
*/
cmpwi %r5,0
beq- ret_0;
/*
* Most of the time the difference is found in the first
* several bytes. The following code minimizes the number
* of load operations for short compares.
*/
mr %r11, %r3 /* Save buffer 1 */
again:
not %r10, %r4 /* buffer 2: bytes to page bdy */
rlwinm. %r10, %r10,29,23,31 /* buffer 2: dwords to page bdy */
beq- bytebybyte /* If < 8 bytes to the page bdy */
/* do byte by byte */
lwz %r8, 0(%r4) /* load 1st buffer 2 word */
not %r12, %r11 /* buffer 1: bytes to page bdy */
rlwinm. %r12, %r12,29,23,31 /* buffer 1: dwords to page bdy */
beq- bytebybyte /* If < 8 bytes to the page bdy */
/* do byte by byte */
lwz %r6, 0(%r11) /* load 1st buffer 1 word */
cmpwi %r5, 4 /* If remaining count <= 4 */
ble+ first4 /* handle specially. DWG */
cmplw %r8, %r6 /* compare buffer 2 and buffer 1*/
bne+ all_done /* different => we're done */
lwzu %r9, 4(%r4) /* load 2nd buffer 2 word */
lwzu %r7, 4(%r11) /* load 2nd buffer 1 word */
cmpwi %r5, 8 /* If remaining count <= 8 */
ble+ last4 /* handle specially. DWG */
cmplw %r9, %r7 /* compare buffer 2 and buffer 1*/
bne+ all_done /* different => we're done */
addi %r5, %r5, -8 /* Update character counter DWG */
addi %r10, %r4, 0x0004 /* DWG*/
not %r10, %r10 /* buffer 2: bytes to page bdy DWG */
rlwinm. %r10, %r10,29,23,31 /* buffer 2: dwords to page bdy DWG */
addi %r12, %r11, 0x0004 /* DWG */
not %r12, %r12 /* buffer 1: bytes to page bdy DWG */
rlwinm. %r12, %r12,29,23,31 /* buffer 1: dwords to page bdy DWG */
/* The following section prior to loop: figures out whether */
/* the buffer 1 or buffer 2 is closer to the page boundary. */
/* The main loop count is then set up to reflect the number of */
/* double words of the buffer that is closest */
cmpw %r10, %r12 /* Find closest */
blt lt
mr %r10, %r12
lt:
srwi %r12, %r5, 3 /* Double check the total count */
cmpw %r10, %r12 /* limitation */
blt lt2
mr %r10, %r12 /* DWG */
lt2: /* DWG */
cmpwi %r10, 0 /* DWG */
bne lt3 /* DWG */
addi %r4, %r4, 0x0004 /* DWG */
addi %r11,%r11,0x0004 /* DWG */
b again /* DWG */
lt3: /* DWG */
mtctr %r10 /* dword count for loop */
lwzu %r6, 4(%r11) /* pre-load buffer 1 word */
b in /* To the loop */
loop: /* main loop */
cmplw %r8, %r6 /* Compare first buffer 2 word */
bne- all_done /* with first buffer 1 word */
/* If different, we're done */
cmplw %r9, %r7 /* Compare second buffer 2 word */
/* with second buffer 1 word */
lwzu %r6, 4(%r11) /* pre-load buffer 1 word */
bne- all_done /* If different, we're done */
in:
lwzu %r7, 4(%r11) /* pre-load buffer 1 word */
lwzu %r8, 4(%r4) /* pre-load buffer 2 word */
lwzu %r9, 4(%r4) /* pre-load buffer 2 word */
bdnz+ loop /* Do more DW's if cnt > 0 */
/*mfctr %r12*/ /*DWG*/ /* number of dwords left */
/*subf %r10, %r12, %r10*/ /*DWG*//* number of dwords compared */
slwi %r10, %r10, 3
subf %r5, %r10, %r5 /* adjust byte counter */
/*bne+ partial*/ /*DWG*/ /* If less than 8 bytes, handle */
/* specially */
/*cmpwi %r5, 8*/ /* Removed. DWG */
/*blt partial*/ /* Removed. DWG */
/*addic %r5, %r5, -8*/ /*DWG*/ /* Subtract two words from count*/
cmplw %r8, %r6 /* compare last dword */
addi %r4, %r4, 4
bne- all_done
cmplw %r9, %r7
addi %r11, %r11, 4
bne- all_done
bytebybyte:
/* We've gotten close to a page boundary: do a byte-byte-byte
* compare for the following 8 bytes, and then go back to
* the full-word compare loop.
*/
li %r3, 8 /* loop count */
cmpw %r3, %r5 /* take min(8, counter) */
ble f2
mr. %r3, %r5
beqlr
f2:
mtctr %r3
subf %r5, %r3, %r5 /* adjust counter */
bbb:
lbz %r6, 0(%r11) /* byte copy loop */
addi %r11, %r11, 1
lbz %r8, 0(%r4)
addi %r4, %r4, 1
cmplw %r8, %r6
bdnzt+ eq, bbb
bne all_done
cmpwi %r5, 0
bgt again /* handle the rest */
xor %r3,%r3,%r3
blr
#if 0 /* Removed code section. DWG */
partial:
mr. %r3, %r5
beqlr /* If count -> 0, we're done */
f1:
subfic %r3, %r3, 4 /* zero/end in first word? */
cmpwi %r3, 0
blt last4
#endif /* DWG */
first4:
subfic %r3, %r5, 4 /* If count <= 4, handle */
rlwinm %r3, %r3, 3, 0, 31 /* count *= 8 */
srw %r6, %r6, %r3 /* align 1st buffer 1 word */
srw %r8, %r8, %r3 /* align 1st buffer 2 word */
cmplw %r8, %r6 /* get result */
bne all_done
xor %r3,%r3,%r3
blr
last4:
subfic %r10, %r5, 8 /*DWG*/
rlwinm %r10, %r10, 3, 0, 31 /* count *= 8 */
srw %r7, %r7, %r10 /* align 2nd buffer 1 word */
srw %r9, %r9, %r10 /* align 2nd buffer 2 word */
cmplw %r9, %r7 /* get result */
bne all_done
ret_0:
xor %r3,%r3,%r3 /* Equal result */
blr
all_done:
blt finish_lt
addi %r3,0,-1 /* Less than result */
blr
finish_lt:
addi %r3,0,1 /* Greater than result */
blr
END(memcmp)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.