Mon Dec 14 00:39:01 2009 UTC ()
Merge from matt-nb5-mips64
(matt)
diff -r1.9 -r1.10 src/common/lib/libc/arch/mips/atomic/Makefile.inc
diff -r0 -r1.2 src/common/lib/libc/arch/mips/atomic/atomic_add.S
diff -r0 -r1.2 src/common/lib/libc/arch/mips/atomic/atomic_and.S
diff -r0 -r1.2 src/common/lib/libc/arch/mips/atomic/atomic_cas.S
diff -r0 -r1.2 src/common/lib/libc/arch/mips/atomic/atomic_dec.S
diff -r0 -r1.2 src/common/lib/libc/arch/mips/atomic/atomic_inc.S
diff -r0 -r1.2 src/common/lib/libc/arch/mips/atomic/atomic_or.S
diff -r0 -r1.2 src/common/lib/libc/arch/mips/atomic/atomic_swap.S
diff -r1.3 -r1.4 src/common/lib/libc/arch/mips/atomic/membar_ops.S
diff -r1.2 -r1.3 src/common/lib/libc/arch/mips/gen/byte_swap_2.S
diff -r1.2 -r1.3 src/common/lib/libc/arch/mips/gen/byte_swap_4.S
diff -r0 -r1.2 src/common/lib/libc/arch/mips/gen/byte_swap_8.S
diff -r1.2 -r1.3 src/common/lib/libc/arch/mips/string/bcopy.S
diff -r1.1 -r1.2 src/common/lib/libc/arch/mips/string/ffs.S
diff -r1.1 -r1.2 src/common/lib/libc/arch/mips/string/strcmp.S
diff -r1.1 -r1.2 src/common/lib/libc/arch/mips/string/strlen.S
diff -r1.1 -r0 src/common/lib/libc/arch/mips/string/memset.S
diff -r1.6 -r1.7 src/common/lib/libc/atomic/atomic_init_testset.c
diff -r0 -r1.2 src/common/lib/libc/string/bzero2.c
diff -r0 -r1.2 src/common/lib/libc/string/memset2.c
--- src/common/lib/libc/arch/mips/atomic/Makefile.inc 2009/01/04 17:54:29 1.9
+++ src/common/lib/libc/arch/mips/atomic/Makefile.inc 2009/12/14 00:38:59 1.10
@@ -1,23 +1,40 @@
-# $NetBSD: Makefile.inc,v 1.9 2009/01/04 17:54:29 pooka Exp $
+# $NetBSD: Makefile.inc,v 1.10 2009/12/14 00:38:59 matt Exp $
.if defined(LIB) && (${LIB} == "kern" || ${LIB} == "c" || ${LIB} == "pthread" \
|| ${LIB} == "rump")
-SRCS+= atomic_add_32_cas.c atomic_add_32_nv_cas.c atomic_and_32_cas.c \
- atomic_and_32_nv_cas.c atomic_dec_32_cas.c atomic_dec_32_nv_cas.c \
- atomic_inc_32_cas.c atomic_inc_32_nv_cas.c atomic_or_32_cas.c \
- atomic_or_32_nv_cas.c atomic_swap_32_cas.c atomic_add_64_cas.c \
- atomic_add_64_nv_cas.c atomic_and_64_cas.c atomic_and_64_nv_cas.c \
- atomic_dec_64_cas.c atomic_dec_64_nv_cas.c atomic_inc_64_cas.c \
- atomic_inc_64_nv_cas.c atomic_or_64_cas.c atomic_or_64_nv_cas.c \
- atomic_swap_64_cas.c membar_ops.o
+.if ${MACHINE_ARCH:Mmips64*} == ""
+SRCS+= atomic_add_32_cas.c atomic_add_32_nv_cas.c \
+ atomic_and_32_cas.c atomic_and_32_nv_cas.c \
+ atomic_dec_32_cas.c atomic_dec_32_nv_cas.c \
+ atomic_inc_32_cas.c atomic_inc_32_nv_cas.c \
+ atomic_or_32_cas.c atomic_or_32_nv_cas.c \
+ atomic_swap_32_cas.c \
+ atomic_add_64_cas.c atomic_add_64_nv_cas.c \
+ atomic_and_64_cas.c atomic_and_64_nv_cas.c \
+ atomic_dec_64_cas.c atomic_dec_64_nv_cas.c \
+ atomic_inc_64_cas.c atomic_inc_64_nv_cas.c \
+ atomic_or_64_cas.c atomic_or_64_nv_cas.c \
+ atomic_swap_64_cas.c
+CPPFLAGS+= -D__HAVE_ASM_ATOMIC_CAS_UP
+
+.else
+SRCS+= atomic_add.S atomic_dec.S atomic_inc.S
+SRCS+= atomic_and.S atomic_or.S
+SRCS+= atomic_swap.S
.endif
+SRCS+= membar_ops.S
+.endif
+
.if defined(LIB) && (${LIB} == "c" || ${LIB} == "pthread")
-SRCS+= membar_ops_nop.c atomic_init_testset.c atomic_cas_up.S
-CPPFLAGS+= -D__HAVE_ASM_ATOMIC_CAS_UP
+.if ${MACHINE_ARCH:Mmips64*} == ""
+SRCS+= atomic_init_testset.c atomic_cas_up.S
+.else
+SRCS+= atomic_cas.S atomic_init_cas.c
+.endif
.endif
/* $NetBSD: atomic_add.S,v 1.2 2009/12/14 00:38:59 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_add.S,v 1.2 2009/12/14 00:38:59 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
/*
 * Atomic add, implemented as an LL/SC (load-linked / store-conditional)
 * retry loop.  The INT_ and REG_ prefixed mnemonics are presumably
 * width-selecting macros (32-bit int vs. full register width) supplied
 * by atomic_op_asm.h / machine/asm.h -- confirm against those headers.
 * The file is assembled with .set noreorder, so every delay slot below
 * is explicit.
 */
/* _atomic_add_32(ptr, delta): atomically *ptr += delta; no return value. */
LEAF(_atomic_add_32)
1: INT_LL t0, 0(a0)	# t0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
INT_ADDU t0, a1		# t0 += delta
INT_SC t0, 0(a0)	# try to store; t0 = nonzero on success, 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_add_32)
ATOMIC_OP_ALIAS(atomic_add_32, _atomic_add_32)
/* _atomic_add_32_nv(ptr, delta): as above, but returns the new value in v0. */
LEAF(_atomic_add_32_nv)
1: INT_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
INT_ADDU v0, a1		# v0 = new value
move t0, v0		# copy: SC overwrites its source with the status flag
INT_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_32_nv, _atomic_add_32_nv)
#if !defined(__mips_o32)
/* 64-bit variants: only built for ABIs with 64-bit registers (not o32). */
LEAF(_atomic_add_64)
1: REG_LL t0, 0(a0)	# t0 = *ptr (load-linked, register width)
nop			# load-delay / LL hazard slot
REG_ADDU t0, a1		# t0 += delta
REG_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_add_64)
ATOMIC_OP_ALIAS(atomic_add_64, _atomic_add_64)
LEAF(_atomic_add_64_nv)
1: REG_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
REG_ADDU v0, a1		# v0 = new value
move t0, v0		# preserve new value; SC overwrites t0
REG_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_add_64_nv)
ATOMIC_OP_ALIAS(atomic_add_64_nv, _atomic_add_64_nv)
#endif
/* Map the long/ptr variants onto the 32- or 64-bit implementation. */
#ifdef _LP64
STRONG_ALIAS(_atomic_add_long, _atomic_add_64)
STRONG_ALIAS(_atomic_add_long_nv, _atomic_add_64_nv)
STRONG_ALIAS(_atomic_add_ptr, _atomic_add_64)
STRONG_ALIAS(_atomic_add_ptr_nv, _atomic_add_64_nv)
#else
STRONG_ALIAS(_atomic_add_long, _atomic_add_32)
STRONG_ALIAS(_atomic_add_long_nv, _atomic_add_32_nv)
STRONG_ALIAS(_atomic_add_ptr, _atomic_add_32)
STRONG_ALIAS(_atomic_add_ptr_nv, _atomic_add_32_nv)
#endif
STRONG_ALIAS(_atomic_add_int, _atomic_add_32)
STRONG_ALIAS(_atomic_add_int_nv, _atomic_add_32_nv)
ATOMIC_OP_ALIAS(atomic_add_int, _atomic_add_int)
ATOMIC_OP_ALIAS(atomic_add_int_nv, _atomic_add_int_nv)
ATOMIC_OP_ALIAS(atomic_add_ptr, _atomic_add_ptr)
ATOMIC_OP_ALIAS(atomic_add_ptr_nv, _atomic_add_ptr_nv)
ATOMIC_OP_ALIAS(atomic_add_long, _atomic_add_long)
ATOMIC_OP_ALIAS(atomic_add_long_nv, _atomic_add_long_nv)
/* $NetBSD: atomic_and.S,v 1.2 2009/12/14 00:38:59 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_and.S,v 1.2 2009/12/14 00:38:59 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
/*
 * Atomic bitwise AND, LL/SC retry loop (same pattern as atomic_add.S).
 * Assembled with .set noreorder: all delay slots are explicit nops.
 */
/* _atomic_and_32(ptr, mask): atomically *ptr &= mask; no return value. */
LEAF(_atomic_and_32)
1: INT_LL t0, 0(a0)	# t0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
and t0, a1		# t0 &= mask
INT_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_and_32)
ATOMIC_OP_ALIAS(atomic_and_32, _atomic_and_32)
/* _atomic_and_32_nv(ptr, mask): as above, but returns the new value in v0. */
LEAF(_atomic_and_32_nv)
1: INT_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
and v0, a1		# v0 = new value
move t0, v0		# copy: SC overwrites its source with the status flag
INT_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_32_nv, _atomic_and_32_nv)
#if !defined(__mips_o32)
/* 64-bit variants: only built for ABIs with 64-bit registers (not o32). */
LEAF(_atomic_and_64)
1: REG_LL t0, 0(a0)	# t0 = *ptr (load-linked, register width)
nop			# load-delay / LL hazard slot
and t0, a1		# full-register AND
REG_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_and_64)
ATOMIC_OP_ALIAS(atomic_and_64, _atomic_and_64)
LEAF(_atomic_and_64_nv)
1: REG_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
and v0, a1		# v0 = new value
move t0, v0		# preserve new value; SC overwrites t0
REG_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_and_64_nv)
ATOMIC_OP_ALIAS(atomic_and_64_nv, _atomic_and_64_nv)
#endif
/* Map the ulong/uint variants onto the 32- or 64-bit implementation. */
#ifdef _LP64
STRONG_ALIAS(_atomic_and_ulong, _atomic_and_64)
STRONG_ALIAS(_atomic_and_ulong_nv, _atomic_and_64_nv)
#else
STRONG_ALIAS(_atomic_and_ulong, _atomic_and_32)
STRONG_ALIAS(_atomic_and_ulong_nv, _atomic_and_32_nv)
#endif
STRONG_ALIAS(_atomic_and_uint, _atomic_and_32)
STRONG_ALIAS(_atomic_and_uint_nv, _atomic_and_32_nv)
ATOMIC_OP_ALIAS(atomic_and_uint, _atomic_and_uint)
ATOMIC_OP_ALIAS(atomic_and_uint_nv, _atomic_and_uint_nv)
ATOMIC_OP_ALIAS(atomic_and_ulong, _atomic_and_ulong)
ATOMIC_OP_ALIAS(atomic_and_ulong_nv, _atomic_and_ulong_nv)
/* $NetBSD: atomic_cas.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_cas.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
/*
 * Atomic compare-and-swap via LL/SC.
 * _atomic_cas_32(ptr, expected, new): if *ptr == expected, atomically
 * store new; returns the value observed in *ptr (in v0), which equals
 * expected exactly when the swap happened.
 * Assembled with .set noreorder: all delay slots are explicit nops.
 */
LEAF(_atomic_cas_32)
1: INT_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
bne v0, a1, 2f		# mismatch: return the current value unchanged
nop			# branch-delay slot
move t0, a2		# copy new value: SC overwrites its source register
INT_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
move v0, a1		# redundant: v0 already equals a1 on this path
2:
j ra			# return observed value in v0
nop			# branch-delay slot
END(_atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_32, _atomic_cas_32)
#if !defined(__mips_o32)
/* 64-bit variant: only built for ABIs with 64-bit registers (not o32). */
LEAF(_atomic_cas_64)
1: REG_LL v0, 0(a0)	# v0 = *ptr (load-linked, register width)
nop			# load-delay / LL hazard slot
bne v0, a1, 2f		# mismatch: return the current value unchanged
nop			# branch-delay slot
move t0, a2		# copy new value: SC overwrites its source register
REG_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
move v0, a1		# redundant: v0 already equals a1 on this path
2:
j ra			# return observed value in v0
nop			# branch-delay slot
END(_atomic_cas_64)
ATOMIC_OP_ALIAS(atomic_cas_64, _atomic_cas_64)
#endif
/* Map the ptr/ulong/uint (and _ni) variants onto 32- or 64-bit CAS. */
#ifdef _LP64
STRONG_ALIAS(_atomic_cas_ptr, _atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ptr_ni, _atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ulong, _atomic_cas_64)
STRONG_ALIAS(_atomic_cas_ulong_ni, _atomic_cas_64)
#else
STRONG_ALIAS(_atomic_cas_ptr, _atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ptr_ni, _atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong, _atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong_ni, _atomic_cas_32)
#endif
STRONG_ALIAS(_atomic_cas_uint, _atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint_ni, _atomic_cas_32)
ATOMIC_OP_ALIAS(atomic_cas_ptr, _atomic_cas_ptr)
ATOMIC_OP_ALIAS(atomic_cas_ptr_ni, _atomic_cas_ptr_ni)
ATOMIC_OP_ALIAS(atomic_cas_uint, _atomic_cas_uint)
ATOMIC_OP_ALIAS(atomic_cas_uint_ni, _atomic_cas_uint_ni)
ATOMIC_OP_ALIAS(atomic_cas_ulong, _atomic_cas_ulong)
ATOMIC_OP_ALIAS(atomic_cas_ulong_ni, _atomic_cas_ulong_ni)
/* $NetBSD: atomic_dec.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_dec.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
/*
 * Atomic decrement, LL/SC retry loop (add of -1; same pattern as
 * atomic_add.S).  Assembled with .set noreorder: delay slots explicit.
 */
/* _atomic_dec_32(ptr): atomically (*ptr)--; no return value. */
LEAF(_atomic_dec_32)
1: INT_LL t0, 0(a0)	# t0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
INT_ADDU t0, -1		# t0 -= 1
INT_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_dec_32)
ATOMIC_OP_ALIAS(atomic_dec_32, _atomic_dec_32)
/* _atomic_dec_32_nv(ptr): as above, but returns the new value in v0. */
LEAF(_atomic_dec_32_nv)
1: INT_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
INT_ADDU v0, -1		# v0 = new value
move t0, v0		# copy: SC overwrites its source with the status flag
INT_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_32_nv, _atomic_dec_32_nv)
#if !defined(__mips_o32)
/* 64-bit variants: only built for ABIs with 64-bit registers (not o32). */
LEAF(_atomic_dec_64)
1: REG_LL t0, 0(a0)	# t0 = *ptr (load-linked, register width)
nop			# load-delay / LL hazard slot
REG_ADDU t0, -1		# t0 -= 1
REG_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_dec_64)
ATOMIC_OP_ALIAS(atomic_dec_64, _atomic_dec_64)
LEAF(_atomic_dec_64_nv)
1: REG_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
REG_ADDU v0, -1		# v0 = new value
move t0, v0		# preserve new value; SC overwrites t0
REG_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_dec_64_nv)
ATOMIC_OP_ALIAS(atomic_dec_64_nv, _atomic_dec_64_nv)
#endif
/* Map the ptr/ulong/uint variants onto the 32- or 64-bit implementation. */
#ifdef _LP64
STRONG_ALIAS(_atomic_dec_ptr, _atomic_dec_64)
STRONG_ALIAS(_atomic_dec_ptr_nv, _atomic_dec_64_nv)
STRONG_ALIAS(_atomic_dec_ulong, _atomic_dec_64)
STRONG_ALIAS(_atomic_dec_ulong_nv, _atomic_dec_64_nv)
#else
STRONG_ALIAS(_atomic_dec_ptr, _atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ptr_nv, _atomic_dec_32_nv)
STRONG_ALIAS(_atomic_dec_ulong, _atomic_dec_32)
STRONG_ALIAS(_atomic_dec_ulong_nv, _atomic_dec_32_nv)
#endif
STRONG_ALIAS(_atomic_dec_uint, _atomic_dec_32)
STRONG_ALIAS(_atomic_dec_uint_nv, _atomic_dec_32_nv)
ATOMIC_OP_ALIAS(atomic_dec_ptr, _atomic_dec_ptr)
ATOMIC_OP_ALIAS(atomic_dec_ptr_nv, _atomic_dec_ptr_nv)
ATOMIC_OP_ALIAS(atomic_dec_uint, _atomic_dec_uint)
ATOMIC_OP_ALIAS(atomic_dec_uint_nv, _atomic_dec_uint_nv)
ATOMIC_OP_ALIAS(atomic_dec_ulong, _atomic_dec_ulong)
ATOMIC_OP_ALIAS(atomic_dec_ulong_nv, _atomic_dec_ulong_nv)
/* $NetBSD: atomic_inc.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_inc.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
/*
 * Atomic increment, LL/SC retry loop (add of +1; same pattern as
 * atomic_add.S).  Assembled with .set noreorder: delay slots explicit.
 */
/* _atomic_inc_32(ptr): atomically (*ptr)++; no return value. */
LEAF(_atomic_inc_32)
1: INT_LL t0, 0(a0)	# t0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
INT_ADDU t0, 1		# t0 += 1
INT_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_inc_32)
ATOMIC_OP_ALIAS(atomic_inc_32, _atomic_inc_32)
/* _atomic_inc_32_nv(ptr): as above, but returns the new value in v0. */
LEAF(_atomic_inc_32_nv)
1: INT_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
INT_ADDU v0, 1		# v0 = new value
move t0, v0		# copy: SC overwrites its source with the status flag
INT_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_32_nv, _atomic_inc_32_nv)
#if !defined(__mips_o32)
/* 64-bit variants: only built for ABIs with 64-bit registers (not o32). */
LEAF(_atomic_inc_64)
1: REG_LL t0, 0(a0)	# t0 = *ptr (load-linked, register width)
nop			# load-delay / LL hazard slot
REG_ADDU t0, 1		# t0 += 1
REG_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_inc_64)
ATOMIC_OP_ALIAS(atomic_inc_64, _atomic_inc_64)
LEAF(_atomic_inc_64_nv)
1: REG_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
REG_ADDU v0, 1		# v0 = new value
move t0, v0		# preserve new value; SC overwrites t0
REG_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_inc_64_nv)
ATOMIC_OP_ALIAS(atomic_inc_64_nv, _atomic_inc_64_nv)
#endif
/* Map the ptr/ulong/uint variants onto the 32- or 64-bit implementation. */
#ifdef _LP64
STRONG_ALIAS(_atomic_inc_ptr, _atomic_inc_64)
STRONG_ALIAS(_atomic_inc_ptr_nv, _atomic_inc_64_nv)
STRONG_ALIAS(_atomic_inc_ulong, _atomic_inc_64)
STRONG_ALIAS(_atomic_inc_ulong_nv, _atomic_inc_64_nv)
#else
STRONG_ALIAS(_atomic_inc_ptr, _atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ptr_nv, _atomic_inc_32_nv)
STRONG_ALIAS(_atomic_inc_ulong, _atomic_inc_32)
STRONG_ALIAS(_atomic_inc_ulong_nv, _atomic_inc_32_nv)
#endif
STRONG_ALIAS(_atomic_inc_uint, _atomic_inc_32)
STRONG_ALIAS(_atomic_inc_uint_nv, _atomic_inc_32_nv)
ATOMIC_OP_ALIAS(atomic_inc_ptr, _atomic_inc_ptr)
ATOMIC_OP_ALIAS(atomic_inc_ptr_nv, _atomic_inc_ptr_nv)
ATOMIC_OP_ALIAS(atomic_inc_uint, _atomic_inc_uint)
ATOMIC_OP_ALIAS(atomic_inc_uint_nv, _atomic_inc_uint_nv)
ATOMIC_OP_ALIAS(atomic_inc_ulong, _atomic_inc_ulong)
ATOMIC_OP_ALIAS(atomic_inc_ulong_nv, _atomic_inc_ulong_nv)
/* $NetBSD: atomic_or.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"

/*
 * Embed the RCS id string, matching every sibling atomic_*.S in this
 * directory; this file was the only one missing it (id copied from the
 * $NetBSD$ keyword in the header comment above).
 */
RCSID("$NetBSD: atomic_or.S,v 1.2 2009/12/14 00:39:00 matt Exp $")

.text
.set noat
.set noreorder
.set nomacro
/*
 * Atomic bitwise OR, LL/SC retry loop (same pattern as atomic_and.S).
 * Assembled with .set noreorder: all delay slots are explicit nops.
 */
/* _atomic_or_32(ptr, bits): atomically *ptr |= bits; no return value. */
LEAF(_atomic_or_32)
1: INT_LL t0, 0(a0)	# t0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
or t0, a1		# t0 |= bits
INT_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_or_32)
ATOMIC_OP_ALIAS(atomic_or_32, _atomic_or_32)
/* _atomic_or_32_nv(ptr, bits): as above, but returns the new value in v0. */
LEAF(_atomic_or_32_nv)
1: INT_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
or v0, a1		# v0 = new value
move t0, v0		# copy: SC overwrites its source with the status flag
INT_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_32_nv, _atomic_or_32_nv)
#if !defined(__mips_o32)
/* 64-bit variants: only built for ABIs with 64-bit registers (not o32). */
LEAF(_atomic_or_64)
1: REG_LL t0, 0(a0)	# t0 = *ptr (load-linked, register width)
nop			# load-delay / LL hazard slot
or t0, a1		# full-register OR
REG_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra
nop			# branch-delay slot
END(_atomic_or_64)
ATOMIC_OP_ALIAS(atomic_or_64, _atomic_or_64)
LEAF(_atomic_or_64_nv)
1: REG_LL v0, 0(a0)	# v0 = *ptr (load-linked)
nop			# load-delay / LL hazard slot
or v0, a1		# v0 = new value
move t0, v0		# preserve new value; SC overwrites t0
REG_SC t0, 0(a0)
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return new value in v0
nop			# branch-delay slot
END(_atomic_or_64_nv)
ATOMIC_OP_ALIAS(atomic_or_64_nv, _atomic_or_64_nv)
#endif
/* Map the ulong/uint variants onto the 32- or 64-bit implementation. */
#ifdef _LP64
STRONG_ALIAS(_atomic_or_ulong, _atomic_or_64)
STRONG_ALIAS(_atomic_or_ulong_nv, _atomic_or_64_nv)
#else
STRONG_ALIAS(_atomic_or_ulong, _atomic_or_32)
STRONG_ALIAS(_atomic_or_ulong_nv, _atomic_or_32_nv)
#endif
STRONG_ALIAS(_atomic_or_uint, _atomic_or_32)
STRONG_ALIAS(_atomic_or_uint_nv, _atomic_or_32_nv)
ATOMIC_OP_ALIAS(atomic_or_uint, _atomic_or_uint)
ATOMIC_OP_ALIAS(atomic_or_uint_nv, _atomic_or_uint_nv)
ATOMIC_OP_ALIAS(atomic_or_ulong, _atomic_or_ulong)
ATOMIC_OP_ALIAS(atomic_or_ulong_nv, _atomic_or_ulong_nv)
/* $NetBSD: atomic_swap.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
#include <machine/asm.h>
#include "atomic_op_asm.h"
RCSID("$NetBSD: atomic_swap.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
.text
.set noat
.set noreorder
.set nomacro
/*
 * _atomic_swap_32(ptr, new): atomically store new into *ptr and return
 * the previous contents (in v0).  LL/SC retry loop, assembled with
 * .set noreorder so delay slots are explicit.
 *
 * NOTE(review): dropped the unreferenced local label "2:" that followed
 * the retry branch -- nothing in this function branches to it (it looks
 * like a leftover from the atomic_cas template).
 */
LEAF(_atomic_swap_32)
1: INT_LL v0, 0(a0)	# v0 = old value (load-linked)
nop			# load-delay / LL hazard slot
move t0, a1		# copy new value: SC overwrites its source register
INT_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return old value in v0
nop			# branch-delay slot
END(_atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_32, _atomic_swap_32)
#if !defined(__mips_o32)
/*
 * _atomic_swap_64(ptr, new): 64-bit variant of atomic_swap; only built
 * for ABIs with 64-bit registers (not o32).  Returns the old value in v0.
 *
 * NOTE(review): dropped the unreferenced local label "2:" that followed
 * the retry branch -- nothing in this function branches to it (it looks
 * like a leftover from the atomic_cas template).
 */
LEAF(_atomic_swap_64)
1: REG_LL v0, 0(a0)	# v0 = old value (load-linked, register width)
nop			# load-delay / LL hazard slot
move t0, a1		# copy new value: SC overwrites its source register
REG_SC t0, 0(a0)	# try to store; t0 = 0 on failure
beq t0, zero, 1b	# lost the reservation -- retry
nop			# branch-delay slot
j ra			# return old value in v0
nop			# branch-delay slot
END(_atomic_swap_64)
ATOMIC_OP_ALIAS(atomic_swap_64, _atomic_swap_64)
#endif
/* Map the ptr/ulong/uint swap variants onto the 32- or 64-bit version. */
#ifdef _LP64
STRONG_ALIAS(_atomic_swap_ptr, _atomic_swap_64)
STRONG_ALIAS(_atomic_swap_ulong, _atomic_swap_64)
#else
STRONG_ALIAS(_atomic_swap_ptr, _atomic_swap_32)
STRONG_ALIAS(_atomic_swap_ulong, _atomic_swap_32)
#endif
STRONG_ALIAS(_atomic_swap_uint, _atomic_swap_32)
ATOMIC_OP_ALIAS(atomic_swap_ptr, _atomic_swap_ptr)
ATOMIC_OP_ALIAS(atomic_swap_uint, _atomic_swap_uint)
ATOMIC_OP_ALIAS(atomic_swap_ulong, _atomic_swap_ulong)
--- src/common/lib/libc/arch/mips/atomic/membar_ops.S 2008/05/25 15:56:12 1.3
+++ src/common/lib/libc/arch/mips/atomic/membar_ops.S 2009/12/14 00:39:00 1.4
@@ -1,4 +1,4 @@
-/* $NetBSD: membar_ops.S,v 1.3 2008/05/25 15:56:12 chs Exp $ */
+/* $NetBSD: membar_ops.S,v 1.4 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
@@ -31,30 +31,41 @@
#include "atomic_op_asm.h"
+#if defined(_KERNEL)
+
#ifdef _KERNEL_OPT
#include "opt_cputype.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#endif
-#if defined(_KERNEL)
#include <machine/cpu.h>
-#if MIPS_HAS_LLSC != 0 && defined(MULTIPROCESSOR)
+#if (MIPS_HAS_LLSC != 0 && defined(MULTIPROCESSOR)) || !defined(__mips_o32)
#define SYNC sync
-#else
-#define SYNC /* nothing */
#endif
+#elif !defined(__mips_o32)
+#define SYNC sync
+#endif
.text
LEAF(_membar_sync)
- SYNC
j ra
+#ifdef SYNC
+ SYNC
+#else
nop
+#endif
END(_membar_sync)
+#ifdef _KERNEL
+STRONG_ALIAS(mb_read, _membar_sync)
+STRONG_ALIAS(mb_write, _membar_sync)
+STRONG_ALIAS(mb_memory, _membar_sync)
+#endif
+
ATOMIC_OP_ALIAS(membar_sync,_membar_sync)
ATOMIC_OP_ALIAS(membar_enter,_membar_sync)
STRONG_ALIAS(_membar_enter,_membar_sync)
@@ -64,7 +75,3 @@
STRONG_ALIAS(_membar_producer,_membar_sync)
ATOMIC_OP_ALIAS(membar_consumer,_membar_sync)
STRONG_ALIAS(_membar_consumer,_membar_sync)
-
-#else /* _KERNEL */
-
-#endif /* _KERNEL */
--- src/common/lib/libc/arch/mips/gen/byte_swap_2.S 2006/02/08 21:52:36 1.2
+++ src/common/lib/libc/arch/mips/gen/byte_swap_2.S 2009/12/14 00:39:00 1.3
@@ -1,4 +1,4 @@
-/* $NetBSD: byte_swap_2.S,v 1.2 2006/02/08 21:52:36 simonb Exp $ */
+/* $NetBSD: byte_swap_2.S,v 1.3 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
@@ -35,8 +35,11 @@
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
- ASMSTR("from: @(#)htons.s 8.1 (Berkeley) 6/4/93")
- ASMSTR("$NetBSD: byte_swap_2.S,v 1.2 2006/02/08 21:52:36 simonb Exp $")
+#if 0
+ RCSID("from: @(#)htons.s 8.1 (Berkeley) 6/4/93")
+#else
+ RCSID("$NetBSD: byte_swap_2.S,v 1.3 2009/12/14 00:39:00 matt Exp $")
+#endif
#endif /* LIBC_SCCS and not lint */
#undef _LOCORE
@@ -46,9 +49,6 @@
#if defined(_KERNEL) || defined(_STANDALONE)
#define BSWAP16_NAME bswap16
#else
-#ifdef __ABICALLS__
- .abicalls
-#endif
#define BSWAP16_NAME __bswap16
#endif
@@ -57,12 +57,21 @@
ALEAF(htons)
ALEAF(ntohs)
#endif
+#if (__mips == 32 || __mips == 64) && __mips_isa_rev == 2
+ /*
+ * If we are on MIPS32r2 or MIPS64r2, use the new instructions
+ */
+ wsbh a0, a0 # word swap bytes within halfwords
+ and v0, a0, 0xffff # bound it to 16bits
+ j ra
+#else
srl v0, a0, 8
and v0, v0, 0xff
sll v1, a0, 8
and v1, v1, 0xff00
or v0, v0, v1
j ra
+#endif
END(BSWAP16_NAME)
#if BYTE_ORDER == BIG_ENDIAN
--- src/common/lib/libc/arch/mips/gen/byte_swap_4.S 2006/02/08 21:52:36 1.2
+++ src/common/lib/libc/arch/mips/gen/byte_swap_4.S 2009/12/14 00:39:00 1.3
@@ -1,4 +1,4 @@
-/* $NetBSD: byte_swap_4.S,v 1.2 2006/02/08 21:52:36 simonb Exp $ */
+/* $NetBSD: byte_swap_4.S,v 1.3 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
@@ -35,8 +35,11 @@
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
- ASMSTR("from: @(#)htonl.s 8.1 (Berkeley) 6/4/93")
- ASMSTR("$NetBSD: byte_swap_4.S,v 1.2 2006/02/08 21:52:36 simonb Exp $")
+#if 0
+ RCSID("from: @(#)htonl.s 8.1 (Berkeley) 6/4/93")
+#else
+ RCSID("$NetBSD: byte_swap_4.S,v 1.3 2009/12/14 00:39:00 matt Exp $")
+#endif
#endif /* LIBC_SCCS and not lint */
#undef _LOCORE
@@ -46,9 +49,6 @@
#if defined(_KERNEL) || defined(_STANDALONE)
#define BSWAP32_NAME bswap32
#else
-#ifdef __ABICALLS__
- .abicalls
-#endif
#define BSWAP32_NAME __bswap32
#endif
@@ -57,6 +57,14 @@
ALEAF(htonl) # a0 = 0x11223344, return 0x44332211
ALEAF(ntohl)
#endif
+#if (__mips == 32 || __mips == 64) && __mips_isa_rev == 2
+ /*
+ * If we are on MIPS32R2 or MIPS64R2 it's much easier
+ */
+ wsbh a0, a0 # word swap bytes within halfwords
+ rotr v0, a0, 16 # rotate word 16bits
+ j ra
+#else
srl v1, a0, 24 # v1 = 0x00000011
sll v0, a0, 24 # v0 = 0x44000000
or v0, v0, v1
@@ -67,6 +75,7 @@
and v1, v1, 0xff00 # v1 = 0x00002200
or v0, v0, v1
j ra
+#endif
END(BSWAP32_NAME)
#if BYTE_ORDER == BIG_ENDIAN
/* $NetBSD: byte_swap_8.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Ralph Campbell.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
RCSID("$NetBSD: byte_swap_8.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
#endif /* LIBC_SCCS and not lint */
#undef _LOCORE
#define _LOCORE /* XXX not really, just assembly-code source */
#include <machine/endian.h>
/*
 * bswap64(x): reverse the byte order of a 64-bit value.
 * Three compile-time variants:
 *   1. MIPS32r2/MIPS64r2 with 64-bit registers: dsbh+dshd (2 insns).
 *   2. MIPS32r2/MIPS64r2 under o32: wsbh+rotr on each 32-bit half,
 *      swapping the halves between the argument and result registers.
 *   3. Generic: shift/mask sequences (64-bit registers or o32 pairs).
 * NOTE(review): o32 passes the 64-bit argument in (a0,a1) and returns
 * it in (v0,v1); which register holds the high word follows the ABI's
 * endianness rules -- verify against the o32 calling convention.
 */
NLEAF(bswap64) # a0 = 0xffeeddccbbaa9988 return 0x8899aabbccddeeff
#if (__mips == 32 || __mips == 64) && __mips_isa_rev == 2
#if !defined(__mips_o32)
/*
 * If we are on MIPS32r2 or MIPS64r2 use the new instructions.
 */
dsbh v0, a0 # dwords swap bytes within halfwords
dshd v0, v0 # dwords swap halfwords within dwords
j ra
#else /* defined(__mips_o32) */
/*
 * If we are on MIPS32r2 or MIPS64r2 use the new instructions.
 * (except we must use the 32bit versions)
 */
wsbh v1, a0 # word swap bytes within halfwords
wsbh v0, a1 # word swap bytes within halfwords
rotr v1, v1, 16 # rotate word 16bits and swap word
rotr v0, v0, 16 # rotate word 16bits and swap word
j ra
#endif /* defined(__mips_o32) */
#elif !defined(__mips_o32)
/* Generic 64-bit-register path: build masks, then swap words,
 * halfwords, and finally bytes. */
# a0 = 0xffeeddccbbaa9988
li t0, 0xffff # t0 = 0x000000000000ffff
dsll t1, t0, 32 # t1 = 0x0000ffff00000000
or t0, t1 # t0 = 0x0000ffff0000ffff
dsll t2, t0, 8 # t2 = 0x00ffff0000ffff00
xor t2, t0 # t2 = 0x00ff00ff00ff00ff
/*
 * We could swap by halfword, but that would be one instruction longer.
 */
dsrl ta0, a0, 32 # ta0 = 0x00000000ffeeddcc
dsll ta1, a0, 32 # ta1 = 0xbbaa998800000000
or a1, ta0, ta1 # a1 = 0xbbaa9988ffeeddcc
# words swapped
and ta0, a1, t0 # ta0 = 0x000099880000ddcc
dsrl ta1, a1, 16 # ta1 = 0x0000bbaa9988ffee
and ta1, t0 # ta1 = 0x0000bbaa0000ffee
dsll a2, ta0, 16 # a2 = 0x99880000ddcc0000
or a2, ta1 # a2 = 0x9988bbaaddccffee
# halfwords swapped
and ta0, a2, t2 # ta0 = 0x008800aa00cc00ee
dsrl ta1, a2, 8 # ta1 = 0x009988bbaaddccff
and ta1, t2 # ta1 = 0x009900bb00dd00ff
dsll v0, ta0, 8 # v0 = 0x8800aa00cc00ee00
or v0, ta1 # v0 = 0x8899aabbccddeeff
# bytes swapped
j ra
#else /* defined(__mips_o32) */
/*
 * 32bit ABI.
 */
/* Byte-swap each 32-bit half independently; the halves are exchanged
 * by writing the a0-derived result to v1 and the a1-derived one to v0. */
# a0 = 0xccddeeff
# a1 = 0x8899aabb
srl t0, a0, 24 # t0 = 0x000000cc
srl t1, a1, 24 # t1 = 0x00000088
sll ta0, a0, 24 # ta0 = 0xff000000
sll ta1, a1, 24 # ta1 = 0xbb000000
or ta0, ta0, t0 # ta0 = 0xff0000cc
or ta1, ta1, t1 # ta1 = 0xbb000088
and t0, a0, 0xff00 # t0 = 0x0000ee00
and t1, a1, 0xff00 # t1 = 0x0000aa00
sll t0, t0, 8 # t0 = 0x00ee0000
sll t1, t1, 8 # t1 = 0x00aa0000
or ta0, ta0, t0 # ta0 = 0xffee00cc
or ta1, ta1, t1 # ta1 = 0xbbaa0088
srl t0, a0, 8 # t0 = 0x00ccddee
srl t1, a1, 8 # t1 = 0x008899aa
and t0, t0, 0xff00 # t0 = 0x0000dd00
and t1, t1, 0xff00 # t1 = 0x00009900
or v1, ta0, t0 # v1 = 0xffeeddcc
or v0, ta1, t1 # v0 = 0xbbaa9988
j ra
#endif /* defined(__mips_o32) */
END(bswap64)
--- src/common/lib/libc/arch/mips/string/bcopy.S 2005/12/27 11:23:53 1.2
+++ src/common/lib/libc/arch/mips/string/bcopy.S 2009/12/14 00:39:00 1.3
@@ -1,4 +1,4 @@
-/* $NetBSD: bcopy.S,v 1.2 2005/12/27 11:23:53 tsutsui Exp $ */
+/* $NetBSD: bcopy.S,v 1.3 2009/12/14 00:39:00 matt Exp $ */
/*
* Mach Operating System
@@ -43,14 +43,13 @@
#if defined(LIBC_SCCS) && !defined(lint)
- ASMSTR("from: @(#)mips_bcopy.s 2.2 CMU 18/06/93")
- ASMSTR("$NetBSD: bcopy.S,v 1.2 2005/12/27 11:23:53 tsutsui Exp $")
+#if 0
+ RCSID("from: @(#)mips_bcopy.s 2.2 CMU 18/06/93")
+#else
+ RCSID("$NetBSD: bcopy.S,v 1.3 2009/12/14 00:39:00 matt Exp $")
+#endif
#endif /* LIBC_SCCS and not lint */
-#ifdef __ABICALLS__
- .abicalls
-#endif
-
/*
* bcopy(caddr_t src, caddr_t dst, unsigned int len)
*
@@ -103,71 +102,72 @@
* copy is alignable. eg if src and dest are both
* on a halfword boundary.
*/
- andi t1,DSTREG,3 # get last 3 bits of dest
- bne t1,zero,3f
- andi t0,SRCREG,3 # get last 3 bits of src
- bne t0,zero,5f
+ andi t1,DSTREG,(SZREG-1) # get last bits of dest
+ bne t1,zero,3f # dest unaligned
+ andi t0,SRCREG,(SZREG-1) # get last bits of src
+ bne t0,zero,5f
/*
- * Forward aligned->aligned copy, 8*4 bytes at a time.
+ * Forward aligned->aligned copy, 8 words at a time.
*/
- li AT,-32
- and t0,SIZEREG,AT # count truncated to multiple of 32 */
- addu a3,SRCREG,t0 # run fast loop up to this address
- sltu AT,SRCREG,a3 # any work to do?
- beq AT,zero,2f
- subu SIZEREG,t0
+98:
+ li AT,-(SZREG*8)
+ and t0,SIZEREG,AT # count truncated to multiples
+ PTR_ADDU a3,SRCREG,t0 # run fast loop up to this addr
+ sltu AT,SRCREG,a3 # any work to do?
+ beq AT,zero,2f
+ PTR_SUBU SIZEREG,t0
/*
* loop body
*/
1: # cp
- lw t3,0(SRCREG)
- lw v1,4(SRCREG)
- lw t0,8(SRCREG)
- lw t1,12(SRCREG)
- addu SRCREG,32
- sw t3,0(DSTREG)
- sw v1,4(DSTREG)
- sw t0,8(DSTREG)
- sw t1,12(DSTREG)
- lw t1,-4(SRCREG)
- lw t0,-8(SRCREG)
- lw v1,-12(SRCREG)
- lw t3,-16(SRCREG)
- addu DSTREG,32
- sw t1,-4(DSTREG)
- sw t0,-8(DSTREG)
- sw v1,-12(DSTREG)
- bne SRCREG,a3,1b
- sw t3,-16(DSTREG)
+ REG_L t3,(0*SZREG)(SRCREG)
+ REG_L v1,(1*SZREG)(SRCREG)
+ REG_L t0,(2*SZREG)(SRCREG)
+ REG_L t1,(3*SZREG)(SRCREG)
+ PTR_ADDU SRCREG,SZREG*8
+ REG_S t3,(0*SZREG)(DSTREG)
+ REG_S v1,(1*SZREG)(DSTREG)
+ REG_S t0,(2*SZREG)(DSTREG)
+ REG_S t1,(3*SZREG)(DSTREG)
+ REG_L t1,(-1*SZREG)(SRCREG)
+ REG_L t0,(-2*SZREG)(SRCREG)
+ REG_L v1,(-3*SZREG)(SRCREG)
+ REG_L t3,(-4*SZREG)(SRCREG)
+ PTR_ADDU DSTREG,SZREG*8
+ REG_S t1,(-1*SZREG)(DSTREG)
+ REG_S t0,(-2*SZREG)(DSTREG)
+ REG_S v1,(-3*SZREG)(DSTREG)
+ bne SRCREG,a3,1b
+ REG_S t3,(-4*SZREG)(DSTREG)
/*
* Copy a word at a time, no loop unrolling.
*/
2: # wordcopy
- andi t2,SIZEREG,3 # get byte count / 4
- subu t2,SIZEREG,t2 # t2 = number of words to copy * 4
- beq t2,zero,3f
- addu t0,SRCREG,t2 # stop at t0
- subu SIZEREG,SIZEREG,t2
+ andi t2,SIZEREG,(SZREG-1) # get byte count / SZREG
+ PTR_SUBU t2,SIZEREG,t2 # t2 = words to copy * SZREG
+ beq t2,zero,3f
+ PTR_ADDU t0,SRCREG,t2 # stop at t0
+ PTR_SUBU SIZEREG,SIZEREG,t2
1:
- lw t3,0(SRCREG)
- addu SRCREG,4
- sw t3,0(DSTREG)
- bne SRCREG,t0,1b
- addu DSTREG,4
+ REG_L t3,0(SRCREG)
+ PTR_ADDU SRCREG,SZREG
+ REG_S t3,0(DSTREG)
+ bne SRCREG,t0,1b
+ PTR_ADDU DSTREG,SZREG
3: # bytecopy
- beq SIZEREG,zero,4f # nothing left to do?
+ beq SIZEREG,zero,4f # nothing left to do?
nop
1:
- lb t3,0(SRCREG)
- addu SRCREG,1
- sb t3,0(DSTREG)
- subu SIZEREG,1
- bgtz SIZEREG,1b
- addu DSTREG,1
+ lb t3,0(SRCREG)
+ PTR_ADDU SRCREG,1
+ sb t3,0(DSTREG)
+ PTR_SUBU SIZEREG,1
+ bgtz SIZEREG,1b
+ PTR_ADDU DSTREG,1
4: # copydone
j ra
@@ -177,91 +177,91 @@
* Copy from unaligned source to aligned dest.
*/
5: # destaligned
- andi t0,SIZEREG,3 # t0 = bytecount mod 4
- subu a3,SIZEREG,t0 # number of words to transfer
- beq a3,zero,3b
+ andi t0,SIZEREG,(SZREG-1) # t0 = bytecount mod SZREG
+ PTR_SUBU a3,SIZEREG,t0 # number of words to transfer
+ beq a3,zero,3b
nop
- move SIZEREG,t0 # this many to do after we are done
- addu a3,SRCREG,a3 # stop point
+ move SIZEREG,t0 # this many to do after we are done
+ PTR_ADDU a3,SRCREG,a3 # stop point
1:
- LWHI t3,0(SRCREG)
- LWLO t3,3(SRCREG)
- addi SRCREG,4
- sw t3,0(DSTREG)
- bne SRCREG,a3,1b
- addi DSTREG,4
+ REG_LHI t3,0(SRCREG)
+ REG_LLO t3,SZREG-1(SRCREG)
+ PTR_ADDI SRCREG,SZREG
+ REG_S t3,0(DSTREG)
+ bne SRCREG,a3,1b
+ PTR_ADDI DSTREG,SZREG
- j 3b
+ b 3b
nop
6: # backcopy -- based on above
- addu SRCREG,SIZEREG
- addu DSTREG,SIZEREG
- andi t1,DSTREG,3 # get last 3 bits of dest
- bne t1,zero,3f
- andi t0,SRCREG,3 # get last 3 bits of src
- bne t0,zero,5f
+ PTR_ADDU SRCREG,SIZEREG
+ PTR_ADDU DSTREG,SIZEREG
+ andi t1,DSTREG,SZREG-1 # get last 3 bits of dest
+ bne t1,zero,3f
+ andi t0,SRCREG,SZREG-1 # get last 3 bits of src
+ bne t0,zero,5f
/*
* Forward aligned->aligned copy, 8*4 bytes at a time.
*/
- li AT,-32
- and t0,SIZEREG,AT # count truncated to multiple of 32
- beq t0,zero,2f # any work to do?
- subu SIZEREG,t0
- subu a3,SRCREG,t0
+ li AT,(-8*SZREG)
+ and t0,SIZEREG,AT # count truncated to multiple of 32
+ beq t0,zero,2f # any work to do?
+ PTR_SUBU SIZEREG,t0
+ PTR_SUBU a3,SRCREG,t0
/*
* loop body
*/
1: # cp
- lw t3,-16(SRCREG)
- lw v1,-12(SRCREG)
- lw t0,-8(SRCREG)
- lw t1,-4(SRCREG)
- subu SRCREG,32
- sw t3,-16(DSTREG)
- sw v1,-12(DSTREG)
- sw t0,-8(DSTREG)
- sw t1,-4(DSTREG)
- lw t1,12(SRCREG)
- lw t0,8(SRCREG)
- lw v1,4(SRCREG)
- lw t3,0(SRCREG)
- subu DSTREG,32
- sw t1,12(DSTREG)
- sw t0,8(DSTREG)
- sw v1,4(DSTREG)
- bne SRCREG,a3,1b
- sw t3,0(DSTREG)
+ REG_L t3,(-4*SZREG)(SRCREG)
+ REG_L v1,(-3*SZREG)(SRCREG)
+ REG_L t0,(-2*SZREG)(SRCREG)
+ REG_L t1,(-1*SZREG)(SRCREG)
+ PTR_SUBU SRCREG,8*SZREG
+ REG_S t3,(-4*SZREG)(DSTREG)
+ REG_S v1,(-3*SZREG)(DSTREG)
+ REG_S t0,(-2*SZREG)(DSTREG)
+ REG_S t1,(-1*SZREG)(DSTREG)
+ REG_L t1,(3*SZREG)(SRCREG)
+ REG_L t0,(2*SZREG)(SRCREG)
+ REG_L v1,(1*SZREG)(SRCREG)
+ REG_L t3,(0*SZREG)(SRCREG)
+ PTR_SUBU DSTREG,8*SZREG
+ REG_S t1,(3*SZREG)(DSTREG)
+ REG_S t0,(2*SZREG)(DSTREG)
+ REG_S v1,(1*SZREG)(DSTREG)
+ bne SRCREG,a3,1b
+ REG_S t3,(0*SZREG)(DSTREG)
/*
* Copy a word at a time, no loop unrolling.
*/
2: # wordcopy
- andi t2,SIZEREG,3 # get byte count / 4
- subu t2,SIZEREG,t2 # t2 = number of words to copy * 4
- beq t2,zero,3f
- subu t0,SRCREG,t2 # stop at t0
- subu SIZEREG,SIZEREG,t2
+ andi t2,SIZEREG,SZREG-1 # get byte count / 4
+ PTR_SUBU t2,SIZEREG,t2 # t2 = number of words to copy
+ beq t2,zero,3f
+ PTR_SUBU t0,SRCREG,t2 # stop at t0
+ PTR_SUBU SIZEREG,SIZEREG,t2
1:
- lw t3,-4(SRCREG)
- subu SRCREG,4
- sw t3,-4(DSTREG)
- bne SRCREG,t0,1b
- subu DSTREG,4
+ REG_L t3,-SZREG(SRCREG)
+ PTR_SUBU SRCREG,SZREG
+ REG_S t3,-SZREG(DSTREG)
+ bne SRCREG,t0,1b
+ PTR_SUBU DSTREG,SZREG
3: # bytecopy
- beq SIZEREG,zero,4f # nothing left to do?
+ beq SIZEREG,zero,4f # nothing left to do?
nop
1:
- lb t3,-1(SRCREG)
- subu SRCREG,1
- sb t3,-1(DSTREG)
- subu SIZEREG,1
- bgtz SIZEREG,1b
- subu DSTREG,1
+ lb t3,-1(SRCREG)
+ PTR_SUBU SRCREG,1
+ sb t3,-1(DSTREG)
+ PTR_SUBU SIZEREG,1
+ bgtz SIZEREG,1b
+ PTR_SUBU DSTREG,1
4: # copydone
j ra
@@ -271,22 +271,22 @@
* Copy from unaligned source to aligned dest.
*/
5: # destaligned
- andi t0,SIZEREG,3 # t0 = bytecount mod 4
- subu a3,SIZEREG,t0 # number of words to transfer
- beq a3,zero,3b
+ andi t0,SIZEREG,SZREG-1 # t0 = bytecount mod 4
+ PTR_SUBU a3,SIZEREG,t0 # number of words to transfer
+ beq a3,zero,3b
nop
- move SIZEREG,t0 # this many to do after we are done
- subu a3,SRCREG,a3 # stop point
+ move SIZEREG,t0 # this many to do after we are done
+ PTR_SUBU a3,SRCREG,a3 # stop point
1:
- LWHI t3,-4(SRCREG)
- LWLO t3,-1(SRCREG)
- subu SRCREG,4
- sw t3,-4(DSTREG)
- bne SRCREG,a3,1b
- subu DSTREG,4
+ REG_LHI t3,-SZREG(SRCREG)
+ REG_LLO t3,-1(SRCREG)
+ PTR_SUBU SRCREG,SZREG
+ REG_S t3,-SZREG(DSTREG)
+ bne SRCREG,a3,1b
+ PTR_SUBU DSTREG,SZREG
- j 3b
+ b 3b
nop
.set reorder
--- src/common/lib/libc/arch/mips/string/ffs.S 2005/12/20 19:28:49 1.1
+++ src/common/lib/libc/arch/mips/string/ffs.S 2009/12/14 00:39:00 1.2
@@ -1,4 +1,4 @@
-/* $NetBSD: ffs.S,v 1.1 2005/12/20 19:28:49 christos Exp $ */
+/* $NetBSD: ffs.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
@@ -35,13 +35,9 @@
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
- ASMSTR("from: @(#)ffs.s 8.1 (Berkeley) 6/4/93")
- ASMSTR("$NetBSD: ffs.S,v 1.1 2005/12/20 19:28:49 christos Exp $")
+ /* RCSID("from: @(#)ffs.s 8.1 (Berkeley) 6/4/93") */
+ RCSID("$NetBSD: ffs.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
#endif /* LIBC_SCCS and not lint */
-
-#ifdef __ABICALLS__
- .abicalls
-#endif
/* bit = ffs(value) */
--- src/common/lib/libc/arch/mips/string/strcmp.S 2005/12/20 19:28:50 1.1
+++ src/common/lib/libc/arch/mips/string/strcmp.S 2009/12/14 00:39:00 1.2
@@ -1,4 +1,4 @@
-/* $NetBSD: strcmp.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
+/* $NetBSD: strcmp.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
@@ -35,14 +35,10 @@
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
- ASMSTR("from: @(#)strcmp.s 8.1 (Berkeley) 6/4/93")
- ASMSTR("$NetBSD: strcmp.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
+ /* RCSID("from: @(#)strcmp.s 8.1 (Berkeley) 6/4/93") */
+ RCSID("$NetBSD: strcmp.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
#endif /* LIBC_SCCS and not lint */
-#ifdef __ABICALLS__
- .abicalls
-#endif
-
/*
* NOTE: this version assumes unsigned chars in order to be "8 bit clean".
*/
@@ -54,9 +50,9 @@
bne t0, t1, NotEq
lbu t0, 1(a0) # unroll loop
lbu t1, 1(a1)
- add a0, a0, 2
+ PTR_ADD a0, a0, 2
beq t0, zero, LessOrEq # end of first string?
- add a1, a1, 2
+ PTR_ADD a1, a1, 2
beq t0, t1, 1b
NotEq:
subu v0, t0, t1
--- src/common/lib/libc/arch/mips/string/strlen.S 2005/12/20 19:28:50 1.1
+++ src/common/lib/libc/arch/mips/string/strlen.S 2009/12/14 00:39:00 1.2
@@ -1,4 +1,4 @@
-/* $NetBSD: strlen.S,v 1.1 2005/12/20 19:28:50 christos Exp $ */
+/* $NetBSD: strlen.S,v 1.2 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 1991, 1993
@@ -35,20 +35,16 @@
#include <mips/asm.h>
#if defined(LIBC_SCCS) && !defined(lint)
- ASMSTR("from: @(#)strlen.s 8.1 (Berkeley) 6/4/93")
- ASMSTR("$NetBSD: strlen.S,v 1.1 2005/12/20 19:28:50 christos Exp $")
+ /* RCSID("from: @(#)strlen.s 8.1 (Berkeley) 6/4/93") */
+ RCSID("$NetBSD: strlen.S,v 1.2 2009/12/14 00:39:00 matt Exp $")
#endif /* LIBC_SCCS and not lint */
-#ifdef __ABICALLS__
- .abicalls
-#endif
-
LEAF(strlen)
- addu v1, a0, 1
+ PTR_ADDU v1, a0, 1
1:
- lb v0, 0(a0) # get byte from string
- addu a0, a0, 1 # increment pointer
- bne v0, zero, 1b # continue if not end
- subu v0, a0, v1 # compute length - 1 for '\0' char
- j ra
+ lb v0, 0(a0) # get byte from string
+ PTR_ADDU a0, a0, 1 # increment pointer
+ bne v0, zero, 1b # continue if not end
+ PTR_SUBU v0, a0, v1 # compute length - 1 for '\0' char
+ j ra
END(strlen)
--- src/common/lib/libc/atomic/atomic_init_testset.c 2009/01/30 14:29:44 1.6
+++ src/common/lib/libc/atomic/atomic_init_testset.c 2009/12/14 00:39:00 1.7
@@ -1,4 +1,4 @@
-/* $NetBSD: atomic_init_testset.c,v 1.6 2009/01/30 14:29:44 skrll Exp $ */
+/* $NetBSD: atomic_init_testset.c,v 1.7 2009/12/14 00:39:00 matt Exp $ */
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
@@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
-__RCSID("$NetBSD: atomic_init_testset.c,v 1.6 2009/01/30 14:29:44 skrll Exp $");
+__RCSID("$NetBSD: atomic_init_testset.c,v 1.7 2009/12/14 00:39:00 matt Exp $");
#include "atomic_op_namespace.h"
@@ -91,7 +91,7 @@
__cpu_simple_lock_t *lock;
uint32_t ret;
- lock = &atomic_locks[((uint32_t)ptr >> 3) & 127];
+ lock = &atomic_locks[((uintptr_t)ptr >> 3) & 127];
__cpu_simple_lock(lock);
ret = *ptr;
if (__predict_true(ret == old)) {
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <matt@3am-software.com>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#define BZERO
#include "memset2.c"
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: bzero2.c,v 1.2 2009/12/14 00:39:01 matt Exp $");
#endif /* LIBC_SCCS and not lint */
/*-
* Copyright (c) 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <matt@3am-software.com>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#if !defined(_KERNEL) && !defined(_STANDALONE)
#include <assert.h>
#include <limits.h>
#include <string.h>
#include <inttypes.h>
#else
#include <lib/libkern/libkern.h>
#include <machine/limits.h>
#endif
#include <sys/endian.h>
#include <machine/types.h>
#ifdef TEST
#include <assert.h>
#define _DIAGASSERT(a) assert(a)
#endif
#ifdef _FORTIFY_SOURCE
#undef bzero
#undef memset
#endif
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: memset2.c,v 1.2 2009/12/14 00:39:01 matt Exp $");
#endif /* LIBC_SCCS and not lint */
/*
* Assume uregister_t is the widest non-synthetic unsigned type.
*/
typedef uregister_t memword_t;
#ifdef BZERO
static inline
#define memset memset0
#endif
#ifdef TEST
static
#define memset test_memset
#endif
#ifdef CTASSERT
CTASSERT((~(memword_t)0U >> 1) != ~(memword_t)0U);
#endif
/*
 * memset: fill the first <len> bytes at <addr> with the byte value
 * (unsigned char)c and return <addr> (standard C contract).
 *
 * The fill is done a memword_t (widest non-synthetic unsigned type) at
 * a time: an unaligned head, then whole words, then a subword tail.
 */
void *
memset(void *addr, int c, size_t len)
{
	memword_t *dstp = addr;
	memword_t *edstp;
	memword_t fill;
#ifndef __OPTIMIZE_SIZE__
	memword_t keep_mask = 0;
#endif
	size_t fill_count;

	_DIAGASSERT(addr != 0);

	if (__predict_false(len == 0))
		return addr;

	/*
	 * Pad out the fill byte across a memword_t.
	 * Per C99 7.21.6.1 the stored byte is (unsigned char)c, so mask
	 * the argument down to one byte first: without the cast a
	 * negative or out-of-range c (e.g. -2 or 0x100) would leave
	 * stray bits in the replicated word.
	 * The conditional at the end prevents GCC from complaining about
	 * shift count >= width of type.
	 */
	fill = (unsigned char)c;
	fill |= fill << 8;
	fill |= fill << 16;
	fill |= fill << (sizeof(c) < sizeof(fill) ? 32 : 0);

	/*
	 * Get the number of unaligned bytes to fill in the first word.
	 */
	fill_count = -(uintptr_t)addr & (sizeof(memword_t) - 1);
	if (__predict_false(fill_count != 0)) {
#ifndef __OPTIMIZE_SIZE__
		/*
		 * We want to clear <fill_count> trailing bytes in the word.
		 * On big/little endian, these are the least/most significant,
		 * bits respectively.  So as we shift, the keep_mask will only
		 * have bits set for the bytes we won't be filling.
		 */
#if BYTE_ORDER == BIG_ENDIAN
		keep_mask = ~(memword_t)0U << (fill_count * 8);
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
		keep_mask = ~(memword_t)0U >> (fill_count * 8);
#endif
		/*
		 * Make sure dstp is aligned to a memword_t boundary.
		 */
		dstp = (memword_t *)((uintptr_t)addr & -sizeof(memword_t));
		if (len >= fill_count) {
			/*
			 * If we can fill the rest of this word, then we mask
			 * off the bytes we are filling and then fill in those
			 * bytes with the new fill value.
			 */
			*dstp = (*dstp & keep_mask) | (fill & ~keep_mask);
			len -= fill_count;
			if (__predict_false(len == 0))
				return addr;
			/*
			 * Since we were able to fill the rest of this word,
			 * we will advance to the next word and thus have no
			 * bytes to preserve.
			 *
			 * If we don't have enough to fill the rest of this
			 * word, we will fall through the following loop
			 * (since there are no full words to fill).  Then we
			 * use the keep_mask above to preserve the leading
			 * bytes of word.
			 */
			dstp++;
			keep_mask = 0;
		} else {
			/*
			 * Count from the word start so the tail code below
			 * also preserves the head bytes before <addr>.
			 */
			len += (uintptr_t)addr & (sizeof(memword_t) - 1);
		}
#else /* __OPTIMIZE_SIZE__ */
		uint8_t *dp, *ep;
		if (len < fill_count)
			fill_count = len;
		for (dp = (uint8_t *)dstp, ep = dp + fill_count;
		     dp != ep; dp++)
			*dp = fill;
		if ((len -= fill_count) == 0)
			return addr;
		dstp = (memword_t *)ep;
#endif /* __OPTIMIZE_SIZE__ */
	}

	/*
	 * Simply fill memory one word at time (for as many full words we have
	 * to write).
	 */
	for (edstp = dstp + len / sizeof(memword_t); dstp != edstp; dstp++)
		*dstp = fill;

	/*
	 * We didn't subtract out the full words we just filled since we know
	 * by the time we get here we will have less than a word's worth to
	 * write.  So we can concern ourselves with only the subword len bits.
	 */
	len &= sizeof(memword_t)-1;
	if (len > 0) {
#ifndef __OPTIMIZE_SIZE__
		/*
		 * We want to clear <len> leading bytes in the word.
		 * On big/little endian, these are the most/least significant
		 * bits, respectively.  But as we want the mask of the bytes to
		 * keep, we have to complement the mask.  So after we shift,
		 * the keep_mask will only have bits set for the bytes we won't
		 * be filling.
		 *
		 * But the keep_mask could already have bytes to preserve
		 * if the amount to fill was less than the amount of trailing
		 * space in the first word.
		 */
#if BYTE_ORDER == BIG_ENDIAN
		keep_mask |= ~(memword_t)0U >> (len * 8);
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
		keep_mask |= ~(memword_t)0U << (len * 8);
#endif
		/*
		 * Now we mask off the bytes we are filling and then fill in
		 * those bytes with the new fill value.
		 */
		*dstp = (*dstp & keep_mask) | (fill & ~keep_mask);
#else /* __OPTIMIZE_SIZE__ */
		uint8_t *dp, *ep;
		for (dp = (uint8_t *)dstp, ep = dp + len;
		     dp != ep; dp++)
			*dp = fill;
#endif /* __OPTIMIZE_SIZE__ */
	}

	/*
	 * Return the initial addr
	 */
	return addr;
}
#ifdef BZERO
/*
* For bzero, simply inline memset and let the compiler optimize things away.
*/
/*
 * bzero: zero out <len> bytes starting at <addr>.
 * Delegates to memset so the compiler can optimize the zero fill.
 */
void
bzero(void *addr, size_t len)
{
	(void)memset(addr, 0, len);
}
#endif
#ifdef TEST
#include <stdbool.h>
#include <stdio.h>
#undef memset
/*
 * Scratch buffer for the exhaustive test below: four words' worth of
 * bytes, union-aliased with memword_t so the storage is suitably
 * aligned for word-at-a-time access by the code under test.
 */
static union {
	uint8_t bytes[sizeof(memword_t) * 4];
	memword_t words[4];
} testmem;
/*
 * Exhaustive test driver: for every start offset and length that fit
 * inside testmem (leaving at least one guard byte at each end), fill
 * the buffer with 0xff, zero the [start, start+len) slice with
 * test_memset, and verify that exactly that slice was cleared.
 * Returns 0 on success, 1 if any check failed.
 */
int
main(int argc, char **argv)
{
	size_t start;
	size_t len;
	bool failed = false;

	(void)argc;	/* unused */
	(void)argv;	/* unused */

	for (start = 1; start < sizeof(testmem) - 1; start++) {
		for (len = 1; start + len < sizeof(testmem) - 1; len++) {
			bool ok = true;
			size_t i;
			/*
			 * check_value is "sticky": it flips at the slice
			 * boundaries and holds its value in between.
			 * Initialize it so the compiler can also prove it
			 * is never read uninitialized.
			 */
			uint8_t check_value = 0xff;
			memset(testmem.bytes, 0xff, sizeof(testmem));
			test_memset(testmem.bytes + start, 0x00, len);
			for (i = 0; i < sizeof(testmem); i++) {
				if (i == 0 || i == start + len)
					check_value = 0xff;
				else if (i == start)
					check_value = 0x00;
				if (testmem.bytes[i] != check_value) {
					if (ok)
						printf("pass @ %zu .. %zu failed",
						    start, start + len - 1);
					ok = false;
					printf(" [%zu]=0x%02x(!0x%02x)",
					    i, testmem.bytes[i], check_value);
				}
			}
			if (!ok) {
				printf("\n");
				failed = true;
			}
		}
	}
	return failed ? 1 : 0;
}