[MIPS] Add optimized string functions

Use the same string functions for all MIPS architectures.

Bug: 21555893
(cherry picked from commit 38f2eaa07b0ad2e01a40607d3a0ac240ff53abbf)

Change-Id: I94521f023d0bb136a4672782148a9f6e77cc6f1e
diff --git a/libc/arch-mips/mips.mk b/libc/arch-mips/mips.mk
index 33eceab..05e7198 100644
--- a/libc/arch-mips/mips.mk
+++ b/libc/arch-mips/mips.mk
@@ -6,6 +6,9 @@
 
 libc_bionic_src_files_mips += \
     arch-mips/string/memcmp.c \
+    arch-mips/string/memcpy.S \
+    arch-mips/string/memset.S \
+    arch-mips/string/strcmp.S \
     bionic/__memcpy_chk.cpp \
     bionic/__memset_chk.cpp \
     bionic/__strcpy_chk.cpp \
@@ -31,7 +34,6 @@
     upstream-openbsd/lib/libc/string/stpcpy.c \
     upstream-openbsd/lib/libc/string/stpncpy.c \
     upstream-openbsd/lib/libc/string/strcat.c \
-    upstream-openbsd/lib/libc/string/strcmp.c \
     upstream-openbsd/lib/libc/string/strcpy.c \
     upstream-openbsd/lib/libc/string/strlcat.c \
     upstream-openbsd/lib/libc/string/strlcpy.c \
@@ -54,14 +56,10 @@
 
 ifndef ARCH_MIPS_REV6
 libc_bionic_src_files_mips += \
-    arch-mips/string/memcpy.S \
-    arch-mips/string/memset.S \
     arch-mips/string/mips_strlen.c \
 
 else
 libc_bionic_src_files_mips += \
-    arch-mips/string/memcpy.c \
-    arch-mips/string/memset.c \
     arch-mips/string/strlen.c \
 
 endif
diff --git a/libc/arch-mips/string/memcpy.S b/libc/arch-mips/string/memcpy.S
index dc91096..0b711bd 100644
--- a/libc/arch-mips/string/memcpy.S
+++ b/libc/arch-mips/string/memcpy.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009
+ * Copyright (c) 2012-2015
  *      MIPS Technologies, Inc., California.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,397 +27,826 @@
  * SUCH DAMAGE.
  */
 
-/************************************************************************
- *
- *  memcpy.S
- *  Version: "043009"
- *
- ************************************************************************/
+#ifdef __ANDROID__
+# include <private/bionic_asm.h>
+# define USE_MEMMOVE_FOR_OVERLAP
+# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
+# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
+#elif _LIBC
+# include <sysdep.h>
+# include <regdef.h>
+# include <sys/asm.h>
+# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
+# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
+#elif _COMPILING_NEWLIB
+# include "machine/asm.h"
+# include "machine/regdef.h"
+# define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD_STREAMED
+# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
+#else
+# include <regdef.h>
+# include <sys/asm.h>
+#endif
+
+/* Check to see if the MIPS architecture we are compiling for supports
+ * prefetching.
+ */
+
+#if (__mips == 4) || (__mips == 5) || (__mips == 32) || (__mips == 64)
+# ifndef DISABLE_PREFETCH
+#  define USE_PREFETCH
+# endif
+#endif
+
+#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
+# ifndef DISABLE_DOUBLE
+#  define USE_DOUBLE
+# endif
+#endif
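+
+/* For example (illustrative): a mips64 (n64 ABI) build defines
+ * _MIPS_SIM == _ABI64, so USE_DOUBLE is set and the loops below move
+ * 8 bytes per ld/sd instead of 4 per lw/sw.  */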
 
 
-/************************************************************************
- *  Include files
- ************************************************************************/
+#if __mips_isa_rev > 5
+# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+#  undef PREFETCH_STORE_HINT
+#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
+# endif
+# define R6_CODE
+#endif
 
-#include <private/bionic_asm.h>
+/* Some asm.h files do not have the L macro definition.  */
+#ifndef L
+# if _MIPS_SIM == _ABIO32
+#  define L(label) $L ## label
+# else
+#  define L(label) .L ## label
+# endif
+#endif
 
+/* Some asm.h files do not have the PTR_ADDIU macro definition.  */
+#ifndef PTR_ADDIU
+# if _MIPS_SIM == _ABIO32
+#  define PTR_ADDIU	addiu
+# else
+#  define PTR_ADDIU	daddiu
+# endif
+#endif
+
+/* Some asm.h files do not have the PTR_SRA macro definition.  */
+#ifndef PTR_SRA
+# if  _MIPS_SIM == _ABIO32
+#  define PTR_SRA	sra
+# else
+#  define PTR_SRA	dsra
+# endif
+#endif
+
+/* New R6 instructions that may not be in asm.h.  */
+#ifndef PTR_LSA
+# if _MIPS_SIM == _ABIO32
+#  define PTR_LSA	lsa
+# else
+#  define PTR_LSA	dlsa
+# endif
+#endif
 
 /*
- * This routine could be optimized for MIPS64. The current code only
- * uses MIPS32 instructions.
+ * Using PREFETCH_HINT_LOAD_STREAMED instead of PREFETCH_LOAD on load
+ * prefetches appears to offer a slight performance advantage.
+ *
+ * Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
+ * or PREFETCH_STORE_STREAMED offers a large performance advantage
+ * but PREPAREFORSTORE has some special restrictions to consider.
+ *
+ * Prefetch with the 'prepare for store' hint does not copy a memory
+ * location into the cache, it just allocates a cache line and zeros
+ * it out.  This means that if you do not write to the entire cache
+ * line before writing it out to memory some data will get zeroed out
+ * when the cache line is written back to memory and data will be lost.
+ *
+ * Also if you are using this memcpy to copy overlapping buffers it may
+ * not behave correctly when using the 'prepare for store' hint.  If you
+ * use the 'prepare for store' prefetch on a memory area that is in the
+ * memcpy source (as well as the memcpy destination), then you will get
+ * some data zeroed out before you have a chance to read it and data will
+ * be lost.
+ *
+ * If you are going to use this memcpy routine with the 'prepare for store'
+ * prefetch you may want to set USE_MEMMOVE_FOR_OVERLAP in order to avoid
+ * the problem of running memcpy on overlapping buffers.
+ *
+ * There are ifdef'ed sections of this memcpy to make sure that it does not
+ * do prefetches on cache lines that are not going to be completely written.
+ * This code is only needed and only used when PREFETCH_STORE_HINT is set to
+ * PREFETCH_HINT_PREPAREFORSTORE.  This code assumes that cache lines are
+ * 32 bytes and if the cache line is larger it will not work correctly.
  */
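+/* Concrete illustration (assumes 32-byte lines; an example, not taken
+ * from the routine itself): if "pref 30" allocates the line holding
+ * dst[32..63] but the copy then writes only dst[32..47], eviction
+ * writes zeros over dst[48..63].  The guards below rule this out.  */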
-#if defined(__MIPSEB__)
-#  define LWHI	lwl		/* high part is left in big-endian	*/
-#  define SWHI	swl		/* high part is left in big-endian	*/
-#  define LWLO	lwr		/* low part is right in big-endian	*/
-#  define SWLO	swr		/* low part is right in big-endian	*/
+
+#ifdef USE_PREFETCH
+# define PREFETCH_HINT_LOAD		0
+# define PREFETCH_HINT_STORE		1
+# define PREFETCH_HINT_LOAD_STREAMED	4
+# define PREFETCH_HINT_STORE_STREAMED	5
+# define PREFETCH_HINT_LOAD_RETAINED	6
+# define PREFETCH_HINT_STORE_RETAINED	7
+# define PREFETCH_HINT_WRITEBACK_INVAL	25
+# define PREFETCH_HINT_PREPAREFORSTORE	30
+
+/*
+ * If we have not picked out what hints to use at this point use the
+ * standard load and store prefetch hints.
+ */
+# ifndef PREFETCH_STORE_HINT
+#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
+# endif
+# ifndef PREFETCH_LOAD_HINT
+#  define PREFETCH_LOAD_HINT PREFETCH_HINT_LOAD
+# endif
+
+/*
+ * We double everything when USE_DOUBLE is true so we do 2 prefetches to
+ * get 64 bytes in that case.  The assumption is that each individual
+ * prefetch brings in 32 bytes.
+ */
+
+# ifdef USE_DOUBLE
+#  define PREFETCH_CHUNK 64
+#  define PREFETCH_FOR_LOAD(chunk, reg) \
+ pref PREFETCH_LOAD_HINT, (chunk)*64(reg); \
+ pref PREFETCH_LOAD_HINT, ((chunk)*64)+32(reg)
+#  define PREFETCH_FOR_STORE(chunk, reg) \
+ pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
+ pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
+# else
+#  define PREFETCH_CHUNK 32
+#  define PREFETCH_FOR_LOAD(chunk, reg) \
+ pref PREFETCH_LOAD_HINT, (chunk)*32(reg)
+#  define PREFETCH_FOR_STORE(chunk, reg) \
+ pref PREFETCH_STORE_HINT, (chunk)*32(reg)
+# endif
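+/* For illustration (not part of the build): in 32-bit mode with the
+ * Android hints above, PREFETCH_FOR_LOAD(2, a1) expands to
+ *
+ *	pref 4, 64(a1)		# hint 4 = load-streamed, offset 2*32
+ *
+ * and the USE_DOUBLE variant emits two such prefs, 32 bytes apart.  */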
+/* MAX_PREFETCH_SIZE is the maximum size of a prefetch, it must not be less
+ * than PREFETCH_CHUNK, the assumed size of each prefetch.  If the real size
+ * of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
+ * hint is used, the code will not work correctly.  If PREPAREFORSTORE is not
+ * used then MAX_PREFETCH_SIZE does not matter.  */
+# define MAX_PREFETCH_SIZE 128
+/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
+ * than 5 on a STORE prefetch and that a single prefetch can never be larger
+ * than MAX_PREFETCH_SIZE.  We add the extra 32 when USE_DOUBLE is set because
+ * we actually do two prefetches in that case, one 32 bytes after the other.  */
+# ifdef USE_DOUBLE
+#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
+# else
+#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
+# endif
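+/* Worked example (illustrative): with USE_DOUBLE, PREFETCH_CHUNK is 64,
+ * so PREFETCH_LIMIT = 5*64 + 32 + 128 = 480 bytes; in 32-bit mode it is
+ * 5*32 + 128 = 288 bytes.  Store prefetching stops once a0 comes within
+ * this distance of the end of the destination buffer.  */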
+# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
+    && ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
+/* We cannot handle this because the initial prefetches may fetch bytes that
+ * are before the buffer being copied.  We start copies with an offset
+ * of 4 to avoid this situation when using PREPAREFORSTORE.  */
+#error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
+# endif
+#else /* USE_PREFETCH not defined */
+# define PREFETCH_FOR_LOAD(offset, reg)
+# define PREFETCH_FOR_STORE(offset, reg)
 #endif
 
-#if defined(__MIPSEL__)
-#  define LWHI	lwr		/* high part is right in little-endian	*/
-#  define SWHI	swr		/* high part is right in little-endian	*/
-#  define LWLO	lwl		/* low part is left in big-endian	*/
-#  define SWLO	swl		/* low part is left in big-endian	*/
+/* Allow the routine to be named something else if desired.  */
+#ifndef MEMCPY_NAME
+# define MEMCPY_NAME memcpy
 #endif
 
-LEAF(memcpy,0)
+/* We use these 32/64 bit registers as temporaries to do the copying.  */
+#define REG0 t0
+#define REG1 t1
+#define REG2 t2
+#define REG3 t3
+#if defined(_MIPS_SIM) && (_MIPS_SIM == _ABIO32 || _MIPS_SIM == _ABIO64)
+# define REG4 t4
+# define REG5 t5
+# define REG6 t6
+# define REG7 t7
+#else
+# define REG4 ta0
+# define REG5 ta1
+# define REG6 ta2
+# define REG7 ta3
+#endif
 
+/* We load/store 64 bits at a time when USE_DOUBLE is true.
+ * The C_ prefix stands for CHUNK and is used to avoid macro name
+ * conflicts with system header files.  */
+
+#ifdef USE_DOUBLE
+# define C_ST	sd
+# define C_LD	ld
+# if __MIPSEB
+#  define C_LDHI	ldl	/* high part is left in big-endian	*/
+#  define C_STHI	sdl	/* high part is left in big-endian	*/
+#  define C_LDLO	ldr	/* low part is right in big-endian	*/
+#  define C_STLO	sdr	/* low part is right in big-endian	*/
+# else
+#  define C_LDHI	ldr	/* high part is right in little-endian	*/
+#  define C_STHI	sdr	/* high part is right in little-endian	*/
+#  define C_LDLO	ldl	/* low part is left in little-endian	*/
+#  define C_STLO	sdl	/* low part is left in little-endian	*/
+# endif
+# define C_ALIGN	dalign	/* r6 align instruction			*/
+#else
+# define C_ST	sw
+# define C_LD	lw
+# if __MIPSEB
+#  define C_LDHI	lwl	/* high part is left in big-endian	*/
+#  define C_STHI	swl	/* high part is left in big-endian	*/
+#  define C_LDLO	lwr	/* low part is right in big-endian	*/
+#  define C_STLO	swr	/* low part is right in big-endian	*/
+# else
+#  define C_LDHI	lwr	/* high part is right in little-endian	*/
+#  define C_STHI	swr	/* high part is right in little-endian	*/
+#  define C_LDLO	lwl	/* low part is left in little-endian	*/
+#  define C_STLO	swl	/* low part is left in little-endian	*/
+# endif
+# define C_ALIGN	align	/* r6 align instruction			*/
+#endif
+
+/* Bookkeeping values for 32 vs. 64 bit mode.  */
+#ifdef USE_DOUBLE
+# define NSIZE 8
+# define NSIZEMASK 0x3f
+# define NSIZEDMASK 0x7f
+#else
+# define NSIZE 4
+# define NSIZEMASK 0x1f
+# define NSIZEDMASK 0x3f
+#endif
+#define UNIT(unit) ((unit)*NSIZE)
+#define UNITM1(unit) (((unit)*NSIZE)-1)
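+
+/* Example (illustrative): with USE_DOUBLE, UNIT(15) is 120 and
+ * UNITM1(16) is 127, so one pass of the main loop below moves
+ * 16 double words = 128 bytes; without USE_DOUBLE it moves 64.  */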
+
+#ifdef __ANDROID__
+LEAF(MEMCPY_NAME, 0)
+#else
+LEAF(MEMCPY_NAME)
+#endif
+	.set	nomips16
 	.set	noreorder
-	.set	noat
 /*
  * Below we handle the case where memcpy is called with overlapping src and dst.
- * Although memcpy is not required to handle this case, some parts of Android like Skia
- * rely on such usage. We call memmove to handle such cases.
+ * Although memcpy is not required to handle this case, some parts of Android
+ * like Skia rely on such usage. We call memmove to handle such cases.
  */
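+/* Rough C equivalent of the overlap guard below (illustrative sketch
+ * only, mirroring the branchless absolute value in the assembly):
+ *
+ *	intptr_t d = (intptr_t)dst - (intptr_t)src;
+ *	intptr_t s = d >> 31;			// sign smear (PTR_SRA)
+ *	if ((uintptr_t)((d ^ s) - s) < n)	// |dst - src| < n
+ *		return memmove(dst, src, n);
+ */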
-	subu	t0,a0,a1
-	sra	AT,t0,31
-	xor	t1,t0,AT
-	subu	t0,t1,AT
-	sltu	AT,t0,a2
-	beq	AT,zero,.Lmemcpy
-	 la	t9,memmove
+#ifdef USE_MEMMOVE_FOR_OVERLAP
+	PTR_SUBU t0,a0,a1
+	PTR_SRA	t2,t0,31
+	xor	t1,t0,t2
+	PTR_SUBU t0,t1,t2
+	sltu	t2,t0,a2
+	beq	t2,zero,L(memcpy)
+	nop
+#if defined(__LP64__)
+	daddiu	sp,sp,-8
+	SETUP_GP64(0,MEMCPY_NAME)
+	LA	t9,memmove
+	RESTORE_GP64
 	jr	t9
-	 nop
-.Lmemcpy:
-	slti	AT,a2,8
-	bne	AT,zero,.Llast8
-	 move	v0,a0	# memcpy returns the dst pointer
+	daddiu	sp,sp,8
+#else
+	LA	t9,memmove
+	jr	t9
+	nop
+#endif
+L(memcpy):
+#endif
+/*
+ * If the size is less than 2*NSIZE (8 or 16), go to L(lastb).  Regardless of
+ * size, copy dst pointer to v0 for the return value.
+ */
+	slti	t2,a2,(2 * NSIZE)
+	bne	t2,zero,L(lastb)
+#if defined(RETURN_FIRST_PREFETCH) || defined(RETURN_LAST_PREFETCH)
+	move	v0,zero
+#else
+	move	v0,a0
+#endif
 
-# Test if the src and dst are word-aligned, or can be made word-aligned
+#ifndef R6_CODE
+
+/*
+ * If src and dst have different alignments, go to L(unaligned), if they
+ * have the same alignment (but are not actually aligned) do a partial
+ * load/store to make them aligned.  If they are both already aligned
+ * we can start copying at L(aligned).
+ */
 	xor	t8,a1,a0
-	andi	t8,t8,0x3		# t8 is a0/a1 word-displacement
+	andi	t8,t8,(NSIZE-1)		/* t8 is a0/a1 word-displacement */
+	bne	t8,zero,L(unaligned)
+	PTR_SUBU a3, zero, a0
 
-	bne	t8,zero,.Lunaligned
-	 negu	a3,a0
+	andi	a3,a3,(NSIZE-1)		/* copy a3 bytes to align a0/a1	  */
+	beq	a3,zero,L(aligned)	/* if a3=0, it is already aligned */
+	PTR_SUBU a2,a2,a3		/* a2 is the remaining bytes count */
 
-	andi	a3,a3,0x3	# we need to copy a3 bytes to make a0/a1 aligned
-	beq	a3,zero,.Lchk16w # when a3=0 then the dst (a0) is word-aligned
-	 subu	a2,a2,a3	# now a2 is the remining bytes count
+	C_LDHI	t8,0(a1)
+	PTR_ADDU a1,a1,a3
+	C_STHI	t8,0(a0)
+	PTR_ADDU a0,a0,a3
 
-	LWHI	t8,0(a1)
-	addu	a1,a1,a3
-	SWHI	t8,0(a0)
-	addu	a0,a0,a3
+#else /* R6_CODE */
 
-# Now the dst/src are mutually word-aligned with word-aligned addresses
-.Lchk16w:
-	andi	t8,a2,0x3f	# any whole 64-byte chunks?
-				# t8 is the byte count after 64-byte chunks
+/*
+ * Align the destination and hope that the source gets aligned too.  If it
+ * doesn't we jump to L(r6_unaligned*) to do unaligned copies using the r6
+ * align instruction.
+ */
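+/* Illustrative note: lapc/PTR_LSA/jrc below form a computed goto,
+ * t9 = &atable + 4 * (dst & 7), where each table entry is one 4-byte
+ * compact branch -- roughly "goto *table[dst & 7]" in GNU C.  */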
+	andi	t8,a0,7
+	lapc	t9,L(atable)
+	PTR_LSA	t9,t8,t9,2
+	jrc	t9
+L(atable):
+	bc	L(lb0)
+	bc	L(lb7)
+	bc	L(lb6)
+	bc	L(lb5)
+	bc	L(lb4)
+	bc	L(lb3)
+	bc	L(lb2)
+	bc	L(lb1)
+L(lb7):
+	lb	a3, 6(a1)
+	sb	a3, 6(a0)
+L(lb6):
+	lb	a3, 5(a1)
+	sb	a3, 5(a0)
+L(lb5):
+	lb	a3, 4(a1)
+	sb	a3, 4(a0)
+L(lb4):
+	lb	a3, 3(a1)
+	sb	a3, 3(a0)
+L(lb3):
+	lb	a3, 2(a1)
+	sb	a3, 2(a0)
+L(lb2):
+	lb	a3, 1(a1)
+	sb	a3, 1(a0)
+L(lb1):
+	lb	a3, 0(a1)
+	sb	a3, 0(a0)
 
-	beq	a2,t8,.Lchk8w	# if a2==t8, no 64-byte chunks
-				# There will be at most 1 32-byte chunk after it
-	 subu	a3,a2,t8	# subtract from a2 the reminder
-                                # Here a3 counts bytes in 16w chunks
-	addu	a3,a0,a3	# Now a3 is the final dst after 64-byte chunks
+	li	t9,8
+	subu	t8,t9,t8
+	PTR_SUBU a2,a2,t8
+	PTR_ADDU a0,a0,t8
+	PTR_ADDU a1,a1,t8
+L(lb0):
 
-	addu	t0,a0,a2	# t0 is the "past the end" address
+	andi	t8,a1,(NSIZE-1)
+	lapc	t9,L(jtable)
+	PTR_LSA	t9,t8,t9,2
+	jrc	t9
+L(jtable):
+	bc	L(aligned)
+	bc	L(r6_unaligned1)
+	bc	L(r6_unaligned2)
+	bc	L(r6_unaligned3)
+# ifdef USE_DOUBLE
+	bc	L(r6_unaligned4)
+	bc	L(r6_unaligned5)
+	bc	L(r6_unaligned6)
+	bc	L(r6_unaligned7)
+# endif
+#endif /* R6_CODE */
 
-# When in the loop we exercise "pref 30,x(a0)", the a0+x should not be past
-# the "t0-32" address
-# This means: for x=128 the last "safe" a0 address is "t0-160"
-# Alternatively, for x=64 the last "safe" a0 address is "t0-96"
-# In the current version we will use "pref 30,128(a0)", so "t0-160" is the limit
-	subu	t9,t0,160	# t9 is the "last safe pref 30,128(a0)" address
+L(aligned):
 
-	pref    0,0(a1)		# bring the first line of src, addr 0
-	pref    0,32(a1)	# bring the second line of src, addr 32
-	pref    0,64(a1)	# bring the third line of src, addr 64
-	pref	30,32(a0)	# safe, as we have at least 64 bytes ahead
-# In case the a0 > t9 don't use "pref 30" at all
-	sgtu	v1,a0,t9
-	bgtz	v1,.Lloop16w	# skip "pref 30,64(a0)" for too short arrays
-	 nop
-# otherwise, start with using pref30
-	pref	30,64(a0)
-.Lloop16w:
-	pref	0,96(a1)
-	lw	t0,0(a1)
-	bgtz	v1,.Lskip_pref30_96	# skip "pref 30,96(a0)"
-	 lw	t1,4(a1)
-	pref    30,96(a0)   # continue setting up the dest, addr 96
-.Lskip_pref30_96:
-	lw	t2,8(a1)
-	lw	t3,12(a1)
-	lw	t4,16(a1)
-	lw	t5,20(a1)
-	lw	t6,24(a1)
-	lw	t7,28(a1)
-        pref    0,128(a1)    # bring the next lines of src, addr 128
+/*
+ * Now dst/src are both aligned to word (or double word) boundaries.
+ * Set a2 to count how many bytes we have to copy after all the 64/128 byte
+ * chunks are copied and a3 to the dst pointer after all the 64/128 byte
+ * chunks have been copied.  We will loop, incrementing a0 and a1 until a0
+ * equals a3.
+ */
 
-	sw	t0,0(a0)
-	sw	t1,4(a0)
-	sw	t2,8(a0)
-	sw	t3,12(a0)
-	sw	t4,16(a0)
-	sw	t5,20(a0)
-	sw	t6,24(a0)
-	sw	t7,28(a0)
+	andi	t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
+	beq	a2,t8,L(chkw)	 /* if a2==t8, no 64-byte/128-byte chunks */
+	PTR_SUBU a3,a2,t8	 /* subtract from a2 the remainder */
+	PTR_ADDU a3,a0,a3	 /* Now a3 is the final dst after loop */
 
-	lw	t0,32(a1)
-	bgtz	v1,.Lskip_pref30_128	# skip "pref 30,128(a0)"
-	 lw	t1,36(a1)
-	pref    30,128(a0)   # continue setting up the dest, addr 128
-.Lskip_pref30_128:
-	lw	t2,40(a1)
-	lw	t3,44(a1)
-	lw	t4,48(a1)
-	lw	t5,52(a1)
-	lw	t6,56(a1)
-	lw	t7,60(a1)
-        pref    0, 160(a1)    # bring the next lines of src, addr 160
+/* When in the loop we may prefetch with the 'prepare to store' hint,
+ * in this case the a0+x should not be past the "t0-32" address.  This
+ * means: for x=128 the last "safe" a0 address is "t0-160".  Alternatively,
+ * for x=64 the last "safe" a0 address is "t0-96".  In the current version we
+ * will use "prefetch hint,128(a0)", so "t0-160" is the limit.
+ */
+#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+	PTR_ADDU t0,a0,a2		/* t0 is the "past the end" address */
+	PTR_SUBU t9,t0,PREFETCH_LIMIT	/* t9 is the "last safe pref" address */
+#endif
+	PREFETCH_FOR_LOAD  (0, a1)
+	PREFETCH_FOR_LOAD  (1, a1)
+	PREFETCH_FOR_LOAD  (2, a1)
+	PREFETCH_FOR_LOAD  (3, a1)
+#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
+	PREFETCH_FOR_STORE (1, a0)
+	PREFETCH_FOR_STORE (2, a0)
+	PREFETCH_FOR_STORE (3, a0)
+#endif
+#if defined(RETURN_FIRST_PREFETCH) && defined(USE_PREFETCH)
+# if PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE
+	sltu    v1,t9,a0
+	bgtz    v1,L(skip_set)
+	nop
+	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*4)
+L(skip_set):
+# else
+	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*1)
+# endif
+#endif
+#if defined(RETURN_LAST_PREFETCH) && defined(USE_PREFETCH) \
+    && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
+	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*3)
+# ifdef USE_DOUBLE
+	PTR_ADDIU v0,v0,32
+# endif
+#endif
+L(loop16w):
+	C_LD	t0,UNIT(0)(a1)
+#if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+	sltu	v1,t9,a0		/* If a0 > t9 don't use next prefetch */
+	bgtz	v1,L(skip_pref)
+#endif
+	C_LD	t1,UNIT(1)(a1)
+#ifndef R6_CODE
+	PREFETCH_FOR_STORE (4, a0)
+	PREFETCH_FOR_STORE (5, a0)
+#else
+	PREFETCH_FOR_STORE (2, a0)
+#endif
+#if defined(RETURN_LAST_PREFETCH) && defined(USE_PREFETCH)
+	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*5)
+# ifdef USE_DOUBLE
+	PTR_ADDIU v0,v0,32
+# endif
+#endif
+L(skip_pref):
+	C_LD	REG2,UNIT(2)(a1)
+	C_LD	REG3,UNIT(3)(a1)
+	C_LD	REG4,UNIT(4)(a1)
+	C_LD	REG5,UNIT(5)(a1)
+	C_LD	REG6,UNIT(6)(a1)
+	C_LD	REG7,UNIT(7)(a1)
+#ifndef R6_CODE
+	PREFETCH_FOR_LOAD (4, a1)
+#else
+	PREFETCH_FOR_LOAD (3, a1)
+#endif
+	C_ST	t0,UNIT(0)(a0)
+	C_ST	t1,UNIT(1)(a0)
+	C_ST	REG2,UNIT(2)(a0)
+	C_ST	REG3,UNIT(3)(a0)
+	C_ST	REG4,UNIT(4)(a0)
+	C_ST	REG5,UNIT(5)(a0)
+	C_ST	REG6,UNIT(6)(a0)
+	C_ST	REG7,UNIT(7)(a0)
 
-	sw	t0,32(a0)
-	sw	t1,36(a0)
-	sw	t2,40(a0)
-	sw	t3,44(a0)
-	sw	t4,48(a0)
-	sw	t5,52(a0)
-	sw	t6,56(a0)
-	sw	t7,60(a0)
-
-	addiu	a0,a0,64	# adding 64 to dest
-	sgtu	v1,a0,t9
-	bne	a0,a3,.Lloop16w
-	 addiu	a1,a1,64	# adding 64 to src
+	C_LD	t0,UNIT(8)(a1)
+	C_LD	t1,UNIT(9)(a1)
+	C_LD	REG2,UNIT(10)(a1)
+	C_LD	REG3,UNIT(11)(a1)
+	C_LD	REG4,UNIT(12)(a1)
+	C_LD	REG5,UNIT(13)(a1)
+	C_LD	REG6,UNIT(14)(a1)
+	C_LD	REG7,UNIT(15)(a1)
+#ifndef R6_CODE
+	PREFETCH_FOR_LOAD (5, a1)
+#endif
+	C_ST	t0,UNIT(8)(a0)
+	C_ST	t1,UNIT(9)(a0)
+	C_ST	REG2,UNIT(10)(a0)
+	C_ST	REG3,UNIT(11)(a0)
+	C_ST	REG4,UNIT(12)(a0)
+	C_ST	REG5,UNIT(13)(a0)
+	C_ST	REG6,UNIT(14)(a0)
+	C_ST	REG7,UNIT(15)(a0)
+	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest */
+	bne	a0,a3,L(loop16w)
+	PTR_ADDIU a1,a1,UNIT(16)	/* adding 64/128 to src */
 	move	a2,t8
 
-# Here we have src and dest word-aligned but less than 64-bytes to go
+/* Here we have src and dest word-aligned but less than 64-bytes or
+ * 128 bytes to go.  Check for a 32(64) byte chunk and copy it if there
+ * is one.  Otherwise jump down to L(chk1w) to handle the tail end of
+ * the copy.
+ */
 
-.Lchk8w:
-	pref 0, 0x0(a1)
-	andi	t8,a2,0x1f	# is there a 32-byte chunk?
-				# the t8 is the reminder count past 32-bytes
-	beq	a2,t8,.Lchk1w	# when a2=t8, no 32-byte chunk
-	 nop
+L(chkw):
+	PREFETCH_FOR_LOAD (0, a1)
+	andi	t8,a2,NSIZEMASK	/* Is there a 32-byte/64-byte chunk.  */
+				/* The t8 is the remainder count past 32 bytes */
+	beq	a2,t8,L(chk1w)	/* When a2=t8, no 32-byte chunk  */
+	nop
+	C_LD	t0,UNIT(0)(a1)
+	C_LD	t1,UNIT(1)(a1)
+	C_LD	REG2,UNIT(2)(a1)
+	C_LD	REG3,UNIT(3)(a1)
+	C_LD	REG4,UNIT(4)(a1)
+	C_LD	REG5,UNIT(5)(a1)
+	C_LD	REG6,UNIT(6)(a1)
+	C_LD	REG7,UNIT(7)(a1)
+	PTR_ADDIU a1,a1,UNIT(8)
+	C_ST	t0,UNIT(0)(a0)
+	C_ST	t1,UNIT(1)(a0)
+	C_ST	REG2,UNIT(2)(a0)
+	C_ST	REG3,UNIT(3)(a0)
+	C_ST	REG4,UNIT(4)(a0)
+	C_ST	REG5,UNIT(5)(a0)
+	C_ST	REG6,UNIT(6)(a0)
+	C_ST	REG7,UNIT(7)(a0)
+	PTR_ADDIU a0,a0,UNIT(8)
 
-	lw	t0,0(a1)
-	lw	t1,4(a1)
-	lw	t2,8(a1)
-	lw	t3,12(a1)
-	lw	t4,16(a1)
-	lw	t5,20(a1)
-	lw	t6,24(a1)
-	lw	t7,28(a1)
-	addiu	a1,a1,32
+/*
+ * Here we have less than 32(64) bytes to copy.  Set up for a loop to
+ * copy one word (or double word) at a time.  Set a2 to count how many
+ * bytes we have to copy after all the word (or double word) chunks are
+ * copied and a3 to the dst pointer after all the (d)word chunks have
+ * been copied.  We will loop, incrementing a0 and a1 until a0 equals a3.
+ */
+L(chk1w):
+	andi	a2,t8,(NSIZE-1)	/* a2 is the remainder past one (d)word chunks */
+	beq	a2,t8,L(lastb)
+	PTR_SUBU a3,t8,a2	/* a3 is count of bytes in one (d)word chunks */
+	PTR_ADDU a3,a0,a3	/* a3 is the dst address after loop */
 
-	sw	t0,0(a0)
-	sw	t1,4(a0)
-	sw	t2,8(a0)
-	sw	t3,12(a0)
-	sw	t4,16(a0)
-	sw	t5,20(a0)
-	sw	t6,24(a0)
-	sw	t7,28(a0)
-	addiu	a0,a0,32
+/* copying in words (4-byte or 8-byte chunks) */
+L(wordCopy_loop):
+	C_LD	REG3,UNIT(0)(a1)
+	PTR_ADDIU a0,a0,UNIT(1)
+	PTR_ADDIU a1,a1,UNIT(1)
+	bne	a0,a3,L(wordCopy_loop)
+	C_ST	REG3,UNIT(-1)(a0)
 
-.Lchk1w:
-	andi	a2,t8,0x3	# now a2 is the reminder past 1w chunks
-	beq	a2,t8,.Llast8
-	 subu	a3,t8,a2	# a3 is count of bytes in 1w chunks
-	addu	a3,a0,a3	# now a3 is the dst address past the 1w chunks
-
-# copying in words (4-byte chunks)
-.LwordCopy_loop:
-	lw	t3,0(a1)	# the first t3 may be equal t0 ... optimize?
-	addiu	a1,a1,4
-	addiu	a0,a0,4
-	bne	a0,a3,.LwordCopy_loop
-	 sw	t3,-4(a0)
-
-# For the last (<8) bytes
-.Llast8:
-	blez	a2,.Lleave
-	 addu	a3,a0,a2	# a3 is the last dst address
-.Llast8loop:
+/* Copy the last 8 (or 16) bytes */
+L(lastb):
+	blez	a2,L(leave)
+	PTR_ADDU a3,a0,a2	/* a3 is the last dst address */
+L(lastbloop):
 	lb	v1,0(a1)
-	addiu	a1,a1,1
-	addiu	a0,a0,1
-	bne	a0,a3,.Llast8loop
-	 sb	v1,-1(a0)
-
-.Lleave:
+	PTR_ADDIU a0,a0,1
+	PTR_ADDIU a1,a1,1
+	bne	a0,a3,L(lastbloop)
+	sb	v1,-1(a0)
+L(leave):
 	j	ra
-	 nop
+	nop
 
-#
-# UNALIGNED case
-#
+#ifndef R6_CODE
+/*
+ * UNALIGNED case, got here with a3 = "negu a0"
+ * This code is nearly identical to the aligned code above
+ * but only the destination (not the source) gets aligned
+ * so we need to do partial loads of the source followed
+ * by normal stores to the destination (once we have aligned
+ * the destination).
+ */
 
-.Lunaligned:
-	# got here with a3="negu a0"
-	andi	a3,a3,0x3	# test if the a0 is word aligned
-	beqz	a3,.Lua_chk16w
-	 subu	a2,a2,a3	# bytes left after initial a3 bytes
+L(unaligned):
+	andi	a3,a3,(NSIZE-1)	/* copy a3 bytes to align a0/a1 */
+	beqz	a3,L(ua_chk16w) /* if a3=0, it is already aligned */
+	PTR_SUBU a2,a2,a3	/* a2 is the remaining bytes count */
 
-	LWHI	v1,0(a1)
-	LWLO	v1,3(a1)
-	addu	a1,a1,a3	# a3 may be here 1, 2 or 3
-	SWHI	v1,0(a0)
-	addu	a0,a0,a3	# below the dst will be word aligned (NOTE1)
+	C_LDHI	v1,UNIT(0)(a1)
+	C_LDLO	v1,UNITM1(1)(a1)
+	PTR_ADDU a1,a1,a3
+	C_STHI	v1,UNIT(0)(a0)
+	PTR_ADDU a0,a0,a3
 
-.Lua_chk16w:
-	andi	t8,a2,0x3f	# any whole 64-byte chunks?
-				# t8 is the byte count after 64-byte chunks
-	beq	a2,t8,.Lua_chk8w # if a2==t8, no 64-byte chunks
-				# There will be at most 1 32-byte chunk after it
-	 subu	a3,a2,t8	# subtract from a2 the reminder
-                                # Here a3 counts bytes in 16w chunks
-	addu	a3,a0,a3	# Now a3 is the final dst after 64-byte chunks
+/*
+ * Now the destination (but not the source) is aligned.
+ * Set a2 to count how many bytes we have to copy after all the 64/128 byte
+ * chunks are copied and a3 to the dst pointer after all the 64/128 byte
+ * chunks have been copied.  We will loop, incrementing a0 and a1 until a0
+ * equals a3.
+ */
 
-	addu	t0,a0,a2	# t0 is the "past the end" address
+L(ua_chk16w):
+	andi	t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
+	beq	a2,t8,L(ua_chkw) /* if a2==t8, no 64-byte/128-byte chunks */
+	PTR_SUBU a3,a2,t8	 /* subtract from a2 the remainder */
+	PTR_ADDU a3,a0,a3	 /* Now a3 is the final dst after loop */
 
-	subu	t9,t0,160	# t9 is the "last safe pref 30,128(a0)" address
-
-	pref    0,0(a1)		# bring the first line of src, addr 0
-	pref    0,32(a1)	# bring the second line of src, addr 32
-	pref    0,64(a1)	# bring the third line of src, addr 64
-	pref	30,32(a0)	# safe, as we have at least 64 bytes ahead
-# In case the a0 > t9 don't use "pref 30" at all
-	sgtu	v1,a0,t9
-	bgtz	v1,.Lua_loop16w	# skip "pref 30,64(a0)" for too short arrays
-	 nop
-# otherwise, start with using pref30
-	pref	30,64(a0)
-.Lua_loop16w:
-	pref	0,96(a1)
-	LWHI	t0,0(a1)
-	LWLO	t0,3(a1)
-	LWHI	t1,4(a1)
-	bgtz	v1,.Lua_skip_pref30_96
-	 LWLO	t1,7(a1)
-	pref    30,96(a0)   # continue setting up the dest, addr 96
-.Lua_skip_pref30_96:
-	LWHI	t2,8(a1)
-	LWLO	t2,11(a1)
-	LWHI	t3,12(a1)
-	LWLO	t3,15(a1)
-	LWHI	t4,16(a1)
-	LWLO	t4,19(a1)
-	LWHI	t5,20(a1)
-	LWLO	t5,23(a1)
-	LWHI	t6,24(a1)
-	LWLO	t6,27(a1)
-	LWHI	t7,28(a1)
-	LWLO	t7,31(a1)
-        pref    0,128(a1)    # bring the next lines of src, addr 128
-
-	sw	t0,0(a0)
-	sw	t1,4(a0)
-	sw	t2,8(a0)
-	sw	t3,12(a0)
-	sw	t4,16(a0)
-	sw	t5,20(a0)
-	sw	t6,24(a0)
-	sw	t7,28(a0)
-
-	LWHI	t0,32(a1)
-	LWLO	t0,35(a1)
-	LWHI	t1,36(a1)
-	bgtz	v1,.Lua_skip_pref30_128
-	LWLO	t1,39(a1)
-	pref    30,128(a0)   # continue setting up the dest, addr 128
-.Lua_skip_pref30_128:
-	LWHI	t2,40(a1)
-	LWLO	t2,43(a1)
-	LWHI	t3,44(a1)
-	LWLO	t3,47(a1)
-	LWHI	t4,48(a1)
-	LWLO	t4,51(a1)
-	LWHI	t5,52(a1)
-	LWLO	t5,55(a1)
-	LWHI	t6,56(a1)
-	LWLO	t6,59(a1)
-	LWHI	t7,60(a1)
-	LWLO	t7,63(a1)
-        pref    0, 160(a1)    # bring the next lines of src, addr 160
-
-	sw	t0,32(a0)
-	sw	t1,36(a0)
-	sw	t2,40(a0)
-	sw	t3,44(a0)
-	sw	t4,48(a0)
-	sw	t5,52(a0)
-	sw	t6,56(a0)
-	sw	t7,60(a0)
-
-	addiu	a0,a0,64	# adding 64 to dest
-	sgtu	v1,a0,t9
-	bne	a0,a3,.Lua_loop16w
-	 addiu	a1,a1,64	# adding 64 to src
+# if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+	PTR_ADDU t0,a0,a2	  /* t0 is the "past the end" address */
+	PTR_SUBU t9,t0,PREFETCH_LIMIT /* t9 is the "last safe pref" address */
+# endif
+	PREFETCH_FOR_LOAD  (0, a1)
+	PREFETCH_FOR_LOAD  (1, a1)
+	PREFETCH_FOR_LOAD  (2, a1)
+# if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
+	PREFETCH_FOR_STORE (1, a0)
+	PREFETCH_FOR_STORE (2, a0)
+	PREFETCH_FOR_STORE (3, a0)
+# endif
+# if defined(RETURN_FIRST_PREFETCH) && defined(USE_PREFETCH)
+#  if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+	sltu    v1,t9,a0
+	bgtz    v1,L(ua_skip_set)
+	nop
+	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*4)
+L(ua_skip_set):
+#  else
+	PTR_ADDIU v0,a0,(PREFETCH_CHUNK*1)
+#  endif
+# endif
+L(ua_loop16w):
+	PREFETCH_FOR_LOAD  (3, a1)
+	C_LDHI	t0,UNIT(0)(a1)
+	C_LDHI	t1,UNIT(1)(a1)
+	C_LDHI	REG2,UNIT(2)(a1)
+# if defined(USE_PREFETCH) && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+	sltu	v1,t9,a0
+	bgtz	v1,L(ua_skip_pref)
+# endif
+	C_LDHI	REG3,UNIT(3)(a1)
+	PREFETCH_FOR_STORE (4, a0)
+	PREFETCH_FOR_STORE (5, a0)
+L(ua_skip_pref):
+	C_LDHI	REG4,UNIT(4)(a1)
+	C_LDHI	REG5,UNIT(5)(a1)
+	C_LDHI	REG6,UNIT(6)(a1)
+	C_LDHI	REG7,UNIT(7)(a1)
+	C_LDLO	t0,UNITM1(1)(a1)
+	C_LDLO	t1,UNITM1(2)(a1)
+	C_LDLO	REG2,UNITM1(3)(a1)
+	C_LDLO	REG3,UNITM1(4)(a1)
+	C_LDLO	REG4,UNITM1(5)(a1)
+	C_LDLO	REG5,UNITM1(6)(a1)
+	C_LDLO	REG6,UNITM1(7)(a1)
+	C_LDLO	REG7,UNITM1(8)(a1)
+	PREFETCH_FOR_LOAD (4, a1)
+	C_ST	t0,UNIT(0)(a0)
+	C_ST	t1,UNIT(1)(a0)
+	C_ST	REG2,UNIT(2)(a0)
+	C_ST	REG3,UNIT(3)(a0)
+	C_ST	REG4,UNIT(4)(a0)
+	C_ST	REG5,UNIT(5)(a0)
+	C_ST	REG6,UNIT(6)(a0)
+	C_ST	REG7,UNIT(7)(a0)
+	C_LDHI	t0,UNIT(8)(a1)
+	C_LDHI	t1,UNIT(9)(a1)
+	C_LDHI	REG2,UNIT(10)(a1)
+	C_LDHI	REG3,UNIT(11)(a1)
+	C_LDHI	REG4,UNIT(12)(a1)
+	C_LDHI	REG5,UNIT(13)(a1)
+	C_LDHI	REG6,UNIT(14)(a1)
+	C_LDHI	REG7,UNIT(15)(a1)
+	C_LDLO	t0,UNITM1(9)(a1)
+	C_LDLO	t1,UNITM1(10)(a1)
+	C_LDLO	REG2,UNITM1(11)(a1)
+	C_LDLO	REG3,UNITM1(12)(a1)
+	C_LDLO	REG4,UNITM1(13)(a1)
+	C_LDLO	REG5,UNITM1(14)(a1)
+	C_LDLO	REG6,UNITM1(15)(a1)
+	C_LDLO	REG7,UNITM1(16)(a1)
+	PREFETCH_FOR_LOAD (5, a1)
+	C_ST	t0,UNIT(8)(a0)
+	C_ST	t1,UNIT(9)(a0)
+	C_ST	REG2,UNIT(10)(a0)
+	C_ST	REG3,UNIT(11)(a0)
+	C_ST	REG4,UNIT(12)(a0)
+	C_ST	REG5,UNIT(13)(a0)
+	C_ST	REG6,UNIT(14)(a0)
+	C_ST	REG7,UNIT(15)(a0)
+	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest */
+	bne	a0,a3,L(ua_loop16w)
+	PTR_ADDIU a1,a1,UNIT(16)	/* adding 64/128 to src */
 	move	a2,t8
 
-# Here we have src and dest word-aligned but less than 64-bytes to go
+/* Here we have src and dest word-aligned but less than 64-bytes or
+ * 128 bytes to go.  Check for a 32(64) byte chunk and copy it if there
+ * is one.  Otherwise jump down to L(ua_chk1w) to handle the tail end of
+ * the copy.  */
 
-.Lua_chk8w:
-	pref 0, 0x0(a1)
-	andi	t8,a2,0x1f	# is there a 32-byte chunk?
-				# the t8 is the reminder count
-	beq	a2,t8,.Lua_chk1w # when a2=t8, no 32-byte chunk
-	 nop
+L(ua_chkw):
+	PREFETCH_FOR_LOAD (0, a1)
+	andi	t8,a2,NSIZEMASK	  /* Is there a 32-byte/64-byte chunk.  */
+				  /* t8 is the remainder count past 32 bytes */
+	beq	a2,t8,L(ua_chk1w) /* When a2=t8, no 32-byte chunk */
+	nop
+	C_LDHI	t0,UNIT(0)(a1)
+	C_LDHI	t1,UNIT(1)(a1)
+	C_LDHI	REG2,UNIT(2)(a1)
+	C_LDHI	REG3,UNIT(3)(a1)
+	C_LDHI	REG4,UNIT(4)(a1)
+	C_LDHI	REG5,UNIT(5)(a1)
+	C_LDHI	REG6,UNIT(6)(a1)
+	C_LDHI	REG7,UNIT(7)(a1)
+	C_LDLO	t0,UNITM1(1)(a1)
+	C_LDLO	t1,UNITM1(2)(a1)
+	C_LDLO	REG2,UNITM1(3)(a1)
+	C_LDLO	REG3,UNITM1(4)(a1)
+	C_LDLO	REG4,UNITM1(5)(a1)
+	C_LDLO	REG5,UNITM1(6)(a1)
+	C_LDLO	REG6,UNITM1(7)(a1)
+	C_LDLO	REG7,UNITM1(8)(a1)
+	PTR_ADDIU a1,a1,UNIT(8)
+	C_ST	t0,UNIT(0)(a0)
+	C_ST	t1,UNIT(1)(a0)
+	C_ST	REG2,UNIT(2)(a0)
+	C_ST	REG3,UNIT(3)(a0)
+	C_ST	REG4,UNIT(4)(a0)
+	C_ST	REG5,UNIT(5)(a0)
+	C_ST	REG6,UNIT(6)(a0)
+	C_ST	REG7,UNIT(7)(a0)
+	PTR_ADDIU a0,a0,UNIT(8)
+/*
+ * Here we have less than 32(64) bytes to copy.  Set up for a loop to
+ * copy one word (or double word) at a time.
+ */
+L(ua_chk1w):
+	andi	a2,t8,(NSIZE-1)	/* a2 is the remainder past one (d)word chunks */
+	beq	a2,t8,L(ua_smallCopy)
+	PTR_SUBU a3,t8,a2	/* a3 is count of bytes in one (d)word chunks */
+	PTR_ADDU a3,a0,a3	/* a3 is the dst address after loop */
 
-	LWHI	t0,0(a1)
-	LWLO	t0,3(a1)
-	LWHI	t1,4(a1)
-	LWLO	t1,7(a1)
-	LWHI	t2,8(a1)
-	LWLO	t2,11(a1)
-	LWHI	t3,12(a1)
-	LWLO	t3,15(a1)
-	LWHI	t4,16(a1)
-	LWLO	t4,19(a1)
-	LWHI	t5,20(a1)
-	LWLO	t5,23(a1)
-	LWHI	t6,24(a1)
-	LWLO	t6,27(a1)
-	LWHI	t7,28(a1)
-	LWLO	t7,31(a1)
-	addiu	a1,a1,32
+/* copying in words (4-byte or 8-byte chunks) */
+L(ua_wordCopy_loop):
+	C_LDHI	v1,UNIT(0)(a1)
+	C_LDLO	v1,UNITM1(1)(a1)
+	PTR_ADDIU a0,a0,UNIT(1)
+	PTR_ADDIU a1,a1,UNIT(1)
+	bne	a0,a3,L(ua_wordCopy_loop)
+	C_ST	v1,UNIT(-1)(a0)
 
-	sw	t0,0(a0)
-	sw	t1,4(a0)
-	sw	t2,8(a0)
-	sw	t3,12(a0)
-	sw	t4,16(a0)
-	sw	t5,20(a0)
-	sw	t6,24(a0)
-	sw	t7,28(a0)
-	addiu	a0,a0,32
-
-.Lua_chk1w:
-	andi	a2,t8,0x3	# now a2 is the reminder past 1w chunks
-	beq	a2,t8,.Lua_smallCopy
-	 subu	a3,t8,a2	# a3 is count of bytes in 1w chunks
-	addu	a3,a0,a3	# now a3 is the dst address past the 1w chunks
-
-# copying in words (4-byte chunks)
-.Lua_wordCopy_loop:
-	LWHI	v1,0(a1)
-	LWLO	v1,3(a1)
-	addiu	a1,a1,4
-	addiu	a0,a0,4		# note: dst=a0 is word aligned here, see NOTE1
-	bne	a0,a3,.Lua_wordCopy_loop
-	 sw	v1,-4(a0)
-
-# Now less than 4 bytes (value in a2) left to copy
-.Lua_smallCopy:
-	beqz	a2,.Lleave
-	addu	a3,a0,a2	# a3 is the last dst address
-.Lua_smallCopy_loop:
+/* Copy the last 8 (or 16) bytes */
+L(ua_smallCopy):
+	beqz	a2,L(leave)
+	PTR_ADDU a3,a0,a2	/* a3 is the last dst address */
+L(ua_smallCopy_loop):
 	lb	v1,0(a1)
-	addiu	a1,a1,1
-	addiu	a0,a0,1
-	bne	a0,a3,.Lua_smallCopy_loop
-	 sb	v1,-1(a0)
+	PTR_ADDIU a0,a0,1
+	PTR_ADDIU a1,a1,1
+	bne	a0,a3,L(ua_smallCopy_loop)
+	sb	v1,-1(a0)
 
 	j	ra
-	 nop
+	nop
+
+#else /* R6_CODE */
+
+# if __MIPSEB
+#  define SWAP_REGS(X,Y) X, Y
+#  define ALIGN_OFFSET(N) (N)
+# else
+#  define SWAP_REGS(X,Y) Y, X
+#  define ALIGN_OFFSET(N) (NSIZE-N)
+# endif
+# define R6_UNALIGNED_WORD_COPY(BYTEOFFSET) \
+	andi	REG7, a2, (NSIZE-1);/* REG7 is # of bytes to copy byte by byte. */ \
+	beq	REG7, a2, L(lastb); /* Check for bytes to copy by word	   */ \
+	PTR_SUBU a3, a2, REG7;	/* a3 is number of bytes to be copied in   */ \
+				/* (d)word chunks.			   */ \
+	move	a2, REG7;	/* a2 is # of bytes to copy byte by byte   */ \
+				/* after word loop is finished.		   */ \
+	PTR_ADDU REG6, a0, a3;	/* REG6 is the dst address after loop.	   */ \
+	PTR_SUBU REG2, a1, t8;	/* REG2 is the aligned src address.	   */ \
+	PTR_ADDU a1, a1, a3;	/* a1 is addr of source after word loop.   */ \
+	C_LD	t0, UNIT(0)(REG2);  /* Load first part of source.	   */ \
+L(r6_ua_wordcopy##BYTEOFFSET):						      \
+	C_LD	t1, UNIT(1)(REG2);  /* Load second part of source.	   */ \
+	C_ALIGN	REG3, SWAP_REGS(t1,t0), ALIGN_OFFSET(BYTEOFFSET);	      \
+	PTR_ADDIU a0, a0, UNIT(1);  /* Increment destination pointer.	   */ \
+	PTR_ADDIU REG2, REG2, UNIT(1); /* Increment aligned source pointer.*/ \
+	move	t0, t1;		/* Move second part of source to first.	   */ \
+	bne	a0, REG6,L(r6_ua_wordcopy##BYTEOFFSET);			      \
+	C_ST	REG3, UNIT(-1)(a0);					      \
+	j	L(lastb);						      \
+	nop
+
+	/* We are generating R6 code; the destination is already aligned and
+	   the source is not.  t8 is 1..NSIZE-1 depending on the alignment
+	   of the source.  */
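+
+	/* Little-endian sketch (illustrative only): with the source
+	   misaligned by "off" bytes, each output word is assembled from
+	   two aligned loads t0,t1 as
+
+	       out = (t0 >> (8*off)) | (t1 << (8*(NSIZE-off)));
+
+	   which is what the single align/dalign instruction computes.  */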
+
+L(r6_unaligned1):
+	R6_UNALIGNED_WORD_COPY(1)
+L(r6_unaligned2):
+	R6_UNALIGNED_WORD_COPY(2)
+L(r6_unaligned3):
+	R6_UNALIGNED_WORD_COPY(3)
+# ifdef USE_DOUBLE
+L(r6_unaligned4):
+	R6_UNALIGNED_WORD_COPY(4)
+L(r6_unaligned5):
+	R6_UNALIGNED_WORD_COPY(5)
+L(r6_unaligned6):
+	R6_UNALIGNED_WORD_COPY(6)
+L(r6_unaligned7):
+	R6_UNALIGNED_WORD_COPY(7)
+# endif
+#endif /* R6_CODE */
 
 	.set	at
 	.set	reorder
-
-END(memcpy)
-
-
-/************************************************************************
- *  Implementation : Static functions
- ************************************************************************/
+END(MEMCPY_NAME)
+#ifndef __ANDROID__
+# ifdef _LIBC
+libc_hidden_builtin_def (MEMCPY_NAME)
+# endif
+#endif
diff --git a/libc/arch-mips/string/memcpy.c b/libc/arch-mips/string/memcpy.c
deleted file mode 100644
index 5551a6d..0000000
--- a/libc/arch-mips/string/memcpy.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*	$OpenBSD: memcpy.c,v 1.1 2014/11/30 19:43:56 deraadt Exp $ */
-/*-
- * Copyright (c) 1990 The Regents of the University of California.
- * All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdlib.h>
-#include <syslog.h>
-
-/*
- * sizeof(word) MUST BE A POWER OF TWO
- * SO THAT wmask BELOW IS ALL ONES
- */
-typedef	long word;		/* "word" used for optimal copy speed */
-
-#define	wsize	sizeof(word)
-#define	wmask	(wsize - 1)
-
-/*
- * Copy a block of memory, not handling overlap.
- */
-void *
-memcpy(void *dst0, const void *src0, size_t length)
-{
-	char *dst = dst0;
-	const char *src = src0;
-	size_t t;
-
-	if (length == 0 || dst == src)		/* nothing to do */
-		goto done;
-
-	/*
-	 * Macros: loop-t-times; and loop-t-times, t>0
-	 */
-#define	TLOOP(s) if (t) TLOOP1(s)
-#define	TLOOP1(s) do { s; } while (--t)
-
-	/*
-	 * Copy forward.
-	 */
-	t = (long)src;	/* only need low bits */
-	if ((t | (long)dst) & wmask) {
-		/*
-		 * Try to align operands.  This cannot be done
-		 * unless the low bits match.
-		 */
-		if ((t ^ (long)dst) & wmask || length < wsize)
-			t = length;
-		else
-			t = wsize - (t & wmask);
-		length -= t;
-		TLOOP1(*dst++ = *src++);
-	}
-	/*
-	 * Copy whole words, then mop up any trailing bytes.
-	 */
-	t = length / wsize;
-	TLOOP(*(word *)dst = *(word *)src; src += wsize; dst += wsize);
-	t = length & wmask;
-	TLOOP(*dst++ = *src++);
-done:
-	return (dst0);
-}
diff --git a/libc/arch-mips/string/memset.S b/libc/arch-mips/string/memset.S
index 09b756b..65bb5b5 100644
--- a/libc/arch-mips/string/memset.S
+++ b/libc/arch-mips/string/memset.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009
+ * Copyright (c) 2013
  *      MIPS Technologies, Inc., California.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -27,216 +27,410 @@
  * SUCH DAMAGE.
  */
 
-/************************************************************************
- *
- *  memset.S, version "64h" with 1 cache line horizon for "pref 30" and 14 nops
- *  Version: "043009"
- *
- ************************************************************************/
-
-
-/************************************************************************
- *  Include files
- ************************************************************************/
-
-#include <private/bionic_asm.h>
-
-/*
- * This routine could be optimized for MIPS64. The current code only
- * uses MIPS32 instructions.
- */
-
-#if defined(__MIPSEB__)
-#  define SWHI	swl		/* high part is left in big-endian	*/
-#  define SWLO	swr		/* low part is right in big-endian	*/
-#endif
-
-#if defined(__MIPSEL__)
-#  define SWHI	swr		/* high part is right in little-endian	*/
-#  define SWLO	swl		/* low part is left in little-endian	*/
-#endif
-
-#if !(defined(XGPROF) || defined(XPROF))
-#undef SETUP_GP
-#define SETUP_GP
-#endif
-
-#ifdef NDEBUG
-#define DBG #
+#ifdef __ANDROID__
+# include <private/bionic_asm.h>
+# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
+#elif _LIBC
+# include <sysdep.h>
+# include <regdef.h>
+# include <sys/asm.h>
+# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
+#elif _COMPILING_NEWLIB
+# include "machine/asm.h"
+# include "machine/regdef.h"
+# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
 #else
-#define DBG
+# include <regdef.h>
+# include <sys/asm.h>
 #endif
 
-LEAF(memset,0)
+/* Check to see if the MIPS architecture we are compiling for supports
+   prefetching.  */
 
+#if (__mips == 4) || (__mips == 5) || (__mips == 32) || (__mips == 64)
+# ifndef DISABLE_PREFETCH
+#  define USE_PREFETCH
+# endif
+#endif
+
+#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
+# ifndef DISABLE_DOUBLE
+#  define USE_DOUBLE
+# endif
+#endif
+
+#ifndef USE_DOUBLE
+# ifndef DISABLE_DOUBLE_ALIGN
+#  define DOUBLE_ALIGN
+# endif
+#endif
+
+/* Some asm.h files do not have the L macro definition.  */
+#ifndef L
+# if _MIPS_SIM == _ABIO32
+#  define L(label) $L ## label
+# else
+#  define L(label) .L ## label
+# endif
+#endif
+
+/* Some asm.h files do not have the PTR_ADDIU macro definition.  */
+#ifndef PTR_ADDIU
+# if _MIPS_SIM == _ABIO32
+#  define PTR_ADDIU	addiu
+# else
+#  define PTR_ADDIU	daddiu
+# endif
+#endif
+
+/* New R6 instructions that may not be in asm.h.  */
+#ifndef PTR_LSA
+# if _MIPS_SIM == _ABIO32
+#  define PTR_LSA        lsa
+# else
+#  define PTR_LSA        dlsa
+# endif
+#endif
+
+/* Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
+   or PREFETCH_STORE_STREAMED offers a large performance advantage
+   but PREPAREFORSTORE has some special restrictions to consider.
+
+   Prefetch with the 'prepare for store' hint does not copy a memory
+   location into the cache, it just allocates a cache line and zeros
+   it out.  This means that if you do not write to the entire cache
+   line before writing it out to memory some data will get zeroed out
+   when the cache line is written back to memory and data will be lost.
+
+   There are ifdef'ed sections of this memset to make sure that it does not
+   do prefetches on cache lines that are not going to be completely written.
+   This code is only needed and only used when PREFETCH_STORE_HINT is set to
+   PREFETCH_HINT_PREPAREFORSTORE.  This code assumes that cache lines are
+   less than MAX_PREFETCH_SIZE bytes and if the cache line is larger it will
+   not work correctly.  */
+
+#ifdef USE_PREFETCH
+# define PREFETCH_HINT_STORE		1
+# define PREFETCH_HINT_STORE_STREAMED	5
+# define PREFETCH_HINT_STORE_RETAINED	7
+# define PREFETCH_HINT_PREPAREFORSTORE	30
+
+/* If we have not picked out what hint to use at this point use the
+   standard store prefetch hint.  */
+# ifndef PREFETCH_STORE_HINT
+#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
+# endif
+
+/* We double everything when USE_DOUBLE is true so we do 2 prefetches to
+   get 64 bytes in that case.  The assumption is that each individual
+   prefetch brings in 32 bytes.  */
+# ifdef USE_DOUBLE
+#  define PREFETCH_CHUNK 64
+#  define PREFETCH_FOR_STORE(chunk, reg) \
+    pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
+    pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
+# else
+#  define PREFETCH_CHUNK 32
+#  define PREFETCH_FOR_STORE(chunk, reg) \
+    pref PREFETCH_STORE_HINT, (chunk)*32(reg)
+# endif
+
+/* MAX_PREFETCH_SIZE is the maximum size of a prefetch, it must not be less
+   than PREFETCH_CHUNK, the assumed size of each prefetch.  If the real size
+   of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
+   hint is used, the code will not work correctly.  If PREPAREFORSTORE is not
+   used then MAX_PREFETCH_SIZE does not matter.  */
+# define MAX_PREFETCH_SIZE 128
+/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
+   than 5 on a STORE prefetch and that a single prefetch can never be larger
+   than MAX_PREFETCH_SIZE.  We add the extra 32 when USE_DOUBLE is set because
+   we actually do two prefetches in that case, one 32 bytes after the other.  */
+# ifdef USE_DOUBLE
+#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
+# else
+#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
+# endif
+
+# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
+    && ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
+/* We cannot handle this because the initial prefetches may fetch bytes that
+   are before the buffer being copied.  We start copies with an offset
+   of 4 to avoid this situation when using PREPAREFORSTORE.  */
+#  error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
+# endif
+#else /* USE_PREFETCH not defined */
+# define PREFETCH_FOR_STORE(offset, reg)
+#endif
+
+#if __mips_isa_rev > 5
+# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+#  undef PREFETCH_STORE_HINT
+#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
+# endif
+# define R6_CODE
+#endif
+
+/* Allow the routine to be named something else if desired.  */
+#ifndef MEMSET_NAME
+# define MEMSET_NAME memset
+#endif
+
+/* We load/store 64 bits at a time when USE_DOUBLE is true.
+   The C_ prefix stands for CHUNK and is used to avoid macro name
+   conflicts with system header files.  */
+
+#ifdef USE_DOUBLE
+# define C_ST	sd
+# if __MIPSEB
+#  define C_STHI	sdl	/* high part is left in big-endian	*/
+# else
+#  define C_STHI	sdr	/* high part is right in little-endian	*/
+# endif
+#else
+# define C_ST	sw
+# if __MIPSEB
+#  define C_STHI	swl	/* high part is left in big-endian	*/
+# else
+#  define C_STHI	swr	/* high part is right in little-endian	*/
+# endif
+#endif
+
+/* Bookkeeping values for 32 vs. 64 bit mode.  */
+#ifdef USE_DOUBLE
+# define NSIZE 8
+# define NSIZEMASK 0x3f
+# define NSIZEDMASK 0x7f
+#else
+# define NSIZE 4
+# define NSIZEMASK 0x1f
+# define NSIZEDMASK 0x3f
+#endif
+#define UNIT(unit) ((unit)*NSIZE)
+#define UNITM1(unit) (((unit)*NSIZE)-1)
+
+#ifdef __ANDROID__
+LEAF(MEMSET_NAME,0)
+#else
+LEAF(MEMSET_NAME)
+#endif
+
+	.set	nomips16
 	.set	noreorder
-	.set	noat
+/* If the size is less than 2*NSIZE (8 or 16), go to L(lastb).  Regardless of
+   size, copy dst pointer to v0 for the return value.  */
+	slti	t2,a2,(2 * NSIZE)
+	bne	t2,zero,L(lastb)
+	move	v0,a0
 
-	addu	t0,a0,a2		# t0 is the "past the end" address
-	slti	AT,a2,4			# is a2 less than 4?
-	bne	AT,zero,.Llast4		# if yes, go to last4
-	 move	v0,a0			# memset returns the dst pointer
+/* If memset value is not zero, we copy it to all the bytes in a 32 or 64
+   bit word.  */
+	beq	a1,zero,L(set0)		/* If memset value is zero no smear  */
+	PTR_SUBU a3,zero,a0
+	nop
 
-	beq	a1,zero,.Lset0
-	 subu	v1,zero,a0
-
-	# smear byte into 32 bit word
-#if (__mips==32) && (__mips_isa_rev>=2)
-	ins     a1, a1, 8, 8        # Replicate fill byte into half-word.
-	ins     a1, a1, 16, 16      # Replicate fill byte into word.
+	/* smear byte into 32 or 64 bit word */
+#if ((__mips == 64) || (__mips == 32)) && (__mips_isa_rev >= 2)
+# ifdef USE_DOUBLE
+	dins	a1, a1, 8, 8        /* Replicate fill byte into half-word.  */
+	dins	a1, a1, 16, 16      /* Replicate fill byte into word.       */
+	dins	a1, a1, 32, 32      /* Replicate fill byte into dbl word.   */
+# else
+	ins	a1, a1, 8, 8        /* Replicate fill byte into half-word.  */
+	ins	a1, a1, 16, 16      /* Replicate fill byte into word.       */
+# endif
 #else
-	and	a1,0xff
-	sll	AT,a1,8
-	or	a1,AT
-	sll	AT,a1,16
-	or	a1,AT
+# ifdef USE_DOUBLE
+	and	a1,0xff
+	dsll	t2,a1,8
+	or	a1,t2
+	dsll	t2,a1,16
+	or	a1,t2
+	dsll	t2,a1,32
+	or	a1,t2
+# else
+	and	a1,0xff
+	sll	t2,a1,8
+	or	a1,t2
+	sll	t2,a1,16
+	or	a1,t2
+# endif
 #endif
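+
+/* For example (illustrative): memset(p, 0xab, n) smears a1 into
+   0xabababab (or 0xabababababababab with USE_DOUBLE).  The non-R2 path
+   is equivalent to the C sequence
+       a1 &= 0xff;  a1 |= a1 << 8;  a1 |= a1 << 16;
+   with one more "a1 |= a1 << 32" step when USE_DOUBLE is set.  */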
 
-.Lset0:
-	andi	v1,v1,0x3		# word-unaligned address?
-	beq	v1,zero,.Laligned	# v1 is the unalignment count
-	 subu	a2,a2,v1
-	SWHI	a1,0(a0)
-	addu	a0,a0,v1
+/* If the destination address is not aligned do a partial store to get it
+   aligned.  If it is already aligned just jump to L(aligned).  */
+L(set0):
+#ifndef R6_CODE
+	andi	t2,a3,(NSIZE-1)		/* word-unaligned address?          */
+	beq	t2,zero,L(aligned)	/* t2 is the unalignment count      */
+	PTR_SUBU a2,a2,t2
+	C_STHI	a1,0(a0)
+	PTR_ADDU a0,a0,t2
+#else /* R6_CODE */
+	andi	t2,a0,(NSIZE-1)
+	lapc	t9,L(atable)
+	PTR_LSA	t9,t2,t9,2
+	jrc	t9
+L(atable):
+	bc	L(aligned)
+# ifdef USE_DOUBLE
+	bc	L(lb7)
+	bc	L(lb6)
+	bc	L(lb5)
+	bc	L(lb4)
+# endif
+	bc	L(lb3)
+	bc	L(lb2)
+	bc	L(lb1)
+L(lb7):
+	sb	a1,6(a0)
+L(lb6):
+	sb	a1,5(a0)
+L(lb5):
+	sb	a1,4(a0)
+L(lb4):
+	sb	a1,3(a0)
+L(lb3):
+	sb	a1,2(a0)
+L(lb2):
+	sb	a1,1(a0)
+L(lb1):
+	sb	a1,0(a0)
 
-# Here we have the "word-aligned" a0 (until the "last4")
-.Laligned:
-	andi	t8,a2,0x3f	# any 64-byte chunks?
-				# t8 is the byte count past 64-byte chunks
-	beq	a2,t8,.Lchk8w	# when a2==t8, no 64-byte chunks
-				# There will be at most 1 32-byte chunk then
-	 subu	a3,a2,t8	# subtract from a2 the reminder
-				# Here a3 counts bytes in 16w chunks
-	addu	a3,a0,a3	# Now a3 is the final dst after 64-byte chunks
+	li	t9,NSIZE
+	subu	t2,t9,t2
+	PTR_SUBU a2,a2,t2
+	PTR_ADDU a0,a0,t2
+#endif /* R6_CODE */
 
-# Find out, if there are any 64-byte chunks after which will be still at least
-# 96 bytes left. The value "96" is calculated as needed buffer for
-# "pref 30,64(a0)" prefetch, which can be used as "pref 30,0(a0)" after
-# incrementing "a0" by 64.
-# For "a2" below 160 there will be no such "pref 30 safe" 64-byte chunk.
-#
-	sltiu	v1,a2,160
-	bgtz	v1,.Lloop16w_nopref30	# skip "pref 30,0(a0)"
-	 subu	t7,a2,96	# subtract "pref 30 unsafe" region
-		# below we have at least 1 64-byte chunk which is "pref 30 safe"
-	andi	t6,t7,0x3f	# t6 is past "64-byte safe chunks" reminder
-	subu	t5,t7,t6	# subtract from t7 the reminder
-				# Here t5 counts bytes in 16w "safe" chunks
-	addu	t4,a0,t5	# Now t4 is the dst after 64-byte "safe" chunks
-
-# Don't use "pref 30,0(a0)" for a0 in a "middle" of a cache line
-#	pref	30,0(a0)
-# Here we are in the region, where it is safe to use "pref 30,64(a0)"
-.Lloop16w:
-	addiu	a0,a0,64
-	pref	30,-32(a0)	# continue setting up the dest, addr 64-32
-	sw	a1,-64(a0)
-	sw	a1,-60(a0)
-	sw	a1,-56(a0)
-	sw	a1,-52(a0)
-	sw	a1,-48(a0)
-	sw	a1,-44(a0)
-	sw	a1,-40(a0)
-	sw	a1,-36(a0)
-	nop
-	nop			# the extra nop instructions help to balance
-	nop			# cycles needed for "store" + "fill" + "evict"
-	nop			# For 64byte store there are needed 8 fill
-	nop			# and 8 evict cycles, i.e. at least 32 instr.
-	nop
-	nop
-	pref	30,0(a0)	# continue setting up the dest, addr 64-0
-	sw	a1,-32(a0)
-	sw	a1,-28(a0)
-	sw	a1,-24(a0)
-	sw	a1,-20(a0)
-	sw	a1,-16(a0)
-	sw	a1,-12(a0)
-	sw	a1,-8(a0)
-	sw	a1,-4(a0)
-	nop
-	nop
-	nop
-	nop			# NOTE: adding 14 nop-s instead of 12 nop-s
-	nop			# gives better results for "fast" memory
-	nop
-	bne	a0,t4,.Lloop16w
-	 nop
-
-	beq	a0,a3,.Lchk8w	# maybe no more 64-byte chunks?
-	 nop			# this "delayed slot" is useless ...
-
-.Lloop16w_nopref30:	# there could be up to 3 "64-byte nopref30" chunks
-	addiu	a0,a0,64
-	sw	a1,-64(a0)
-	sw	a1,-60(a0)
-	sw	a1,-56(a0)
-	sw	a1,-52(a0)
-	sw	a1,-48(a0)
-	sw	a1,-44(a0)
-	sw	a1,-40(a0)
-	sw	a1,-36(a0)
-	sw	a1,-32(a0)
-	sw	a1,-28(a0)
-	sw	a1,-24(a0)
-	sw	a1,-20(a0)
-	sw	a1,-16(a0)
-	sw	a1,-12(a0)
-	sw	a1,-8(a0)
-	bne	a0,a3,.Lloop16w_nopref30
-	 sw	a1,-4(a0)
-
-.Lchk8w:		# t8 here is the byte count past 64-byte chunks
-
-	andi	t7,t8,0x1f	# is there a 32-byte chunk?
-				# the t7 is the reminder count past 32-bytes
-	beq	t8,t7,.Lchk1w	# when t8==t7, no 32-byte chunk
-	 move	a2,t7
-
+L(aligned):
+/* If USE_DOUBLE is not set we may still want to align the data on an 8
+   byte boundary instead of a 4 byte boundary to maximize the opportunity
+   of proAptiv chips to do memory bonding (combining two sequential 4
+   byte stores into one 8 byte store).  We know there are at least 4 bytes
+   left to store or we would have jumped to L(lastb) earlier in the code.  */
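+/* Example (illustrative): if a0 % 8 == 4 here, the single sw below makes
+   a0 8-byte aligned, so following pairs of sequential sw stores can be
+   bonded by the hardware into single 8-byte stores.  */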
+#ifdef DOUBLE_ALIGN
+	andi	t2,a3,4
+	beq	t2,zero,L(double_aligned)
+	PTR_SUBU a2,a2,t2
 	sw	a1,0(a0)
-	sw	a1,4(a0)
-	sw	a1,8(a0)
-	sw	a1,12(a0)
-	sw	a1,16(a0)
-	sw	a1,20(a0)
-	sw	a1,24(a0)
-	sw	a1,28(a0)
-	addiu	a0,a0,32
+	PTR_ADDU a0,a0,t2
+L(double_aligned):
+#endif
 
-.Lchk1w:
-	andi	t8,a2,0x3	# now t8 is the reminder past 1w chunks
-	beq	a2,t8,.Llast4aligned
-	 subu	a3,a2,t8	# a3 is the count of bytes in 1w chunks
-	addu	a3,a0,a3	# now a3 is the dst address past the 1w chunks
+/* Now the destination is aligned to a word (or double word) boundary.
+   Set a2 to count how many bytes we have to set after all the 64/128 byte
+   chunks are stored and a3 to the dest pointer after all the 64/128 byte
+   chunks have been stored.  We will loop, incrementing a0 until it equals
+   a3.  */
+	andi	t8,a2,NSIZEDMASK /* any whole 64-byte/128-byte chunks? */
+	beq	a2,t8,L(chkw)	 /* if a2==t8, no 64-byte/128-byte chunks */
+	PTR_SUBU a3,a2,t8	 /* subtract from a2 the remainder */
+	PTR_ADDU a3,a0,a3	 /* Now a3 is the final dst after loop */
 
-# copying in words (4-byte chunks)
-.LwordCopy_loop:
-	addiu	a0,a0,4
-	bne	a0,a3,.LwordCopy_loop
-	 sw	a1,-4(a0)
+/* When in the loop we may prefetch with the 'prepare to store' hint,
+   in this case the a0+x should not be past the "t0-32" address.  This
+   means: for x=128 the last "safe" a0 address is "t0-160".  Alternatively,
+   for x=64 the last "safe" a0 address is "t0-96".  In the current version we
+   will use "prefetch hint,128(a0)", so "t0-160" is the limit.  */
+#if defined(USE_PREFETCH) \
+    && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+	PTR_ADDU t0,a0,a2		/* t0 is the "past the end" address */
+	PTR_SUBU t9,t0,PREFETCH_LIMIT	/* t9 is the "last safe pref" address */
+#endif
+#if defined(USE_PREFETCH) \
+    && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
+	PREFETCH_FOR_STORE (1, a0)
+	PREFETCH_FOR_STORE (2, a0)
+	PREFETCH_FOR_STORE (3, a0)
+#endif
 
-# store last 0-3 bytes
-# this will repeat the last store if the memset finishes on a word boundary
-.Llast4aligned:
+L(loop16w):
+#if defined(USE_PREFETCH) \
+    && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+	sltu	v1,t9,a0		/* If a0 > t9 don't use next prefetch */
+	bgtz	v1,L(skip_pref)
+	nop
+#endif
+#ifndef R6_CODE
+	PREFETCH_FOR_STORE (4, a0)
+	PREFETCH_FOR_STORE (5, a0)
+#else
+	PREFETCH_FOR_STORE (2, a0)
+#endif
+L(skip_pref):
+	C_ST	a1,UNIT(0)(a0)
+	C_ST	a1,UNIT(1)(a0)
+	C_ST	a1,UNIT(2)(a0)
+	C_ST	a1,UNIT(3)(a0)
+	C_ST	a1,UNIT(4)(a0)
+	C_ST	a1,UNIT(5)(a0)
+	C_ST	a1,UNIT(6)(a0)
+	C_ST	a1,UNIT(7)(a0)
+	C_ST	a1,UNIT(8)(a0)
+	C_ST	a1,UNIT(9)(a0)
+	C_ST	a1,UNIT(10)(a0)
+	C_ST	a1,UNIT(11)(a0)
+	C_ST	a1,UNIT(12)(a0)
+	C_ST	a1,UNIT(13)(a0)
+	C_ST	a1,UNIT(14)(a0)
+	C_ST	a1,UNIT(15)(a0)
+	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest */
+	bne	a0,a3,L(loop16w)
+	nop
+	move	a2,t8
+
+/* Here we have dest word-aligned but less than 64-bytes or 128 bytes to go.
+   Check for a 32(64) byte chunk and set it if there is one.  Otherwise
+   jump down to L(chk1w) to handle the tail end.  */
+L(chkw):
+	andi	t8,a2,NSIZEMASK	/* is there a 32-byte/64-byte chunk.  */
+				/* the t8 is the remainder count past 32 bytes */
+	beq	a2,t8,L(chk1w)	/* when a2==t8, no 32-byte chunk */
+	nop
+	C_ST	a1,UNIT(0)(a0)
+	C_ST	a1,UNIT(1)(a0)
+	C_ST	a1,UNIT(2)(a0)
+	C_ST	a1,UNIT(3)(a0)
+	C_ST	a1,UNIT(4)(a0)
+	C_ST	a1,UNIT(5)(a0)
+	C_ST	a1,UNIT(6)(a0)
+	C_ST	a1,UNIT(7)(a0)
+	PTR_ADDIU a0,a0,UNIT(8)
+
+/* Here we have less than 32(64) bytes to set.  Set up for a loop to
+   store one word (or double word) at a time.  Set a2 to count how many
+   bytes we have to set after all the word (or double word) chunks are
+   stored and a3 to the dest pointer after all the (d)word chunks have
+   been stored.  We will loop, incrementing a0 until a0 equals a3.  */
+L(chk1w):
+	andi	a2,t8,(NSIZE-1)	/* a2 is the remainder past one (d)word chunks */
+	beq	a2,t8,L(lastb)
+	PTR_SUBU a3,t8,a2	/* a3 is count of bytes in one (d)word chunks */
+	PTR_ADDU a3,a0,a3	/* a3 is the dst address after loop */
+
+/* storing in words (4-byte or 8-byte chunks) */
+L(wordCopy_loop):
+	PTR_ADDIU a0,a0,UNIT(1)
+	bne	a0,a3,L(wordCopy_loop)
+	C_ST	a1,UNIT(-1)(a0)
+
+/* Store the last 8 (or 16) bytes */
+L(lastb):
+	blez	a2,L(leave)
+	PTR_ADDU a3,a0,a2       /* a3 is the last dst address */
+L(lastbloop):
+	PTR_ADDIU a0,a0,1
+	bne	a0,a3,L(lastbloop)
+	sb	a1,-1(a0)
+L(leave):
 	j	ra
-	 SWLO	a1,-1(t0)
-
-.Llast4:
-	beq	a0,t0,.Llast4e
-.Llast4l:
-	 addiu	a0,a0,1
-	bne	a0,t0,.Llast4l
-	 sb	a1,-1(a0)
-.Llast4e:
-	j	ra
-	 nop
+	nop
 
 	.set	at
 	.set	reorder
-
-END(memset)
-
-
-/************************************************************************
- *  Implementation : Static functions
- ************************************************************************/
+END(MEMSET_NAME)
+#ifndef __ANDROID__
+# ifdef _LIBC
+libc_hidden_builtin_def (MEMSET_NAME)
+# endif
+#endif
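
For orientation, the strategy the new memset.S implements above (smear the
fill byte into a register, stream 64-byte chunks, store one optional 32-byte
chunk, then whole words, then a byte tail) maps roughly onto the C sketch
below. This is a sketch only, under the 32-bit non-USE_DOUBLE configuration:
the alignment fix-up, the prefetching, and the UNIT/NSIZE macros are omitted,
and every name in it is invented for illustration.

    #include <stddef.h>
    #include <stdint.h>

    void *memset_sketch(void *dst, int c, size_t n) {
        unsigned char *p = dst;
        /* Smear the fill byte into a 32-bit word: 0x41 -> 0x41414141. */
        uint32_t w = (uint8_t)c;
        w |= w << 8;
        w |= w << 16;
        /* 64-byte chunks: the L(loop16w) block, 16 word stores per pass.
           The word stores assume dst is already word-aligned; the assembly
           aligns it first. */
        unsigned char *end64 = p + (n & ~(size_t)63);
        for (; p != end64; p += 64)
            for (int i = 0; i < 16; i++)
                ((uint32_t *)p)[i] = w;
        n &= 63;
        /* One optional 32-byte chunk: the L(chkw) block. */
        if (n & 32) {
            for (int i = 0; i < 8; i++)
                ((uint32_t *)p)[i] = w;
            p += 32;
            n -= 32;
        }
        /* Whole words (L(wordCopy_loop)), then the byte tail (L(lastb)). */
        for (; n >= 4; n -= 4, p += 4)
            *(uint32_t *)p = w;
        while (n--)
            *p++ = (unsigned char)c;
        return dst;
    }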
diff --git a/libc/arch-mips/string/memset.c b/libc/arch-mips/string/memset.c
deleted file mode 100644
index 41dafb2..0000000
--- a/libc/arch-mips/string/memset.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include <string.h>
-#include <stdint.h>
-
-void*  memset(void*  dst, int c, size_t n)
-{
-    char*  q   = dst;
-    char*  end = q + n;
-
-    for (;;) {
-        if (q >= end) break; *q++ = (char) c;
-        if (q >= end) break; *q++ = (char) c;
-        if (q >= end) break; *q++ = (char) c;
-        if (q >= end) break; *q++ = (char) c;
-    }
-
-  return dst;
-}
diff --git a/libc/arch-mips/string/strcmp.S b/libc/arch-mips/string/strcmp.S
new file mode 100644
index 0000000..2b67f5a
--- /dev/null
+++ b/libc/arch-mips/string/strcmp.S
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2014
+ *      Imagination Technologies Limited.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY IMAGINATION TECHNOLOGIES LIMITED ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IMAGINATION TECHNOLOGIES LIMITED BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __ANDROID__
+# include <private/bionic_asm.h>
+#elif _LIBC
+# include <sysdep.h>
+# include <regdef.h>
+# include <sys/asm.h>
+#elif _COMPILING_NEWLIB
+# include "machine/asm.h"
+# include "machine/regdef.h"
+#else
+# include <regdef.h>
+# include <sys/asm.h>
+#endif
+
+/* Technically strcmp should not read past the end of the strings being
+   compared.  We will read a full word that may contain excess bytes beyond
+   the NUL terminator, but unless ENABLE_READAHEAD is set, we will not
+   read the next word after the end of the string.  Setting ENABLE_READAHEAD
+   will improve performance but is technically illegal based on the
+   definition of strcmp.  */
+#ifdef ENABLE_READAHEAD
+# define DELAY_READ
+#else
+# define DELAY_READ nop
+#endif
+
+/* Testing on a little-endian machine showed that using CLZ was a
+   performance loss, so it is not turned on by default.  */
+#if defined(ENABLE_CLZ) && (__mips_isa_rev > 1)
+# define USE_CLZ
+#endif
+
+/* Some asm.h files do not have the L macro definition.  */
+#ifndef L
+# if _MIPS_SIM == _ABIO32
+#  define L(label) $L ## label
+# else
+#  define L(label) .L ## label
+# endif
+#endif
+
+/* Some asm.h files do not have the PTR_ADDIU macro definition.  */
+#ifndef PTR_ADDIU
+# if _MIPS_SIM == _ABIO32
+#  define PTR_ADDIU       addiu
+# else
+#  define PTR_ADDIU       daddiu
+# endif
+#endif
+
+/* Allow the routine to be named something else if desired.  */
+#ifndef STRCMP_NAME
+# define STRCMP_NAME strcmp
+#endif
+
+#ifdef __ANDROID__
+LEAF(STRCMP_NAME, 0)
+#else
+LEAF(STRCMP_NAME)
+#endif
+	.set	nomips16
+	.set	noreorder
+
+	or	t0, a0, a1
+	andi	t0,0x3
+	bne	t0, zero, L(byteloop)
+
+/* Both strings are 4-byte aligned at this point.  */
+
+	lui	t8, 0x0101
+	ori	t8, t8, 0x0101
+	lui	t9, 0x7f7f
+	ori	t9, 0x7f7f
+
+#define STRCMP32(OFFSET) \
+	lw	v0, OFFSET(a0); \
+	lw	v1, OFFSET(a1); \
+	subu	t0, v0, t8; \
+	bne	v0, v1, L(worddiff); \
+	nor	t1, v0, t9; \
+	and	t0, t0, t1; \
+	bne	t0, zero, L(returnzero)
+
+L(wordloop):
+	STRCMP32(0)
+	DELAY_READ
+	STRCMP32(4)
+	DELAY_READ
+	STRCMP32(8)
+	DELAY_READ
+	STRCMP32(12)
+	DELAY_READ
+	STRCMP32(16)
+	DELAY_READ
+	STRCMP32(20)
+	DELAY_READ
+	STRCMP32(24)
+	DELAY_READ
+	STRCMP32(28)
+	PTR_ADDIU a0, a0, 32
+	b	L(wordloop)
+	PTR_ADDIU a1, a1, 32
+
+L(returnzero):
+	j	ra
+	move	v0, zero
+
+L(worddiff):
+#ifdef USE_CLZ
+	subu	t0, v0, t8
+	nor	t1, v0, t9
+	and	t1, t0, t1
+	xor	t0, v0, v1
+	or	t0, t0, t1
+# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	wsbh	t0, t0
+	rotr	t0, t0, 16
+# endif
+	clz	t1, t0
+	and	t1, 0xf8
+# if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	neg	t1
+	addu	t1, 24
+# endif
+	rotrv	v0, v0, t1
+	rotrv	v1, v1, t1
+	and	v0, v0, 0xff
+	and	v1, v1, 0xff
+	j	ra
+	subu	v0, v0, v1
+#else /* USE_CLZ */
+# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	andi	t0, v0, 0xff
+	beq	t0, zero, L(wexit01)
+	andi	t1, v1, 0xff
+	bne	t0, t1, L(wexit01)
+
+	srl	t8, v0, 8
+	srl	t9, v1, 8
+	andi	t8, t8, 0xff
+	beq	t8, zero, L(wexit89)
+	andi	t9, t9, 0xff
+	bne	t8, t9, L(wexit89)
+
+	srl	t0, v0, 16
+	srl	t1, v1, 16
+	andi	t0, t0, 0xff
+	beq	t0, zero, L(wexit01)
+	andi	t1, t1, 0xff
+	bne	t0, t1, L(wexit01)
+
+	srl	t8, v0, 24
+	srl	t9, v1, 24
+# else /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
+	srl	t0, v0, 24
+	beq	t0, zero, L(wexit01)
+	srl	t1, v1, 24
+	bne	t0, t1, L(wexit01)
+
+	srl	t8, v0, 16
+	srl	t9, v1, 16
+	andi	t8, t8, 0xff
+	beq	t8, zero, L(wexit89)
+	andi	t9, t9, 0xff
+	bne	t8, t9, L(wexit89)
+
+	srl	t0, v0, 8
+	srl	t1, v1, 8
+	andi	t0, t0, 0xff
+	beq	t0, zero, L(wexit01)
+	andi	t1, t1, 0xff
+	bne	t0, t1, L(wexit01)
+
+	andi	t8, v0, 0xff
+	andi	t9, v1, 0xff
+# endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
+
+L(wexit89):
+	j	ra
+	subu	v0, t8, t9
+L(wexit01):
+	j	ra
+	subu	v0, t0, t1
+#endif /* USE_CLZ */
+
+/* It might seem better to do the 'beq' instruction between the two 'lbu'
+   instructions so that the nop is not needed, but testing showed that this
+   code is actually faster (based on the glibc strcmp test).  */
+#define BYTECMP01(OFFSET) \
+	lbu	v0, OFFSET(a0); \
+	lbu	v1, OFFSET(a1); \
+	beq	v0, zero, L(bexit01); \
+	nop; \
+	bne	v0, v1, L(bexit01)
+
+#define BYTECMP89(OFFSET) \
+	lbu	t8, OFFSET(a0); \
+	lbu	t9, OFFSET(a1); \
+	beq	t8, zero, L(bexit89); \
+	nop;	\
+	bne	t8, t9, L(bexit89)
+
+L(byteloop):
+	BYTECMP01(0)
+	BYTECMP89(1)
+	BYTECMP01(2)
+	BYTECMP89(3)
+	BYTECMP01(4)
+	BYTECMP89(5)
+	BYTECMP01(6)
+	BYTECMP89(7)
+	PTR_ADDIU a0, a0, 8
+	b	L(byteloop)
+	PTR_ADDIU a1, a1, 8
+
+L(bexit01):
+	j	ra
+	subu	v0, v0, v1
+L(bexit89):
+	j	ra
+	subu	v0, t8, t9
+
+	.set	at
+	.set	reorder
+
+END(STRCMP_NAME)
+#ifndef __ANDROID__
+# ifdef _LIBC
+libc_hidden_builtin_def (STRCMP_NAME)
+# endif
+#endif
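
The word loop above rests on the classic zero-in-word test: with
t8 = 0x01010101 and t9 = 0x7f7f7f7f, STRCMP32 computes
(v0 - 0x01010101) & ~v0 & 0x80808080 (note that "nor t1,v0,t9" yields
~v0 & 0x80808080), which is nonzero exactly when some byte of v0 is zero.
A rough C rendering of the aligned word loop and its non-CLZ fallback is
sketched below; the names are invented for illustration, and a C version
shares the assembly's caveat about reading a whole word that may extend
past the terminator.

    #include <stdint.h>

    /* Nonzero iff some byte of w is 0x00. */
    static uint32_t has_zero_byte(uint32_t w) {
        return (w - 0x01010101u) & ~w & 0x80808080u;
    }

    /* Word-at-a-time core, assuming both pointers are 4-byte aligned
       (unaligned inputs take the L(byteloop) path instead). */
    int strcmp_sketch(const char *a, const char *b) {
        const uint32_t *wa = (const uint32_t *)a;
        const uint32_t *wb = (const uint32_t *)b;
        uint32_t va, vb;
        for (;;) {
            va = *wa++;
            vb = *wb++;
            if (va != vb)
                break;              /* L(worddiff) */
            if (has_zero_byte(va))
                return 0;           /* L(returnzero): equal through the NUL */
        }
        /* Non-CLZ fallback: scan the differing word byte by byte in
           string order, stopping at the first mismatch or NUL. */
        const unsigned char *pa = (const unsigned char *)(wa - 1);
        const unsigned char *pb = (const unsigned char *)(wb - 1);
        while (*pa == *pb && *pa != 0) {
            pa++;
            pb++;
        }
        return *pa - *pb;
    }

The USE_CLZ variant replaces the byte-by-byte fallback with a
count-leading-zeros computation that locates the differing byte directly,
but as the comment above notes, it measured slower on little-endian
hardware, so it stays off by default.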
diff --git a/libc/arch-mips64/mips64.mk b/libc/arch-mips64/mips64.mk
index 2b81e63..7757385 100644
--- a/libc/arch-mips64/mips64.mk
+++ b/libc/arch-mips64/mips64.mk
@@ -5,6 +5,11 @@
 #
 
 libc_bionic_src_files_mips64 += \
+    arch-mips/string/memcmp.c \
+    arch-mips/string/memcpy.S \
+    arch-mips/string/memset.S \
+    arch-mips/string/strcmp.S \
+    arch-mips/string/strlen.c \
     bionic/__memcpy_chk.cpp \
     bionic/__memset_chk.cpp \
     bionic/__strcpy_chk.cpp \
@@ -12,10 +17,6 @@
     bionic/strchr.cpp \
     bionic/strnlen.c \
     bionic/strrchr.cpp \
-    arch-mips/string/memcmp.c \
-    arch-mips/string/memcpy.c \
-    arch-mips/string/memset.c \
-    arch-mips/string/strlen.c \
 
 libc_freebsd_src_files_mips64 += \
     upstream-freebsd/lib/libc/string/wcscat.c \
@@ -34,7 +35,6 @@
     upstream-openbsd/lib/libc/string/stpcpy.c \
     upstream-openbsd/lib/libc/string/stpncpy.c \
     upstream-openbsd/lib/libc/string/strcat.c \
-    upstream-openbsd/lib/libc/string/strcmp.c \
     upstream-openbsd/lib/libc/string/strcpy.c \
     upstream-openbsd/lib/libc/string/strlcat.c \
     upstream-openbsd/lib/libc/string/strlcpy.c \
diff --git a/libc/arch-mips64/string/memcpy.S b/libc/arch-mips64/string/memcpy.S
deleted file mode 100644
index dc91096..0000000
--- a/libc/arch-mips64/string/memcpy.S
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * Copyright (c) 2009
- *      MIPS Technologies, Inc., California.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/************************************************************************
- *
- *  memcpy.S
- *  Version: "043009"
- *
- ************************************************************************/
-
-
-/************************************************************************
- *  Include files
- ************************************************************************/
-
-#include <private/bionic_asm.h>
-
-
-/*
- * This routine could be optimized for MIPS64. The current code only
- * uses MIPS32 instructions.
- */
-#if defined(__MIPSEB__)
-#  define LWHI	lwl		/* high part is left in big-endian	*/
-#  define SWHI	swl		/* high part is left in big-endian	*/
-#  define LWLO	lwr		/* low part is right in big-endian	*/
-#  define SWLO	swr		/* low part is right in big-endian	*/
-#endif
-
-#if defined(__MIPSEL__)
-#  define LWHI	lwr		/* high part is right in little-endian	*/
-#  define SWHI	swr		/* high part is right in little-endian	*/
-#  define LWLO	lwl		/* low part is left in big-endian	*/
-#  define SWLO	swl		/* low part is left in big-endian	*/
-#endif
-
-LEAF(memcpy,0)
-
-	.set	noreorder
-	.set	noat
-/*
- * Below we handle the case where memcpy is called with overlapping src and dst.
- * Although memcpy is not required to handle this case, some parts of Android like Skia
- * rely on such usage. We call memmove to handle such cases.
- */
-	subu	t0,a0,a1
-	sra	AT,t0,31
-	xor	t1,t0,AT
-	subu	t0,t1,AT
-	sltu	AT,t0,a2
-	beq	AT,zero,.Lmemcpy
-	 la	t9,memmove
-	jr	t9
-	 nop
-.Lmemcpy:
-	slti	AT,a2,8
-	bne	AT,zero,.Llast8
-	 move	v0,a0	# memcpy returns the dst pointer
-
-# Test if the src and dst are word-aligned, or can be made word-aligned
-	xor	t8,a1,a0
-	andi	t8,t8,0x3		# t8 is a0/a1 word-displacement
-
-	bne	t8,zero,.Lunaligned
-	 negu	a3,a0
-
-	andi	a3,a3,0x3	# we need to copy a3 bytes to make a0/a1 aligned
-	beq	a3,zero,.Lchk16w # when a3=0 then the dst (a0) is word-aligned
-	 subu	a2,a2,a3	# now a2 is the remining bytes count
-
-	LWHI	t8,0(a1)
-	addu	a1,a1,a3
-	SWHI	t8,0(a0)
-	addu	a0,a0,a3
-
-# Now the dst/src are mutually word-aligned with word-aligned addresses
-.Lchk16w:
-	andi	t8,a2,0x3f	# any whole 64-byte chunks?
-				# t8 is the byte count after 64-byte chunks
-
-	beq	a2,t8,.Lchk8w	# if a2==t8, no 64-byte chunks
-				# There will be at most 1 32-byte chunk after it
-	 subu	a3,a2,t8	# subtract from a2 the reminder
-                                # Here a3 counts bytes in 16w chunks
-	addu	a3,a0,a3	# Now a3 is the final dst after 64-byte chunks
-
-	addu	t0,a0,a2	# t0 is the "past the end" address
-
-# When in the loop we exercise "pref 30,x(a0)", the a0+x should not be past
-# the "t0-32" address
-# This means: for x=128 the last "safe" a0 address is "t0-160"
-# Alternatively, for x=64 the last "safe" a0 address is "t0-96"
-# In the current version we will use "pref 30,128(a0)", so "t0-160" is the limit
-	subu	t9,t0,160	# t9 is the "last safe pref 30,128(a0)" address
-
-	pref    0,0(a1)		# bring the first line of src, addr 0
-	pref    0,32(a1)	# bring the second line of src, addr 32
-	pref    0,64(a1)	# bring the third line of src, addr 64
-	pref	30,32(a0)	# safe, as we have at least 64 bytes ahead
-# In case the a0 > t9 don't use "pref 30" at all
-	sgtu	v1,a0,t9
-	bgtz	v1,.Lloop16w	# skip "pref 30,64(a0)" for too short arrays
-	 nop
-# otherwise, start with using pref30
-	pref	30,64(a0)
-.Lloop16w:
-	pref	0,96(a1)
-	lw	t0,0(a1)
-	bgtz	v1,.Lskip_pref30_96	# skip "pref 30,96(a0)"
-	 lw	t1,4(a1)
-	pref    30,96(a0)   # continue setting up the dest, addr 96
-.Lskip_pref30_96:
-	lw	t2,8(a1)
-	lw	t3,12(a1)
-	lw	t4,16(a1)
-	lw	t5,20(a1)
-	lw	t6,24(a1)
-	lw	t7,28(a1)
-        pref    0,128(a1)    # bring the next lines of src, addr 128
-
-	sw	t0,0(a0)
-	sw	t1,4(a0)
-	sw	t2,8(a0)
-	sw	t3,12(a0)
-	sw	t4,16(a0)
-	sw	t5,20(a0)
-	sw	t6,24(a0)
-	sw	t7,28(a0)
-
-	lw	t0,32(a1)
-	bgtz	v1,.Lskip_pref30_128	# skip "pref 30,128(a0)"
-	 lw	t1,36(a1)
-	pref    30,128(a0)   # continue setting up the dest, addr 128
-.Lskip_pref30_128:
-	lw	t2,40(a1)
-	lw	t3,44(a1)
-	lw	t4,48(a1)
-	lw	t5,52(a1)
-	lw	t6,56(a1)
-	lw	t7,60(a1)
-        pref    0, 160(a1)    # bring the next lines of src, addr 160
-
-	sw	t0,32(a0)
-	sw	t1,36(a0)
-	sw	t2,40(a0)
-	sw	t3,44(a0)
-	sw	t4,48(a0)
-	sw	t5,52(a0)
-	sw	t6,56(a0)
-	sw	t7,60(a0)
-
-	addiu	a0,a0,64	# adding 64 to dest
-	sgtu	v1,a0,t9
-	bne	a0,a3,.Lloop16w
-	 addiu	a1,a1,64	# adding 64 to src
-	move	a2,t8
-
-# Here we have src and dest word-aligned but less than 64-bytes to go
-
-.Lchk8w:
-	pref 0, 0x0(a1)
-	andi	t8,a2,0x1f	# is there a 32-byte chunk?
-				# the t8 is the reminder count past 32-bytes
-	beq	a2,t8,.Lchk1w	# when a2=t8, no 32-byte chunk
-	 nop
-
-	lw	t0,0(a1)
-	lw	t1,4(a1)
-	lw	t2,8(a1)
-	lw	t3,12(a1)
-	lw	t4,16(a1)
-	lw	t5,20(a1)
-	lw	t6,24(a1)
-	lw	t7,28(a1)
-	addiu	a1,a1,32
-
-	sw	t0,0(a0)
-	sw	t1,4(a0)
-	sw	t2,8(a0)
-	sw	t3,12(a0)
-	sw	t4,16(a0)
-	sw	t5,20(a0)
-	sw	t6,24(a0)
-	sw	t7,28(a0)
-	addiu	a0,a0,32
-
-.Lchk1w:
-	andi	a2,t8,0x3	# now a2 is the reminder past 1w chunks
-	beq	a2,t8,.Llast8
-	 subu	a3,t8,a2	# a3 is count of bytes in 1w chunks
-	addu	a3,a0,a3	# now a3 is the dst address past the 1w chunks
-
-# copying in words (4-byte chunks)
-.LwordCopy_loop:
-	lw	t3,0(a1)	# the first t3 may be equal t0 ... optimize?
-	addiu	a1,a1,4
-	addiu	a0,a0,4
-	bne	a0,a3,.LwordCopy_loop
-	 sw	t3,-4(a0)
-
-# For the last (<8) bytes
-.Llast8:
-	blez	a2,.Lleave
-	 addu	a3,a0,a2	# a3 is the last dst address
-.Llast8loop:
-	lb	v1,0(a1)
-	addiu	a1,a1,1
-	addiu	a0,a0,1
-	bne	a0,a3,.Llast8loop
-	 sb	v1,-1(a0)
-
-.Lleave:
-	j	ra
-	 nop
-
-#
-# UNALIGNED case
-#
-
-.Lunaligned:
-	# got here with a3="negu a0"
-	andi	a3,a3,0x3	# test if the a0 is word aligned
-	beqz	a3,.Lua_chk16w
-	 subu	a2,a2,a3	# bytes left after initial a3 bytes
-
-	LWHI	v1,0(a1)
-	LWLO	v1,3(a1)
-	addu	a1,a1,a3	# a3 may be here 1, 2 or 3
-	SWHI	v1,0(a0)
-	addu	a0,a0,a3	# below the dst will be word aligned (NOTE1)
-
-.Lua_chk16w:
-	andi	t8,a2,0x3f	# any whole 64-byte chunks?
-				# t8 is the byte count after 64-byte chunks
-	beq	a2,t8,.Lua_chk8w # if a2==t8, no 64-byte chunks
-				# There will be at most 1 32-byte chunk after it
-	 subu	a3,a2,t8	# subtract from a2 the reminder
-                                # Here a3 counts bytes in 16w chunks
-	addu	a3,a0,a3	# Now a3 is the final dst after 64-byte chunks
-
-	addu	t0,a0,a2	# t0 is the "past the end" address
-
-	subu	t9,t0,160	# t9 is the "last safe pref 30,128(a0)" address
-
-	pref    0,0(a1)		# bring the first line of src, addr 0
-	pref    0,32(a1)	# bring the second line of src, addr 32
-	pref    0,64(a1)	# bring the third line of src, addr 64
-	pref	30,32(a0)	# safe, as we have at least 64 bytes ahead
-# In case the a0 > t9 don't use "pref 30" at all
-	sgtu	v1,a0,t9
-	bgtz	v1,.Lua_loop16w	# skip "pref 30,64(a0)" for too short arrays
-	 nop
-# otherwise, start with using pref30
-	pref	30,64(a0)
-.Lua_loop16w:
-	pref	0,96(a1)
-	LWHI	t0,0(a1)
-	LWLO	t0,3(a1)
-	LWHI	t1,4(a1)
-	bgtz	v1,.Lua_skip_pref30_96
-	 LWLO	t1,7(a1)
-	pref    30,96(a0)   # continue setting up the dest, addr 96
-.Lua_skip_pref30_96:
-	LWHI	t2,8(a1)
-	LWLO	t2,11(a1)
-	LWHI	t3,12(a1)
-	LWLO	t3,15(a1)
-	LWHI	t4,16(a1)
-	LWLO	t4,19(a1)
-	LWHI	t5,20(a1)
-	LWLO	t5,23(a1)
-	LWHI	t6,24(a1)
-	LWLO	t6,27(a1)
-	LWHI	t7,28(a1)
-	LWLO	t7,31(a1)
-        pref    0,128(a1)    # bring the next lines of src, addr 128
-
-	sw	t0,0(a0)
-	sw	t1,4(a0)
-	sw	t2,8(a0)
-	sw	t3,12(a0)
-	sw	t4,16(a0)
-	sw	t5,20(a0)
-	sw	t6,24(a0)
-	sw	t7,28(a0)
-
-	LWHI	t0,32(a1)
-	LWLO	t0,35(a1)
-	LWHI	t1,36(a1)
-	bgtz	v1,.Lua_skip_pref30_128
-	LWLO	t1,39(a1)
-	pref    30,128(a0)   # continue setting up the dest, addr 128
-.Lua_skip_pref30_128:
-	LWHI	t2,40(a1)
-	LWLO	t2,43(a1)
-	LWHI	t3,44(a1)
-	LWLO	t3,47(a1)
-	LWHI	t4,48(a1)
-	LWLO	t4,51(a1)
-	LWHI	t5,52(a1)
-	LWLO	t5,55(a1)
-	LWHI	t6,56(a1)
-	LWLO	t6,59(a1)
-	LWHI	t7,60(a1)
-	LWLO	t7,63(a1)
-        pref    0, 160(a1)    # bring the next lines of src, addr 160
-
-	sw	t0,32(a0)
-	sw	t1,36(a0)
-	sw	t2,40(a0)
-	sw	t3,44(a0)
-	sw	t4,48(a0)
-	sw	t5,52(a0)
-	sw	t6,56(a0)
-	sw	t7,60(a0)
-
-	addiu	a0,a0,64	# adding 64 to dest
-	sgtu	v1,a0,t9
-	bne	a0,a3,.Lua_loop16w
-	 addiu	a1,a1,64	# adding 64 to src
-	move	a2,t8
-
-# Here we have src and dest word-aligned but less than 64-bytes to go
-
-.Lua_chk8w:
-	pref 0, 0x0(a1)
-	andi	t8,a2,0x1f	# is there a 32-byte chunk?
-				# the t8 is the reminder count
-	beq	a2,t8,.Lua_chk1w # when a2=t8, no 32-byte chunk
-	 nop
-
-	LWHI	t0,0(a1)
-	LWLO	t0,3(a1)
-	LWHI	t1,4(a1)
-	LWLO	t1,7(a1)
-	LWHI	t2,8(a1)
-	LWLO	t2,11(a1)
-	LWHI	t3,12(a1)
-	LWLO	t3,15(a1)
-	LWHI	t4,16(a1)
-	LWLO	t4,19(a1)
-	LWHI	t5,20(a1)
-	LWLO	t5,23(a1)
-	LWHI	t6,24(a1)
-	LWLO	t6,27(a1)
-	LWHI	t7,28(a1)
-	LWLO	t7,31(a1)
-	addiu	a1,a1,32
-
-	sw	t0,0(a0)
-	sw	t1,4(a0)
-	sw	t2,8(a0)
-	sw	t3,12(a0)
-	sw	t4,16(a0)
-	sw	t5,20(a0)
-	sw	t6,24(a0)
-	sw	t7,28(a0)
-	addiu	a0,a0,32
-
-.Lua_chk1w:
-	andi	a2,t8,0x3	# now a2 is the reminder past 1w chunks
-	beq	a2,t8,.Lua_smallCopy
-	 subu	a3,t8,a2	# a3 is count of bytes in 1w chunks
-	addu	a3,a0,a3	# now a3 is the dst address past the 1w chunks
-
-# copying in words (4-byte chunks)
-.Lua_wordCopy_loop:
-	LWHI	v1,0(a1)
-	LWLO	v1,3(a1)
-	addiu	a1,a1,4
-	addiu	a0,a0,4		# note: dst=a0 is word aligned here, see NOTE1
-	bne	a0,a3,.Lua_wordCopy_loop
-	 sw	v1,-4(a0)
-
-# Now less than 4 bytes (value in a2) left to copy
-.Lua_smallCopy:
-	beqz	a2,.Lleave
-	addu	a3,a0,a2	# a3 is the last dst address
-.Lua_smallCopy_loop:
-	lb	v1,0(a1)
-	addiu	a1,a1,1
-	addiu	a0,a0,1
-	bne	a0,a3,.Lua_smallCopy_loop
-	 sb	v1,-1(a0)
-
-	j	ra
-	 nop
-
-	.set	at
-	.set	reorder
-
-END(memcpy)
-
-
-/************************************************************************
- *  Implementation : Static functions
- ************************************************************************/
diff --git a/libc/arch-mips64/string/memset.S b/libc/arch-mips64/string/memset.S
deleted file mode 100644
index 09b756b..0000000
--- a/libc/arch-mips64/string/memset.S
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Copyright (c) 2009
- *      MIPS Technologies, Inc., California.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/************************************************************************
- *
- *  memset.S, version "64h" with 1 cache line horizon for "pref 30" and 14 nops
- *  Version: "043009"
- *
- ************************************************************************/
-
-
-/************************************************************************
- *  Include files
- ************************************************************************/
-
-#include <private/bionic_asm.h>
-
-/*
- * This routine could be optimized for MIPS64. The current code only
- * uses MIPS32 instructions.
- */
-
-#if defined(__MIPSEB__)
-#  define SWHI	swl		/* high part is left in big-endian	*/
-#  define SWLO	swr		/* low part is right in big-endian	*/
-#endif
-
-#if defined(__MIPSEL__)
-#  define SWHI	swr		/* high part is right in little-endian	*/
-#  define SWLO	swl		/* low part is left in little-endian	*/
-#endif
-
-#if !(defined(XGPROF) || defined(XPROF))
-#undef SETUP_GP
-#define SETUP_GP
-#endif
-
-#ifdef NDEBUG
-#define DBG #
-#else
-#define DBG
-#endif
-
-LEAF(memset,0)
-
-	.set	noreorder
-	.set	noat
-
-	addu	t0,a0,a2		# t0 is the "past the end" address
-	slti	AT,a2,4			# is a2 less than 4?
-	bne	AT,zero,.Llast4		# if yes, go to last4
-	 move	v0,a0			# memset returns the dst pointer
-
-	beq	a1,zero,.Lset0
-	 subu	v1,zero,a0
-
-	# smear byte into 32 bit word
-#if (__mips==32) && (__mips_isa_rev>=2)
-	ins     a1, a1, 8, 8        # Replicate fill byte into half-word.
-	ins     a1, a1, 16, 16      # Replicate fill byte into word.
-#else
-	and	a1,0xff
-	sll	AT,a1,8
-	or	a1,AT
-	sll	AT,a1,16
-	or	a1,AT
-#endif
-
-.Lset0:
-	andi	v1,v1,0x3		# word-unaligned address?
-	beq	v1,zero,.Laligned	# v1 is the unalignment count
-	 subu	a2,a2,v1
-	SWHI	a1,0(a0)
-	addu	a0,a0,v1
-
-# Here we have the "word-aligned" a0 (until the "last4")
-.Laligned:
-	andi	t8,a2,0x3f	# any 64-byte chunks?
-				# t8 is the byte count past 64-byte chunks
-	beq	a2,t8,.Lchk8w	# when a2==t8, no 64-byte chunks
-				# There will be at most 1 32-byte chunk then
-	 subu	a3,a2,t8	# subtract from a2 the reminder
-				# Here a3 counts bytes in 16w chunks
-	addu	a3,a0,a3	# Now a3 is the final dst after 64-byte chunks
-
-# Find out, if there are any 64-byte chunks after which will be still at least
-# 96 bytes left. The value "96" is calculated as needed buffer for
-# "pref 30,64(a0)" prefetch, which can be used as "pref 30,0(a0)" after
-# incrementing "a0" by 64.
-# For "a2" below 160 there will be no such "pref 30 safe" 64-byte chunk.
-#
-	sltiu	v1,a2,160
-	bgtz	v1,.Lloop16w_nopref30	# skip "pref 30,0(a0)"
-	 subu	t7,a2,96	# subtract "pref 30 unsafe" region
-		# below we have at least 1 64-byte chunk which is "pref 30 safe"
-	andi	t6,t7,0x3f	# t6 is past "64-byte safe chunks" reminder
-	subu	t5,t7,t6	# subtract from t7 the reminder
-				# Here t5 counts bytes in 16w "safe" chunks
-	addu	t4,a0,t5	# Now t4 is the dst after 64-byte "safe" chunks
-
-# Don't use "pref 30,0(a0)" for a0 in a "middle" of a cache line
-#	pref	30,0(a0)
-# Here we are in the region, where it is safe to use "pref 30,64(a0)"
-.Lloop16w:
-	addiu	a0,a0,64
-	pref	30,-32(a0)	# continue setting up the dest, addr 64-32
-	sw	a1,-64(a0)
-	sw	a1,-60(a0)
-	sw	a1,-56(a0)
-	sw	a1,-52(a0)
-	sw	a1,-48(a0)
-	sw	a1,-44(a0)
-	sw	a1,-40(a0)
-	sw	a1,-36(a0)
-	nop
-	nop			# the extra nop instructions help to balance
-	nop			# cycles needed for "store" + "fill" + "evict"
-	nop			# For 64byte store there are needed 8 fill
-	nop			# and 8 evict cycles, i.e. at least 32 instr.
-	nop
-	nop
-	pref	30,0(a0)	# continue setting up the dest, addr 64-0
-	sw	a1,-32(a0)
-	sw	a1,-28(a0)
-	sw	a1,-24(a0)
-	sw	a1,-20(a0)
-	sw	a1,-16(a0)
-	sw	a1,-12(a0)
-	sw	a1,-8(a0)
-	sw	a1,-4(a0)
-	nop
-	nop
-	nop
-	nop			# NOTE: adding 14 nop-s instead of 12 nop-s
-	nop			# gives better results for "fast" memory
-	nop
-	bne	a0,t4,.Lloop16w
-	 nop
-
-	beq	a0,a3,.Lchk8w	# maybe no more 64-byte chunks?
-	 nop			# this "delayed slot" is useless ...
-
-.Lloop16w_nopref30:	# there could be up to 3 "64-byte nopref30" chunks
-	addiu	a0,a0,64
-	sw	a1,-64(a0)
-	sw	a1,-60(a0)
-	sw	a1,-56(a0)
-	sw	a1,-52(a0)
-	sw	a1,-48(a0)
-	sw	a1,-44(a0)
-	sw	a1,-40(a0)
-	sw	a1,-36(a0)
-	sw	a1,-32(a0)
-	sw	a1,-28(a0)
-	sw	a1,-24(a0)
-	sw	a1,-20(a0)
-	sw	a1,-16(a0)
-	sw	a1,-12(a0)
-	sw	a1,-8(a0)
-	bne	a0,a3,.Lloop16w_nopref30
-	 sw	a1,-4(a0)
-
-.Lchk8w:		# t8 here is the byte count past 64-byte chunks
-
-	andi	t7,t8,0x1f	# is there a 32-byte chunk?
-				# the t7 is the reminder count past 32-bytes
-	beq	t8,t7,.Lchk1w	# when t8==t7, no 32-byte chunk
-	 move	a2,t7
-
-	sw	a1,0(a0)
-	sw	a1,4(a0)
-	sw	a1,8(a0)
-	sw	a1,12(a0)
-	sw	a1,16(a0)
-	sw	a1,20(a0)
-	sw	a1,24(a0)
-	sw	a1,28(a0)
-	addiu	a0,a0,32
-
-.Lchk1w:
-	andi	t8,a2,0x3	# now t8 is the reminder past 1w chunks
-	beq	a2,t8,.Llast4aligned
-	 subu	a3,a2,t8	# a3 is the count of bytes in 1w chunks
-	addu	a3,a0,a3	# now a3 is the dst address past the 1w chunks
-
-# copying in words (4-byte chunks)
-.LwordCopy_loop:
-	addiu	a0,a0,4
-	bne	a0,a3,.LwordCopy_loop
-	 sw	a1,-4(a0)
-
-# store last 0-3 bytes
-# this will repeat the last store if the memset finishes on a word boundary
-.Llast4aligned:
-	j	ra
-	 SWLO	a1,-1(t0)
-
-.Llast4:
-	beq	a0,t0,.Llast4e
-.Llast4l:
-	 addiu	a0,a0,1
-	bne	a0,t0,.Llast4l
-	 sb	a1,-1(a0)
-.Llast4e:
-	j	ra
-	 nop
-
-	.set	at
-	.set	reorder
-
-END(memset)
-
-
-/************************************************************************
- *  Implementation : Static functions
- ************************************************************************/
diff --git a/libc/arch-mips64/string/mips-string-ops.h b/libc/arch-mips64/string/mips-string-ops.h
deleted file mode 100644
index e635ba1..0000000
--- a/libc/arch-mips64/string/mips-string-ops.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2010 MIPS Technologies, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *      * Redistributions of source code must retain the above copyright
- *        notice, this list of conditions and the following disclaimer.
- *      * Redistributions in binary form must reproduce the above copyright
- *        notice, this list of conditions and the following disclaimer
- *        in the documentation and/or other materials provided with
- *        the distribution.
- *      * Neither the name of MIPS Technologies Inc. nor the names of its
- *        contributors may be used to endorse or promote products derived
- *        from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __MIPS_STRING_OPS_H
-#define __MIPS_STRING_OPS_H
-    /* This definition of the byte bitfields uses the
-       assumption that the layout of the bitfields is
-       equivalent to the layout in memory.  Generally,
-       for the MIPS ABIs, this is true. If you compile
-       the strcmp.c file with -DSMOKE_TEST_NEW_STRCMP,
-       this assumption will be tested.
-
-       Also, regardless of char signedness, ANSI C dictates that
-       strcmp() treats each character as unsigned char.  For
-       strlen and the like, signedness doesn't matter.
-
-       Also, this code assumes that there are 8-bits per 'char'.  */
-
-#if __mips64
-typedef struct bits
-{
-  unsigned long B0:8, B1:8, B2:8, B3:8, B4:8, B5:8, B6:8, B7:8;
-} bits_t;
-#else
-typedef struct bits
-{
-  unsigned long B0:8, B1:8, B2:8, B3:8;
-} bits_t;
-#endif
-
-#ifndef _ULW
-    /* for MIPS GCC, there is no unaligned builtins - so this code forces
-       the compiler to treat the pointer access as unaligned.  */
-struct ulw
-{
-  unsigned long b;
-} __attribute__ ((packed));
-
-#define _ULW(__x) ((struct ulw *) ((char *)(&__x)))->b;
-#endif
-
-/* This union assumes that small structures can be in registers.  If
-   not, then memory accesses will be done - not optimal, but ok.  */
-typedef union
-{
-  unsigned long v;
-  bits_t b;
-} bitfields_t;
-
-#ifndef detect_zero
-/* __mips_dsp, __mips_dspr2, and __mips64 are predefined by
-   the compiler, based on command line options.  */
-#if (__mips_dsp || __mips_dspr2) && !__mips64
-#define __mips_using_dsp 1
-
-/* DSP 4-lane (8 unsigned bits per line) subtract and saturate
- * Intrinsic operation. How this works:
- *     Given a 4-byte string of "ABC\0", subtract this as
- *     an unsigned integer from 0x01010101:
- *	   0x01010101
- *       - 0x41424300
- *        -----------
- (         0xbfbebe01 <-- answer without saturation
- *	   0x00000001 <-- answer with saturation
- * When this 4-lane vector is treated as an unsigned int value,
- * a non-zero answer indicates the presence of a zero in the
- * original 4-byte argument.  */
-
-typedef signed char v4i8 __attribute__ ((vector_size (4)));
-
-#define detect_zero(__x,__y,__01s,__80s)\
-       ((unsigned) __builtin_mips_subu_s_qb((v4i8) __01s,(v4i8) __x))
-
-    /* sets all 4 lanes to requested byte.  */
-#define set_byte_lanes(__x) ((unsigned) __builtin_mips_repl_qb(__x))
-
-    /* sets all 4 lanes to 0x01.  */
-#define def_and_set_01(__x) unsigned long __x = (unsigned) __builtin_mips_repl_qb(0x01)
-
-    /* sets all 4 lanes to 0x80. Not needed when subu_s.qb used. */
-#define def_and_set_80(__x) /* do nothing */
-
-#else
-    /* this version, originally published in the 80's, uses
-       a reverse-carry-set like determination of the zero byte.
-       The steps are, for __x = 0x31ff0001:
-       __x - _01s = 0x30fdff00
-       ~__x = 0xce00fffe
-       ((__x - _01s) & ~__x) = 0x0000ff00
-       x & _80s = 0x00008000 <- byte 3 was zero
-       Some implementaions naively assume that characters are
-       always 7-bit unsigned ASCII. With that assumption, the
-       "& ~x" is usually discarded. Since character strings
-       are 8-bit, the and is needed to catch the case of
-       a false positive when the byte is 0x80. */
-
-#define detect_zero(__x,__y,_01s,_80s)\
-	((unsigned) (((__x) - _01s) & ~(__x)) & _80s)
-
-#if __mips64
-#define def_and_set_80(__x) unsigned long __x =  0x8080808080808080ul
-#define def_and_set_01(__x)  unsigned long __x = 0x0101010101010101ul
-#else
-#define def_and_set_80(__x) unsigned long __x = 0x80808080ul
-#define def_and_set_01(__x) unsigned long __x = 0x01010101ul
-#endif
-
-#endif
-#endif
-
-/* dealing with 'void *' conversions without using extra variables. */
-#define get_byte(__x,__idx) (((unsigned char *) (__x))[__idx])
-#define set_byte(__x,__idx,__fill) ((unsigned char *) (__x))[__idx] = (__fill)
-#define get_word(__x,__idx) (((unsigned long *) (__x))[__idx])
-#define set_word(__x,__idx,__fill) ((unsigned long *) (__x))[__idx] = (__fill)
-#define inc_ptr_as(__type,__x,__inc) __x = (void *) (((__type) __x) + (__inc))
-#define cvt_ptr_to(__type,__x) ((__type) (__x))
-
-#endif
diff --git a/libc/arch-mips64/string/mips_strlen.c b/libc/arch-mips64/string/mips_strlen.c
deleted file mode 100644
index 37e5865..0000000
--- a/libc/arch-mips64/string/mips_strlen.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright (c) 2010 MIPS Technologies, Inc.
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *      * Redistributions of source code must retain the above copyright
- *        notice, this list of conditions and the following disclaimer.
- *      * Redistributions in binary form must reproduce the above copyright
- *        notice, this list of conditions and the following disclaimer
- *        in the documentation and/or other materials provided with
- *        the distribution.
- *      * Neither the name of MIPS Technologies Inc. nor the names of its
- *        contributors may be used to endorse or promote products derived
- *        from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include "mips-string-ops.h"
-
-#define do_strlen_word(__av) {\
-    if (detect_zero(x,x,_01s,_80s)) break;\
-    x = __av;\
-    cnt += sizeof (unsigned);\
-    }
-
-#define do_strlen_byte(__x) {\
-  if ((bx.b.B##__x) == 0) break;\
-  ++cnt;\
-  }
-
-#if SMOKE_TEST_MIPS_STRLEN
-#define strlen my_strlen
-#endif
-
-size_t
-strlen (const char *_a)
-{
-  int cnt = 0;
-  unsigned long x;
-
-  /* align the string to word boundary so we can do word at a time.  */
-  if ((cvt_ptr_to (unsigned long, _a) & (sizeof (unsigned long) - 1)) != 0)
-    {
-      if ((cvt_ptr_to (unsigned long, _a) & 1) != 0)
-	{
-	  if (get_byte (_a, 0) == 0)
-	    return cnt;
-	  /* set bit 1 so 2-bytes are checked and incremented. */
-	  inc_ptr_as (char *, _a, 1);
-	  ++cnt;
-	}
-      if ((cvt_ptr_to (unsigned long, _a) & 2) != 0)
-	{
-	  if (get_byte (_a, 0) == 0)
-	    return cnt + 0;
-	  if (get_byte (_a, 1) == 0)
-	    return cnt + 1;
-	  inc_ptr_as (char *, _a, 2);
-	  cnt += 2;
-	}
-    }
-
-#if __mips64
-#error strlen: mips64 check for 4-byte alignment not implemented.
-#endif
-
-  if (1)
-    {
-      def_and_set_01 (_01s);
-      def_and_set_80 (_80s);
-
-      /* as advantagous as it is to performance, this code cannot pre-load
-         the following word, nor can it prefetch the next line at the start
-         of the loop since the string can be at the end of a page with the
-         following page unmapped. There are tests in the suite to catch
-         any attempt to go beyond the current word. */
-      x = get_word (_a, 0);
-      while (1)
-	{
-	  /* doing 8 words should cover most strings.  */
-	  do_strlen_word (get_word (_a, 1));
-	  do_strlen_word (get_word (_a, 2));
-	  do_strlen_word (get_word (_a, 3));
-	  do_strlen_word (get_word (_a, 4));
-	  do_strlen_word (get_word (_a, 5));
-	  do_strlen_word (get_word (_a, 6));
-	  do_strlen_word (get_word (_a, 7));
-	  do_strlen_word (get_word (_a, 8));
-	  inc_ptr_as (unsigned long*, _a, 8);
-	}
-    }
-  while (1)
-    {
-      /* pull apart the last word processed and find the zero.  */
-      bitfields_t bx;
-      bx.v = x;
-#if __mips64
-      do_strlen_byte (0);
-      do_strlen_byte (1);
-      do_strlen_byte (2);
-      do_strlen_byte (3);
-      do_strlen_byte (4);
-      do_strlen_byte (5);
-      do_strlen_byte (6);
-#else
-      do_strlen_byte (0);
-      do_strlen_byte (1);
-      do_strlen_byte (2);
-#endif
-      /* last byte is zero */
-      break;
-    }
-  return cnt;
-}
-
-#undef do_strlen_byte
-#undef do_strlen_word
-
-#if SMOKE_TEST_MIPS_STRLEN
-#include <stdio.h>
-char str1[] = "DHRYSTONE PROGRAM, 1'ST STRING";
-char str2[] = "DHRYSTONE PROGRAM, 2'ST STRING";
-
-char str3[] = "another string";
-char str4[] = "another";
-
-char str5[] = "somes tring";
-char str6[] = "somes_tring";
-
-char str7[16], str8[16];
-
-static char *
-chk (unsigned long mine, unsigned long libs, int *errors)
-{
-  static char answer[1024];
-  char *result = mine == libs ? "PASS" : "FAIL";
-  sprintf (answer, "new_strlen=%d: lib_strlen=%d: %s!", mine, libs, result);
-  if (mine != libs)
-    (*errors)++;
-  return answer;
-}
-
-int
-main (int argc, char **argv)
-{
-  int errors = 0;
-  /* set -1 in one position */
-  str6[5] = 0xff;
-  /* set zero in same position with junk in following 3 */
-  str7[0] = str8[0] = 0;
-  str7[1] = 0xff;
-  str7[2] = 'a';
-  str7[3] = 2;
-  str8[1] = 's';
-  str8[2] = -2;
-  str8[3] = 0;
-
-  fprintf (stderr, "========== mips_strlen%s test...\n",
-	   argv[0] ? argv[0] : "unknown strlen");
-#define P(__x,__y) {\
-    int a = my_strlen(__x + __y);\
-    int b = (strlen)(__x + __y) /* library version */;\
-    fprintf(stderr,"%s+%d: %s\n",#__x,__y,chk(a,b,&errors));\
-    }
-
-  P (str1, 0);
-  P (str1, 1);
-  P (str1, 2);
-  P (str1, 3);
-
-  P (str2, 0);
-  P (str2, 1);
-  P (str2, 2);
-  P (str2, 3);
-
-  P (str3, 0);
-  P (str3, 1);
-  P (str3, 2);
-  P (str3, 3);
-
-  P (str4, 0);
-  P (str4, 1);
-  P (str4, 2);
-  P (str4, 3);
-
-  P (str5, 0);
-  P (str5, 1);
-  P (str5, 2);
-  P (str5, 3);
-
-  P (str6, 0);
-  P (str6, 1);
-  P (str6, 2);
-  P (str6, 3);
-
-  P (str7, 0);
-  P (str7, 1);
-  P (str7, 2);
-  P (str7, 3);
-
-  P (str8, 0);
-  P (str8, 1);
-  P (str8, 2);
-  P (str8, 3);
-
-  return errors;
-}
-#endif