resolved conflicts for merge of 86125aca to klp-dev

Change-Id: I78622ecbad27301b285193a085fa04374aa60033
diff --git a/libc/Android.mk b/libc/Android.mk
index c08cf77..a6cbf61 100644
--- a/libc/Android.mk
+++ b/libc/Android.mk
@@ -62,7 +62,6 @@
 	string/strcspn.c \
 	string/strdup.c \
 	string/strpbrk.c \
-	string/__strrchr_chk.c \
 	string/strsep.c \
 	string/strspn.c \
 	string/strstr.c \
@@ -105,6 +104,7 @@
 	bionic/md5.c \
 	bionic/memmem.c \
 	bionic/memswap.c \
+	bionic/name_mem.c \
 	bionic/openat.c \
 	bionic/open.c \
 	bionic/pathconf.c \
@@ -181,6 +181,25 @@
 	netbsd/nameser/ns_print.c \
 	netbsd/nameser/ns_samedomain.c \
 
+# Fortify implementations of libc functions.
+libc_common_src_files += \
+    bionic/__fgets_chk.cpp \
+    bionic/__memcpy_chk.cpp \
+    bionic/__memmove_chk.cpp \
+    bionic/__memset_chk.cpp \
+    bionic/__strcat_chk.cpp \
+    bionic/__strchr_chk.cpp \
+    bionic/__strcpy_chk.cpp \
+    bionic/__strlcat_chk.cpp \
+    bionic/__strlcpy_chk.cpp \
+    bionic/__strlen_chk.cpp \
+    bionic/__strncat_chk.cpp \
+    bionic/__strncpy_chk.cpp \
+    bionic/__strrchr_chk.cpp \
+    bionic/__umask_chk.cpp \
+    bionic/__vsnprintf_chk.cpp \
+    bionic/__vsprintf_chk.cpp \
+
 libc_bionic_src_files := \
     bionic/abort.cpp \
     bionic/assert.cpp \
@@ -189,16 +208,12 @@
     bionic/__errno.c \
     bionic/eventfd_read.cpp \
     bionic/eventfd_write.cpp \
-    bionic/__fgets_chk.cpp \
     bionic/futimens.cpp \
     bionic/getauxval.cpp \
     bionic/getcwd.cpp \
     bionic/libc_init_common.cpp \
     bionic/libc_logging.cpp \
     bionic/libgen.cpp \
-    bionic/__memcpy_chk.cpp \
-    bionic/__memmove_chk.cpp \
-    bionic/__memset_chk.cpp \
     bionic/mmap.cpp \
     bionic/pthread_attr.cpp \
     bionic/pthread_detach.cpp \
@@ -221,24 +236,13 @@
     bionic/signalfd.cpp \
     bionic/sigwait.cpp \
     bionic/statvfs.cpp \
-    bionic/__strcat_chk.cpp \
-    bionic/__strchr_chk.cpp \
-    bionic/__strcpy_chk.cpp \
     bionic/strerror.cpp \
     bionic/strerror_r.cpp \
-    bionic/__strlcat_chk.cpp \
-    bionic/__strlcpy_chk.cpp \
-    bionic/__strlen_chk.cpp \
-    bionic/__strncat_chk.cpp \
-    bionic/__strncpy_chk.cpp \
     bionic/strsignal.cpp \
     bionic/stubs.cpp \
     bionic/sysconf.cpp \
     bionic/tdestroy.cpp \
     bionic/tmpfile.cpp \
-    bionic/__umask_chk.cpp \
-    bionic/__vsnprintf_chk.cpp \
-    bionic/__vsprintf_chk.cpp \
     bionic/wait.cpp \
     bionic/wchar.cpp \
 
@@ -358,7 +362,6 @@
 	bionic/memmove.c.arm \
 	string/bcopy.c \
 	string/strncmp.c \
-	string/strcat.c \
 	string/strncat.c \
 	string/strncpy.c \
 	bionic/strchr.cpp \
diff --git a/libc/arch-arm/arm.mk b/libc/arch-arm/arm.mk
index 1a2185f..1d9863c 100644
--- a/libc/arch-arm/arm.mk
+++ b/libc/arch-arm/arm.mk
@@ -14,7 +14,6 @@
     arch-arm/bionic/_setjmp.S \
     arch-arm/bionic/setjmp.S \
     arch-arm/bionic/sigsetjmp.S \
-    arch-arm/bionic/strcpy.S \
     arch-arm/bionic/syscall.S \
     arch-arm/bionic/tgkill.S \
     arch-arm/bionic/tkill.S \
@@ -27,6 +26,15 @@
 _LIBC_ARCH_DYNAMIC_SRC_FILES := \
     arch-arm/bionic/exidx_dynamic.c
 
+# Remove the C++ fortify function implementations for which there is an
+# ARM assembly version.
+_LIBC_FORTIFY_FILES_TO_REMOVE := \
+    bionic/__memcpy_chk.cpp \
+    bionic/__memset_chk.cpp \
+
+libc_common_src_files := \
+    $(filter-out $(_LIBC_FORTIFY_FILES_TO_REMOVE),$(libc_common_src_files))
+
 ifeq ($(strip $(wildcard bionic/libc/arch-arm/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk)),)
 $(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are cortex-a7, cortex-a8, cortex-a9, cortex-a15, krait. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
 endif
diff --git a/libc/arch-arm/cortex-a15/bionic/memcpy.S b/libc/arch-arm/cortex-a15/bionic/memcpy.S
index d297064..2394024 100644
--- a/libc/arch-arm/cortex-a15/bionic/memcpy.S
+++ b/libc/arch-arm/cortex-a15/bionic/memcpy.S
@@ -59,6 +59,7 @@
 
 #include <machine/cpu-features.h>
 #include <machine/asm.h>
+#include "libc_events.h"
 
         .text
         .syntax unified
@@ -66,6 +67,13 @@
 
 #define CACHE_LINE_SIZE 64
 
+ENTRY(__memcpy_chk)
+        cmp     r2, r3
+        bgt     fortify_check_failed
+
+        // Fall through to memcpy...
+END(__memcpy_chk)
+
 ENTRY(memcpy)
         // Assumes that n >= 0, and dst, src are valid pointers.
         // For any sizes less than 832 use the neon code that doesn't
@@ -321,4 +329,21 @@
 
         // Src is guaranteed to be at least word aligned by this point.
         b       word_aligned
+
+
+        // Only reached when the __memcpy_chk check fails.
+fortify_check_failed:
+        ldr     r0, error_message
+        ldr     r1, error_code
+1:
+        add     r0, pc
+        bl      __fortify_chk_fail
+error_code:
+        .word   BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
+error_message:
+        .word   error_string-(1b+8)
 END(memcpy)
+
+        .data
+error_string:
+        .string "memcpy buffer overflow"
diff --git a/libc/arch-arm/cortex-a15/bionic/memset.S b/libc/arch-arm/cortex-a15/bionic/memset.S
index 2e1ad54..6c143ad 100644
--- a/libc/arch-arm/cortex-a15/bionic/memset.S
+++ b/libc/arch-arm/cortex-a15/bionic/memset.S
@@ -28,19 +28,38 @@
 
 #include <machine/cpu-features.h>
 #include <machine/asm.h>
+#include "libc_events.h"
 
-		/*
-		 * Optimized memset() for ARM.
+        /*
+         * Optimized memset() for ARM.
          *
          * memset() returns its first argument.
-		 */
+         */
 
         .fpu        neon
         .syntax     unified
 
+ENTRY(__memset_chk)
+        cmp         r2, r3
+        bls         done
+
+        ldr         r0, error_message
+        ldr         r1, error_code
+1:
+        add         r0, pc
+        bl          __fortify_chk_fail
+error_code:
+        .word       BIONIC_EVENT_MEMSET_BUFFER_OVERFLOW
+error_message:
+        .word       error_string-(1b+8)
+
+END(__memset_chk)
+
 ENTRY(bzero)
         mov         r2, r1
         mov         r1, #0
+
+done:
         // Fall through to memset...
 END(bzero)
 
@@ -162,3 +181,7 @@
         ldmfd       sp!, {r0}
         bx          lr
 END(memset)
+
+        .data
+error_string:
+        .string     "memset buffer overflow"
diff --git a/libc/arch-arm/cortex-a15/bionic/strcat.S b/libc/arch-arm/cortex-a15/bionic/strcat.S
new file mode 100644
index 0000000..72d4e9e
--- /dev/null
+++ b/libc/arch-arm/cortex-a15/bionic/strcat.S
@@ -0,0 +1,568 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+    .syntax unified
+
+    .thumb
+    .thumb_func
+
+    .macro m_push
+    push    {r0, r4, r5, lr}
+    .endm // m_push
+
+    .macro m_pop
+    pop     {r0, r4, r5, pc}
+    .endm // m_pop
+
+    .macro m_scan_byte
+    ldrb    r3, [r0]
+    cbz     r3, strcat_r0_scan_done
+    add     r0, #1
+    .endm // m_scan_byte
+
+    .macro m_copy_byte reg, cmd, label
+    ldrb    \reg, [r1], #1
+    strb    \reg, [r0], #1
+    \cmd    \reg, \label
+    .endm // m_copy_byte
+
+ENTRY(strcat)
+    // Quick check to see if src is empty.
+    ldrb    r2, [r1]
+    pld     [r1, #0]
+    cbnz    r2, strcat_continue
+    bx      lr
+
+strcat_continue:
+    // To speed up really small dst strings, unroll checking the first 4 bytes.
+    m_push
+    m_scan_byte
+    m_scan_byte
+    m_scan_byte
+    m_scan_byte
+
+    ands    r3, r0, #7
+    beq     strcat_mainloop
+
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     strcat_align_to_32
+
+    ldrb    r5, [r0]
+    cbz     r5, strcat_r0_scan_done
+    add     r0, r0, #1
+
+strcat_align_to_32:
+    bcc     strcat_align_to_64
+
+    ldrb    r2, [r0]
+    cbz     r2, strcat_r0_scan_done
+    add     r0, r0, #1
+    ldrb    r4, [r0]
+    cbz     r4, strcat_r0_scan_done
+    add     r0, r0, #1
+
+strcat_align_to_64:
+    tst     r3, #4
+    beq     strcat_mainloop
+    ldr     r3, [r0], #4
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcat_zero_in_second_register
+    b       strcat_mainloop
+
+strcat_r0_scan_done:
+    // For short copies, hard-code checking the first 8 bytes since this
+    // new code doesn't win until after about 8 bytes.
+    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
+
+strcpy_finish:
+    m_pop
+
+strcpy_continue:
+    ands    r3, r0, #7
+    beq     strcpy_check_src_align
+
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     strcpy_align_to_32
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, strcpy_complete
+
+strcpy_align_to_32:
+    bcc     strcpy_align_to_64
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, strcpy_complete
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, strcpy_complete
+
+strcpy_align_to_64:
+    tst     r3, #4
+    beq     strcpy_check_src_align
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+    str     r2, [r0], #4
+
+strcpy_check_src_align:
+    // At this point dst is aligned to a double word, check if src
+    // is also aligned to a double word.
+    ands    r3, r1, #7
+    bne     strcpy_unaligned_copy
+
+    .p2align 2
+strcpy_mainloop:
+    ldrd    r2, r3, [r1], #8
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_mainloop
+
+strcpy_complete:
+    m_pop
+
+strcpy_zero_in_first_register:
+    lsls    lr, ip, #17
+    bne     strcpy_copy1byte
+    bcs     strcpy_copy2bytes
+    lsls    ip, ip, #1
+    bne     strcpy_copy3bytes
+
+strcpy_copy4bytes:
+    // Copy 4 bytes to the destination.
+    str     r2, [r0]
+    m_pop
+
+strcpy_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+strcpy_copy2bytes:
+    strh    r2, [r0]
+    m_pop
+
+strcpy_copy3bytes:
+    strh    r2, [r0], #2
+    lsr     r2, #16
+    strb    r2, [r0]
+    m_pop
+
+strcpy_zero_in_second_register:
+    lsls    lr, ip, #17
+    bne     strcpy_copy5bytes
+    bcs     strcpy_copy6bytes
+    lsls    ip, ip, #1
+    bne     strcpy_copy7bytes
+
+    // Copy 8 bytes to the destination.
+    strd    r2, r3, [r0]
+    m_pop
+
+strcpy_copy5bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0]
+    m_pop
+
+strcpy_copy6bytes:
+    str     r2, [r0], #4
+    strh    r3, [r0]
+    m_pop
+
+strcpy_copy7bytes:
+    str     r2, [r0], #4
+    strh    r3, [r0], #2
+    lsr     r3, #16
+    strb    r3, [r0]
+    m_pop
+
+strcpy_unaligned_copy:
+    // Dst is aligned to a double word, while src is at an unknown alignment.
+    // There are 7 different versions of the unaligned copy code
+    // to prevent overreading the src. The mainloop of every single version
+    // will store 64 bits per loop. The difference is how much of src can
+    // be read without potentially crossing a page boundary.
+    tbb     [pc, r3]
+strcpy_unaligned_branchtable:
+    .byte 0
+    .byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
+
+    .p2align 2
+    // Can read 7 bytes before possibly crossing a page.
+strcpy_unalign7:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r3, [r1]
+    cbz     r3, strcpy_unalign7_copy5bytes
+    ldrb    r4, [r1, #1]
+    cbz     r4, strcpy_unalign7_copy6bytes
+    ldrb    r5, [r1, #2]
+    cbz     r5, strcpy_unalign7_copy7bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    lsrs    ip, r3, #24
+    strd    r2, r3, [r0], #8
+    beq     strcpy_unalign_return
+    b       strcpy_unalign7
+
+strcpy_unalign7_copy5bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0]
+strcpy_unalign_return:
+    m_pop
+
+strcpy_unalign7_copy6bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    m_pop
+
+strcpy_unalign7_copy7bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    strb    r5, [r0], #1
+    m_pop
+
+    .p2align 2
+    // Can read 6 bytes before possibly crossing a page.
+strcpy_unalign6:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, strcpy_unalign_copy5bytes
+    ldrb    r5, [r1, #1]
+    cbz     r5, strcpy_unalign_copy6bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r3, #0xff0000
+    beq     strcpy_copy7bytes
+    lsrs    ip, r3, #24
+    strd    r2, r3, [r0], #8
+    beq     strcpy_unalign_return
+    b       strcpy_unalign6
+
+    .p2align 2
+    // Can read 5 bytes before possibly crossing a page.
+strcpy_unalign5:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, strcpy_unalign_copy5bytes
+
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign5
+
+strcpy_unalign_copy5bytes:
+    str     r2, [r0], #4
+    strb    r4, [r0]
+    m_pop
+
+strcpy_unalign_copy6bytes:
+    str     r2, [r0], #4
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 4 bytes before possibly crossing a page.
+strcpy_unalign4:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign4
+
+    .p2align 2
+    // Can read 3 bytes before possibly crossing a page.
+strcpy_unalign3:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign3_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, strcpy_unalign3_copy2bytes
+    ldrb    r4, [r1, #2]
+    cbz     r4, strcpy_unalign3_copy3bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    lsrs    lr, r2, #24
+    beq     strcpy_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign3
+
+strcpy_unalign3_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+strcpy_unalign3_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_pop
+
+strcpy_unalign3_copy3bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 2 bytes before possibly crossing a page.
+strcpy_unalign2:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign_copy1byte
+    ldrb    r4, [r1, #1]
+    cbz     r4, strcpy_unalign_copy2bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r2, #0xff0000
+    beq     strcpy_copy3bytes
+    lsrs    ip, r2, #24
+    beq     strcpy_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign2
+
+    .p2align 2
+    // Can read 1 byte before possibly crossing a page.
+strcpy_unalign1:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign_copy1byte
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign1
+
+strcpy_unalign_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+strcpy_unalign_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r4, [r0]
+    m_pop
+
+    .p2align 2
+strcat_mainloop:
+    ldrd    r2, r3, [r0], #8
+
+    pld     [r0, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcat_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcat_zero_in_second_register
+    b       strcat_mainloop
+
+strcat_zero_in_first_register:
+    // Prefetch the src now, it's going to be used soon.
+    pld     [r1, #0]
+    lsls    lr, ip, #17
+    bne     strcat_sub8
+    bcs     strcat_sub7
+    lsls    ip, ip, #1
+    bne     strcat_sub6
+
+    sub     r0, r0, #5
+    b       strcat_r0_scan_done
+
+strcat_sub8:
+    sub     r0, r0, #8
+    b       strcat_r0_scan_done
+
+strcat_sub7:
+    sub     r0, r0, #7
+    b       strcat_r0_scan_done
+
+strcat_sub6:
+    sub     r0, r0, #6
+    b       strcat_r0_scan_done
+
+strcat_zero_in_second_register:
+    // Prefetch the src now, it's going to be used soon.
+    pld     [r1, #0]
+    lsls    lr, ip, #17
+    bne     strcat_sub4
+    bcs     strcat_sub3
+    lsls    ip, ip, #1
+    bne     strcat_sub2
+
+    sub     r0, r0, #1
+    b       strcat_r0_scan_done
+
+strcat_sub4:
+    sub     r0, r0, #4
+    b       strcat_r0_scan_done
+
+strcat_sub3:
+    sub     r0, r0, #3
+    b       strcat_r0_scan_done
+
+strcat_sub2:
+    sub     r0, r0, #2
+    b       strcat_r0_scan_done
+END(strcat)
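
Both the strcat scan loop and the strcpy copy loops above rely on the classic word-at-a-time NUL test: for a 32-bit word w, (w - 0x01010101) & ~w & 0x80808080 is non-zero exactly when some byte of w is zero, and the lsls/carry sequence that follows works out which byte it was. A small C sketch of that test, illustrative only:

    #include <stdint.h>
    #include <stdbool.h>

    /* Non-zero result means at least one byte of 'word' is zero; this is the
       sub/bic/ands #0x80808080 sequence used in the assembly above. */
    static bool has_zero_byte(uint32_t word) {
        return ((word - 0x01010101u) & ~word & 0x80808080u) != 0;
    }

    /* Index (0-3, little-endian) of the first zero byte, mirroring what the
       lsls #17 / carry decoding computes before picking a copy tail. */
    static int first_zero_byte(uint32_t word) {
        for (int i = 0; i < 4; ++i) {
            if (((word >> (8 * i)) & 0xffu) == 0) return i;
        }
        return -1;  /* no zero byte */
    }
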
diff --git a/libc/arch-arm/cortex-a15/bionic/strcpy.S b/libc/arch-arm/cortex-a15/bionic/strcpy.S
new file mode 100644
index 0000000..5773540
--- /dev/null
+++ b/libc/arch-arm/cortex-a15/bionic/strcpy.S
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+    .syntax unified
+
+    .thumb
+    .thumb_func
+
+    .macro m_push
+    push    {r0, r4, r5, lr}
+    .endm // m_push
+
+    .macro m_pop
+    pop     {r0, r4, r5, pc}
+    .endm // m_pop
+
+    .macro m_copy_byte reg, cmd, label
+    ldrb    \reg, [r1], #1
+    strb    \reg, [r0], #1
+    \cmd    \reg, \label
+    .endm // m_copy_byte
+
+ENTRY(strcpy)
+    // For short copies, hard-code checking the first 8 bytes since this
+    // new code doesn't win until after about 8 bytes.
+    m_push
+    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
+
+strcpy_finish:
+    m_pop
+
+strcpy_continue:
+    pld     [r1, #0]
+    ands    r3, r0, #7
+    beq     strcpy_check_src_align
+
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     strcpy_align_to_32
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, strcpy_complete
+
+strcpy_align_to_32:
+    bcc     strcpy_align_to_64
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, strcpy_complete
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, strcpy_complete
+
+strcpy_align_to_64:
+    tst     r3, #4
+    beq     strcpy_check_src_align
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+    str     r2, [r0], #4
+
+strcpy_check_src_align:
+    // At this point dst is aligned to a double word, check if src
+    // is also aligned to a double word.
+    ands    r3, r1, #7
+    bne     strcpy_unaligned_copy
+
+    .p2align 2
+strcpy_mainloop:
+    ldrd    r2, r3, [r1], #8
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_mainloop
+
+strcpy_complete:
+    m_pop
+
+strcpy_zero_in_first_register:
+    lsls    lr, ip, #17
+    bne     strcpy_copy1byte
+    bcs     strcpy_copy2bytes
+    lsls    ip, ip, #1
+    bne     strcpy_copy3bytes
+
+strcpy_copy4bytes:
+    // Copy 4 bytes to the destination.
+    str     r2, [r0]
+    m_pop
+
+strcpy_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+strcpy_copy2bytes:
+    strh    r2, [r0]
+    m_pop
+
+strcpy_copy3bytes:
+    strh    r2, [r0], #2
+    lsr     r2, #16
+    strb    r2, [r0]
+    m_pop
+
+strcpy_zero_in_second_register:
+    lsls    lr, ip, #17
+    bne     strcpy_copy5bytes
+    bcs     strcpy_copy6bytes
+    lsls    ip, ip, #1
+    bne     strcpy_copy7bytes
+
+    // Copy 8 bytes to the destination.
+    strd    r2, r3, [r0]
+    m_pop
+
+strcpy_copy5bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0]
+    m_pop
+
+strcpy_copy6bytes:
+    str     r2, [r0], #4
+    strh    r3, [r0]
+    m_pop
+
+strcpy_copy7bytes:
+    str     r2, [r0], #4
+    strh    r3, [r0], #2
+    lsr     r3, #16
+    strb    r3, [r0]
+    m_pop
+
+strcpy_unaligned_copy:
+    // Dst is aligned to a double word, while src is at an unknown alignment.
+    // There are 7 different versions of the unaligned copy code
+    // to prevent overreading the src. The mainloop of every single version
+    // will store 64 bits per loop. The difference is how much of src can
+    // be read without potentially crossing a page boundary.
+    tbb     [pc, r3]
+strcpy_unaligned_branchtable:
+    .byte 0
+    .byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
+
+    .p2align 2
+    // Can read 7 bytes before possibly crossing a page.
+strcpy_unalign7:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r3, [r1]
+    cbz     r3, strcpy_unalign7_copy5bytes
+    ldrb    r4, [r1, #1]
+    cbz     r4, strcpy_unalign7_copy6bytes
+    ldrb    r5, [r1, #2]
+    cbz     r5, strcpy_unalign7_copy7bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    lsrs    ip, r3, #24
+    strd    r2, r3, [r0], #8
+    beq     strcpy_unalign_return
+    b       strcpy_unalign7
+
+strcpy_unalign7_copy5bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0]
+strcpy_unalign_return:
+    m_pop
+
+strcpy_unalign7_copy6bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    m_pop
+
+strcpy_unalign7_copy7bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    strb    r5, [r0], #1
+    m_pop
+
+    .p2align 2
+    // Can read 6 bytes before possibly crossing a page.
+strcpy_unalign6:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, strcpy_unalign_copy5bytes
+    ldrb    r5, [r1, #1]
+    cbz     r5, strcpy_unalign_copy6bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r3, #0xff0000
+    beq     strcpy_copy7bytes
+    lsrs    ip, r3, #24
+    strd    r2, r3, [r0], #8
+    beq     strcpy_unalign_return
+    b       strcpy_unalign6
+
+    .p2align 2
+    // Can read 5 bytes before possibly crossing a page.
+strcpy_unalign5:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, strcpy_unalign_copy5bytes
+
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign5
+
+strcpy_unalign_copy5bytes:
+    str     r2, [r0], #4
+    strb    r4, [r0]
+    m_pop
+
+strcpy_unalign_copy6bytes:
+    str     r2, [r0], #4
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 4 bytes before possibly crossing a page.
+strcpy_unalign4:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign4
+
+    .p2align 2
+    // Can read 3 bytes before possibly crossing a page.
+strcpy_unalign3:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign3_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, strcpy_unalign3_copy2bytes
+    ldrb    r4, [r1, #2]
+    cbz     r4, strcpy_unalign3_copy3bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    lsrs    lr, r2, #24
+    beq     strcpy_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign3
+
+strcpy_unalign3_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+strcpy_unalign3_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_pop
+
+strcpy_unalign3_copy3bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 2 bytes before possibly crossing a page.
+strcpy_unalign2:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign_copy1byte
+    ldrb    r4, [r1, #1]
+    cbz     r4, strcpy_unalign_copy2bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r2, #0xff0000
+    beq     strcpy_copy3bytes
+    lsrs    ip, r2, #24
+    beq     strcpy_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign2
+
+    .p2align 2
+    // Can read 1 byte before possibly crossing a page.
+strcpy_unalign1:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign_copy1byte
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       strcpy_unalign1
+
+strcpy_unalign_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+strcpy_unalign_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r4, [r0]
+    m_pop
+END(strcpy)
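
The unaligned-copy branch table above (tbb [pc, r3]) picks one of seven loops based on src & 7, and the loop names state how many source bytes can be read before a page boundary might be crossed: because pages are multiples of 8 bytes, an address with src & 7 == k is guaranteed that the next 8 - k bytes do not cross a page boundary (table index 1 therefore dispatches to strcpy_unalign7). As a one-line sketch, under that assumption:

    #include <stdint.h>
    #include <stddef.h>

    /* Bytes starting at src that are guaranteed to lie on the same page as
       src itself (valid because the page size is a multiple of 8). */
    static size_t safe_bytes_on_same_page(const void* src) {
        return 8 - ((uintptr_t)src & 7);
    }
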
diff --git a/libc/arch-arm/cortex-a15/bionic/strlen.S b/libc/arch-arm/cortex-a15/bionic/strlen.S
index d5b8ba4..08f6d19 100644
--- a/libc/arch-arm/cortex-a15/bionic/strlen.S
+++ b/libc/arch-arm/cortex-a15/bionic/strlen.S
@@ -61,34 +61,32 @@
     .thumb_func
 
 ENTRY(strlen)
-    pld [r1, #128]
-    mov r1, r0
+    pld     [r0, #0]
+    mov     r1, r0
 
-    rsb     r3, r0, #0
-    ands    r3, r3, #7
+    ands    r3, r0, #7
     beq     mainloop
 
     // Align to a double word (64 bits).
-    ands    ip, r3, #1
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
     beq     align_to_32
 
     ldrb    r2, [r1], #1
-    cmp     r2, #0
-    beq     update_count_and_return
+    cbz     r2, update_count_and_return
 
 align_to_32:
+    bcc     align_to_64
     ands    ip, r3, #2
     beq     align_to_64
 
     ldrb    r2, [r1], #1
-    cmp     r2, #0
-    beq     update_count_and_return
+    cbz     r2, update_count_and_return
     ldrb    r2, [r1], #1
-    cmp     r2, #0
-    beq     update_count_and_return
+    cbz     r2, update_count_and_return
 
 align_to_64:
-    ands    ip, r3, #4
+    tst     r3, #4
     beq     mainloop
     ldr     r3, [r1], #4
 
@@ -97,6 +95,7 @@
     ands    ip, ip, #0x80808080
     bne     zero_in_second_register
 
+    .p2align 2
 mainloop:
     ldrd    r2, r3, [r1], #8
 
@@ -113,39 +112,54 @@
     bne     zero_in_second_register
     b       mainloop
 
+update_count_and_return:
+    sub     r0, r1, r0
+    sub     r0, r0, #1
+    bx      lr
+
 zero_in_first_register:
-    sub     r1, r1, #4
+    sub     r0, r1, r0
+    lsls    r3, ip, #17
+    bne     sub8_and_return
+    bcs     sub7_and_return
+    lsls    ip, ip, #1
+    bne     sub6_and_return
+
+    sub     r0, r0, #5
+    bx      lr
+
+sub8_and_return:
+    sub     r0, r0, #8
+    bx      lr
+
+sub7_and_return:
+    sub     r0, r0, #7
+    bx      lr
+
+sub6_and_return:
+    sub     r0, r0, #6
+    bx      lr
 
 zero_in_second_register:
     sub     r0, r1, r0
+    lsls    r3, ip, #17
+    bne     sub4_and_return
+    bcs     sub3_and_return
+    lsls    ip, ip, #1
+    bne     sub2_and_return
 
-    // Check for zero in byte 0.
-    ands    r1, ip, #0x80
-    beq     check_byte1
+    sub     r0, r0, #1
+    bx      lr
 
+sub4_and_return:
     sub     r0, r0, #4
     bx      lr
 
-check_byte1:
-    // Check for zero in byte 1.
-    ands    r1, ip, #0x8000
-    beq     check_byte2
-
+sub3_and_return:
     sub     r0, r0, #3
     bx      lr
 
-check_byte2:
-    // Check for zero in byte 2.
-    ands    r1, ip, #0x800000
-    beq     return
-
+sub2_and_return:
     sub     r0, r0, #2
     bx      lr
-
-update_count_and_return:
-    sub     r0, r1, r0
-
-return:
-    sub     r0, r0, #1
-    bx      lr
 END(strlen)
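
The reworked strlen above follows the same structure as the new strcpy/strcat: align, scan 64 bits per iteration with the zero-byte test, then subtract back to the exact NUL position. A compact C sketch of the algorithm, 32 bits per step for brevity; like the assembly it deliberately reads whole words that may extend past the terminator, which is safe only because an aligned word load never crosses a page boundary:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    static size_t strlen_sketch(const char* s) {
        const char* p = s;
        while ((uintptr_t)p & 3) {                       /* align to 4 bytes */
            if (*p == '\0') return (size_t)(p - s);
            ++p;
        }
        for (;;) {
            uint32_t w;
            memcpy(&w, p, sizeof(w));                    /* aligned 32-bit load */
            if ((w - 0x01010101u) & ~w & 0x80808080u) {  /* some byte is zero */
                while (*p != '\0') ++p;                  /* pin down which one */
                return (size_t)(p - s);
            }
            p += 4;
        }
    }
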
diff --git a/libc/arch-arm/cortex-a15/cortex-a15.mk b/libc/arch-arm/cortex-a15/cortex-a15.mk
index 0904e6b..281e424 100644
--- a/libc/arch-arm/cortex-a15/cortex-a15.mk
+++ b/libc/arch-arm/cortex-a15/cortex-a15.mk
@@ -1,6 +1,8 @@
 $(call libc-add-cpu-variant-src,MEMCPY,arch-arm/cortex-a15/bionic/memcpy.S)
 $(call libc-add-cpu-variant-src,MEMSET,arch-arm/cortex-a15/bionic/memset.S)
+$(call libc-add-cpu-variant-src,STRCAT,arch-arm/cortex-a15/bionic/strcat.S)
 $(call libc-add-cpu-variant-src,STRCMP,arch-arm/cortex-a15/bionic/strcmp.S)
+$(call libc-add-cpu-variant-src,STRCPY,arch-arm/cortex-a15/bionic/strcpy.S)
 $(call libc-add-cpu-variant-src,STRLEN,arch-arm/cortex-a15/bionic/strlen.S)
 
 include bionic/libc/arch-arm/generic/generic.mk
diff --git a/libc/arch-arm/cortex-a9/bionic/memcpy.S b/libc/arch-arm/cortex-a9/bionic/memcpy.S
index 70e27b0..4e624d4 100644
--- a/libc/arch-arm/cortex-a9/bionic/memcpy.S
+++ b/libc/arch-arm/cortex-a9/bionic/memcpy.S
@@ -28,6 +28,7 @@
 
 #include <machine/cpu-features.h>
 #include <machine/asm.h>
+#include "libc_events.h"
 
 /*
  * This code assumes it is running on a processor that supports all arm v7
@@ -40,6 +41,13 @@
 
 #define CACHE_LINE_SIZE     32
 
+ENTRY(__memcpy_chk)
+        cmp         r2, r3
+        bgt         fortify_check_failed
+
+        // Fall through to memcpy...
+END(__memcpy_chk)
+
 ENTRY(memcpy)
         .save       {r0, lr}
         /* start preloading as early as possible */
@@ -208,4 +216,21 @@
 6:
         ldmfd       sp!, {r4, r5, r6, r7, r8}
         ldmfd       sp!, {r0, pc}
+
+
+        // Only reached when the __memcpy_chk check fails.
+fortify_check_failed:
+        ldr     r0, error_message
+        ldr     r1, error_code
+1:
+        add     r0, pc
+        bl      __fortify_chk_fail
+error_code:
+        .word   BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
+error_message:
+        .word   error_string-(1b+8)
 END(memcpy)
+
+        .data
+error_string:
+        .string     "memcpy buffer overflow"
diff --git a/libc/arch-arm/cortex-a9/bionic/memset.S b/libc/arch-arm/cortex-a9/bionic/memset.S
index b58aa45..d011430 100644
--- a/libc/arch-arm/cortex-a9/bionic/memset.S
+++ b/libc/arch-arm/cortex-a9/bionic/memset.S
@@ -28,6 +28,7 @@
 
 #include <machine/cpu-features.h>
 #include <machine/asm.h>
+#include "libc_events.h"
 
 /*
  * This code assumes it is running on a processor that supports all arm v7
@@ -36,9 +37,28 @@
 
     .fpu    neon
 
+ENTRY(__memset_chk)
+        cmp         r2, r3
+        bls         done
+
+        ldr         r0, error_message
+        ldr         r1, error_code
+1:
+        add         r0, pc
+        bl          __fortify_chk_fail
+error_code:
+        .word       BIONIC_EVENT_MEMSET_BUFFER_OVERFLOW
+error_message:
+        .word       error_string-(1b+8)
+
+END(__memset_chk)
+
 ENTRY(bzero)
         mov     r2, r1
         mov     r1, #0
+
+done:
+        // Fall through to memset...
 END(bzero)
 
 /* memset() returns its first argument.  */
@@ -150,3 +170,7 @@
         ldmfd       sp!, {r0, r4-r7, lr}
         bx          lr
 END(memset)
+
+        .data
+error_string:
+        .string     "memset buffer overflow"
diff --git a/libc/arch-arm/cortex-a9/bionic/strcat.S b/libc/arch-arm/cortex-a9/bionic/strcat.S
new file mode 100644
index 0000000..0f5baef
--- /dev/null
+++ b/libc/arch-arm/cortex-a9/bionic/strcat.S
@@ -0,0 +1,548 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+    .syntax unified
+
+    .thumb
+    .thumb_func
+
+    .macro m_push
+    push    {r0, r4, r5, lr}
+    .endm // m_push
+
+    .macro m_ret inst
+    \inst   {r0, r4, r5, pc}
+    .endm // m_ret
+
+    .macro m_scan_byte
+    ldrb    r3, [r0]
+    cbz     r3, strcat_r0_scan_done
+    add     r0, #1
+    .endm // m_scan_byte
+
+    .macro m_copy_byte reg, cmd, label
+    ldrb    \reg, [r1], #1
+    strb    \reg, [r0], #1
+    \cmd    \reg, \label
+    .endm // m_copy_byte
+
+ENTRY(strcat)
+    // Quick check to see if src is empty.
+    ldrb        r2, [r1]
+    pld         [r1, #0]
+    cbnz        r2, strcat_continue
+    bx          lr
+
+strcat_continue:
+    // To speed up really small dst strings, unroll checking the first 4 bytes.
+    m_push
+    m_scan_byte
+    m_scan_byte
+    m_scan_byte
+    m_scan_byte
+
+    ands    r3, r0, #7
+    bne     strcat_align_src
+
+    .p2align 2
+strcat_mainloop:
+    ldmia   r0!, {r2, r3}
+
+    pld     [r0, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcat_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcat_zero_in_second_register
+    b       strcat_mainloop
+
+strcat_zero_in_first_register:
+    sub     r0, r0, #4
+
+strcat_zero_in_second_register:
+    // Check for zero in byte 0.
+    tst     ip, #0x80
+    it      ne
+    subne   r0, r0, #4
+    bne     strcat_r0_scan_done
+    // Check for zero in byte 1.
+    tst     ip, #0x8000
+    it      ne
+    subne   r0, r0, #3
+    bne     strcat_r0_scan_done
+    // Check for zero in byte 2.
+    tst     ip, #0x800000
+    it      ne
+    subne   r0, r0, #2
+    it      eq
+    // Zero is in byte 3.
+    subeq   r0, r0, #1
+
+strcat_r0_scan_done:
+    // Unroll the first 8 bytes that will be copied.
+    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
+
+strcpy_finish:
+    m_ret   inst=pop
+
+strcpy_continue:
+    pld     [r1, #0]
+    ands    r3, r0, #7
+    bne     strcpy_align_dst
+
+strcpy_check_src_align:
+    // At this point dst is aligned to a double word, check if src
+    // is also aligned to a double word.
+    ands    r3, r1, #7
+    bne     strcpy_unaligned_copy
+
+    .p2align 2
+strcpy_mainloop:
+    ldmia   r1!, {r2, r3}
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_mainloop
+
+strcpy_zero_in_first_register:
+    lsls    lr, ip, #17
+    itt     ne
+    strbne  r2, [r0]
+    m_ret   inst=popne
+    itt     cs
+    strhcs  r2, [r0]
+    m_ret   inst=popcs
+    lsls    ip, ip, #1
+    itt     eq
+    streq   r2, [r0]
+    m_ret   inst=popeq
+    strh    r2, [r0], #2
+    lsr     r3, r2, #16
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+strcpy_zero_in_second_register:
+    lsls    lr, ip, #17
+    ittt    ne
+    stmiane r0!, {r2}
+    strbne  r3, [r0]
+    m_ret   inst=popne
+    ittt    cs
+    strcs   r2, [r0], #4
+    strhcs  r3, [r0]
+    m_ret   inst=popcs
+    lsls    ip, ip, #1
+    itt     eq
+    stmiaeq r0, {r2, r3}
+    m_ret   inst=popeq
+    stmia   r0!, {r2}
+    strh    r3, [r0], #2
+    lsr     r4, r3, #16
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+strcpy_align_dst:
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     strcpy_align_to_32
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, strcpy_complete
+
+strcpy_align_to_32:
+    bcc     strcpy_align_to_64
+
+    ldrb    r4, [r1], #1
+    strb    r4, [r0], #1
+    cmp     r4, #0
+    it      eq
+    m_ret   inst=popeq
+    ldrb    r5, [r1], #1
+    strb    r5, [r0], #1
+    cmp     r5, #0
+    it      eq
+    m_ret   inst=popeq
+
+strcpy_align_to_64:
+    tst     r3, #4
+    beq     strcpy_check_src_align
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+    stmia   r0!, {r2}
+    b       strcpy_check_src_align
+
+strcpy_complete:
+    m_ret   inst=pop
+
+strcpy_unaligned_copy:
+    // Dst is aligned to a double word, while src is at an unknown alignment.
+    // There are 7 different versions of the unaligned copy code
+    // to prevent overreading the src. The mainloop of every single version
+    // will store 64 bits per loop. The difference is how much of src can
+    // be read without potentially crossing a page boundary.
+    tbb     [pc, r3]
+strcpy_unaligned_branchtable:
+    .byte 0
+    .byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
+
+    .p2align 2
+    // Can read 7 bytes before possibly crossing a page.
+strcpy_unalign7:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r3, [r1]
+    cbz     r3, strcpy_unalign7_copy5bytes
+    ldrb    r4, [r1, #1]
+    cbz     r4, strcpy_unalign7_copy6bytes
+    ldrb    r5, [r1, #2]
+    cbz     r5, strcpy_unalign7_copy7bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    lsrs    ip, r3, #24
+    stmia   r0!, {r2, r3}
+    beq     strcpy_unalign_return
+    b       strcpy_unalign7
+
+strcpy_unalign7_copy5bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0]
+strcpy_unalign_return:
+    m_ret   inst=pop
+
+strcpy_unalign7_copy6bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    m_ret   inst=pop
+
+strcpy_unalign7_copy7bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    strb    r5, [r0], #1
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 6 bytes before possibly crossing a page.
+strcpy_unalign6:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, strcpy_unalign_copy5bytes
+    ldrb    r5, [r1, #1]
+    cbz     r5, strcpy_unalign_copy6bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r3, #0xff0000
+    beq     strcpy_unalign6_copy7bytes
+    lsrs    ip, r3, #24
+    stmia   r0!, {r2, r3}
+    beq     strcpy_unalign_return
+    b       strcpy_unalign6
+
+strcpy_unalign6_copy7bytes:
+    stmia   r0!, {r2}
+    strh    r3, [r0], #2
+    lsr     r3, #16
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 5 bytes before possibly crossing a page.
+strcpy_unalign5:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, strcpy_unalign_copy5bytes
+
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign5
+
+strcpy_unalign_copy5bytes:
+    stmia   r0!, {r2}
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign_copy6bytes:
+    stmia   r0!, {r2}
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 4 bytes before possibly crossing a page.
+strcpy_unalign4:
+    ldmia   r1!, {r2}
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldmia   r1!, {r3}
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign4
+
+    .p2align 2
+    // Can read 3 bytes before possibly crossing a page.
+strcpy_unalign3:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign3_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, strcpy_unalign3_copy2bytes
+    ldrb    r4, [r1, #2]
+    cbz     r4, strcpy_unalign3_copy3bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    lsrs    lr, r2, #24
+    beq     strcpy_unalign_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign3
+
+strcpy_unalign3_copy1byte:
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign3_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign3_copy3bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 2 bytes before possibly crossing a page.
+strcpy_unalign2:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, strcpy_unalign_copy2bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r2, #0xff0000
+    beq     strcpy_unalign_copy3bytes
+    lsrs    ip, r2, #24
+    beq     strcpy_unalign_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign2
+
+    .p2align 2
+    // Can read 1 byte before possibly crossing a page.
+strcpy_unalign1:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign_copy1byte
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign1
+
+strcpy_unalign_copy1byte:
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign_copy3bytes:
+    strh    r2, [r0], #2
+    lsr     r2, #16
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign_copy4bytes:
+    stmia   r0, {r2}
+    m_ret   inst=pop
+
+strcat_align_src:
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     strcat_align_to_32
+    ldrb    r2, [r0], #1
+    cbz     r2, strcat_r0_update
+
+strcat_align_to_32:
+    bcc     strcat_align_to_64
+    ldrb    r2, [r0], #1
+    cbz     r2, strcat_r0_update
+    ldrb    r2, [r0], #1
+    cbz     r2, strcat_r0_update
+
+strcat_align_to_64:
+    tst     r3, #4
+    beq     strcat_mainloop
+    ldr     r3, [r0], #4
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcat_zero_in_second_register
+    b       strcat_mainloop
+
+strcat_r0_update:
+    sub     r0, r0, #1
+    b       strcat_r0_scan_done
+END(strcat)
diff --git a/libc/arch-arm/cortex-a9/bionic/strcpy.S b/libc/arch-arm/cortex-a9/bionic/strcpy.S
new file mode 100644
index 0000000..9aa4f88
--- /dev/null
+++ b/libc/arch-arm/cortex-a9/bionic/strcpy.S
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+    .syntax unified
+
+    .thumb
+    .thumb_func
+
+    .macro m_push
+    push    {r0, r4, r5, lr}
+    .endm // m_push
+
+    .macro m_ret inst
+    \inst   {r0, r4, r5, pc}
+    .endm // m_ret
+
+    .macro m_copy_byte reg, cmd, label
+    ldrb    \reg, [r1], #1
+    strb    \reg, [r0], #1
+    \cmd    \reg, \label
+    .endm // m_copy_byte
+
+ENTRY(strcpy)
+    // Unroll the first 8 bytes that will be copied.
+    m_push
+    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
+    m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
+
+strcpy_finish:
+    m_ret   inst=pop
+
+strcpy_continue:
+    pld     [r1, #0]
+    ands    r3, r0, #7
+    bne     strcpy_align_dst
+
+strcpy_check_src_align:
+    // At this point dst is aligned to a double word, check if src
+    // is also aligned to a double word.
+    ands    r3, r1, #7
+    bne     strcpy_unaligned_copy
+
+    .p2align 2
+strcpy_mainloop:
+    ldmia   r1!, {r2, r3}
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_mainloop
+
+strcpy_zero_in_first_register:
+    lsls    lr, ip, #17
+    itt     ne
+    strbne  r2, [r0]
+    m_ret   inst=popne
+    itt     cs
+    strhcs  r2, [r0]
+    m_ret   inst=popcs
+    lsls    ip, ip, #1
+    itt     eq
+    streq   r2, [r0]
+    m_ret   inst=popeq
+    strh    r2, [r0], #2
+    lsr     r3, r2, #16
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+strcpy_zero_in_second_register:
+    lsls    lr, ip, #17
+    ittt    ne
+    stmiane r0!, {r2}
+    strbne  r3, [r0]
+    m_ret   inst=popne
+    ittt    cs
+    strcs   r2, [r0], #4
+    strhcs  r3, [r0]
+    m_ret   inst=popcs
+    lsls    ip, ip, #1
+    itt     eq
+    stmiaeq r0, {r2, r3}
+    m_ret   inst=popeq
+    stmia   r0!, {r2}
+    strh    r3, [r0], #2
+    lsr     r4, r3, #16
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+strcpy_align_dst:
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     strcpy_align_to_32
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, strcpy_complete
+
+strcpy_align_to_32:
+    bcc     strcpy_align_to_64
+
+    ldrb    r4, [r1], #1
+    strb    r4, [r0], #1
+    cmp     r4, #0
+    it      eq
+    m_ret   inst=popeq
+    ldrb    r5, [r1], #1
+    strb    r5, [r0], #1
+    cmp     r5, #0
+    it      eq
+    m_ret   inst=popeq
+
+strcpy_align_to_64:
+    tst     r3, #4
+    beq     strcpy_check_src_align
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+    stmia   r0!, {r2}
+    b       strcpy_check_src_align
+
+strcpy_complete:
+    m_ret   inst=pop
+
+strcpy_unaligned_copy:
+    // Dst is aligned to a double word, while src is at an unknown alignment.
+    // There are 7 different versions of the unaligned copy code
+    // to prevent overreading the src. The mainloop of every single version
+    // will store 64 bits per loop. The difference is how much of src can
+    // be read without potentially crossing a page boundary.
+    tbb     [pc, r3]
+strcpy_unaligned_branchtable:
+    .byte 0
+    .byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
+    .byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
+
+    .p2align 2
+    // Can read 7 bytes before possibly crossing a page.
+strcpy_unalign7:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r3, [r1]
+    cbz     r3, strcpy_unalign7_copy5bytes
+    ldrb    r4, [r1, #1]
+    cbz     r4, strcpy_unalign7_copy6bytes
+    ldrb    r5, [r1, #2]
+    cbz     r5, strcpy_unalign7_copy7bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    lsrs    ip, r3, #24
+    stmia   r0!, {r2, r3}
+    beq     strcpy_unalign_return
+    b       strcpy_unalign7
+
+strcpy_unalign7_copy5bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0]
+strcpy_unalign_return:
+    m_ret   inst=pop
+
+strcpy_unalign7_copy6bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    m_ret   inst=pop
+
+strcpy_unalign7_copy7bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    strb    r5, [r0], #1
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 6 bytes before possibly crossing a page.
+strcpy_unalign6:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, strcpy_unalign_copy5bytes
+    ldrb    r5, [r1, #1]
+    cbz     r5, strcpy_unalign_copy6bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r3, #0xff0000
+    beq     strcpy_unalign6_copy7bytes
+    lsrs    ip, r3, #24
+    stmia   r0!, {r2, r3}
+    beq     strcpy_unalign_return
+    b       strcpy_unalign6
+
+strcpy_unalign6_copy7bytes:
+    stmia   r0!, {r2}
+    strh    r3, [r0], #2
+    lsr     r3, #16
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 5 bytes before possibly crossing a page.
+strcpy_unalign5:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, strcpy_unalign_copy5bytes
+
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign5
+
+strcpy_unalign_copy5bytes:
+    stmia   r0!, {r2}
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign_copy6bytes:
+    stmia   r0!, {r2}
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 4 bytes before possibly crossing a page.
+strcpy_unalign4:
+    ldmia   r1!, {r2}
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    ldmia   r1!, {r3}
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign4
+
+    .p2align 2
+    // Can read 3 bytes before possibly crossing a page.
+strcpy_unalign3:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign3_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, strcpy_unalign3_copy2bytes
+    ldrb    r4, [r1, #2]
+    cbz     r4, strcpy_unalign3_copy3bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    lsrs    lr, r2, #24
+    beq     strcpy_unalign_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign3
+
+strcpy_unalign3_copy1byte:
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign3_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign3_copy3bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 2 bytes before possibly crossing a page.
+strcpy_unalign2:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, strcpy_unalign_copy2bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r2, #0xff0000
+    beq     strcpy_unalign_copy3bytes
+    lsrs    ip, r2, #24
+    beq     strcpy_unalign_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign2
+
+    .p2align 2
+    // Can read 1 byte before possibly crossing a page.
+strcpy_unalign1:
+    ldrb    r2, [r1]
+    cbz     r2, strcpy_unalign_copy1byte
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     strcpy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       strcpy_unalign1
+
+strcpy_unalign_copy1byte:
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign_copy3bytes:
+    strh    r2, [r0], #2
+    lsr     r2, #16
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+strcpy_unalign_copy4bytes:
+    stmia   r0, {r2}
+    m_ret   inst=pop
+END(strcpy)
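
Note: strcpy_unaligned_copy dispatches through the tbb table on src & 7 because a page boundary is always 8-byte aligned: a source pointer that is k bytes into an 8-byte granule can read the remaining 8 - k bytes without touching the next page, so each strcpy_unalignN loop byte-probes only until an aligned word load is provably safe. A hedged sketch of that bound (purely illustrative, not part of the patch):

    #include <stdint.h>

    // Bytes guaranteed readable starting at src before the next page could begin.
    static inline unsigned safe_prefix(const char* src) {
        return 8u - ((uintptr_t)src & 7u);
    }
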
diff --git a/libc/arch-arm/cortex-a9/bionic/strlen.S b/libc/arch-arm/cortex-a9/bionic/strlen.S
new file mode 100644
index 0000000..259eda0
--- /dev/null
+++ b/libc/arch-arm/cortex-a9/bionic/strlen.S
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+    .syntax unified
+
+    .thumb
+    .thumb_func
+
+ENTRY(strlen)
+    pld     [r0, #0]
+    mov     r1, r0
+
+    ands    r3, r0, #7
+    bne     align_src
+
+    .p2align 2
+mainloop:
+    ldmia   r1!, {r2, r3}
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     zero_in_second_register
+    b       mainloop
+
+zero_in_first_register:
+    sub     r0, r1, r0
+    // Check for zero in byte 0.
+    lsls    r2, ip, #17
+    beq     check_byte1_reg1
+
+    sub     r0, r0, #8
+    bx      lr
+
+check_byte1_reg1:
+    bcc     check_byte2_reg1
+
+    sub     r0, r0, #7
+    bx      lr
+
+check_byte2_reg1:
+    // Check for zero in byte 2.
+    tst     ip, #0x800000
+    itt     ne
+    subne   r0, r0, #6
+    bxne    lr
+    sub     r0, r0, #5
+    bx      lr
+
+zero_in_second_register:
+    sub     r0, r1, r0
+    // Check for zero in byte 0.
+    lsls    r2, ip, #17
+    beq     check_byte1_reg2
+
+    sub     r0, r0, #4
+    bx      lr
+
+check_byte1_reg2:
+    bcc     check_byte2_reg2
+
+    sub     r0, r0, #3
+    bx      lr
+
+check_byte2_reg2:
+    // Check for zero in byte 2.
+    tst     ip, #0x800000
+    itt     ne
+    subne   r0, r0, #2
+    bxne    lr
+    sub     r0, r0, #1
+    bx      lr
+
+align_src:
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     align_to_32
+
+    ldrb    r2, [r1], #1
+    cbz     r2, done
+
+align_to_32:
+    bcc     align_to_64
+
+    ldrb    r2, [r1], #1
+    cbz     r2, done
+    ldrb    r2, [r1], #1
+    cbz     r2, done
+
+align_to_64:
+    tst     r3, #4
+    beq     mainloop
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     zero_in_second_register
+    b       mainloop
+
+done:
+    sub     r0, r1, r0
+    sub     r0, r0, #1
+    bx      lr
+END(strlen)
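
Note: once the mask flags a zero byte, strlen only needs the index of the lowest flagged byte (the loads are little-endian), which the lsls/tst sequences above decode with a couple of shifts and flag tests. An equivalent C formulation, offered as a sketch rather than a transcription of the assembly:

    // Index (0..3) of the first zero byte in a little-endian word, given
    // mask = (w - 0x01010101) & ~w & 0x80808080 (nonzero).
    static inline int first_zero_byte_index(unsigned mask) {
        return __builtin_ctz(mask) >> 3;
    }
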
diff --git a/libc/arch-arm/cortex-a9/cortex-a9.mk b/libc/arch-arm/cortex-a9/cortex-a9.mk
index 5c684ed..61a52c2 100644
--- a/libc/arch-arm/cortex-a9/cortex-a9.mk
+++ b/libc/arch-arm/cortex-a9/cortex-a9.mk
@@ -1,7 +1,8 @@
 $(call libc-add-cpu-variant-src,MEMCPY,arch-arm/cortex-a9/bionic/memcpy.S)
 $(call libc-add-cpu-variant-src,MEMSET,arch-arm/cortex-a9/bionic/memset.S)
+$(call libc-add-cpu-variant-src,STRCAT,arch-arm/cortex-a9/bionic/strcat.S)
 $(call libc-add-cpu-variant-src,STRCMP,arch-arm/cortex-a9/bionic/strcmp.S)
-# Use cortex-a15 version of strlen.
-$(call libc-add-cpu-variant-src,STRLEN,arch-arm/cortex-a15/bionic/strlen.S)
+$(call libc-add-cpu-variant-src,STRCPY,arch-arm/cortex-a9/bionic/strcpy.S)
+$(call libc-add-cpu-variant-src,STRLEN,arch-arm/cortex-a9/bionic/strlen.S)
 
 include bionic/libc/arch-arm/generic/generic.mk
diff --git a/libc/arch-arm/generic/bionic/memcpy.S b/libc/arch-arm/generic/bionic/memcpy.S
index 6890a55..24373d8 100644
--- a/libc/arch-arm/generic/bionic/memcpy.S
+++ b/libc/arch-arm/generic/bionic/memcpy.S
@@ -28,6 +28,7 @@
 
 #include <machine/cpu-features.h>
 #include <machine/asm.h>
+#include "libc_events.h"
 
         /*
          * Optimized memcpy() for ARM.
@@ -36,6 +37,13 @@
          * so we have to preserve R0.
          */
 
+ENTRY(__memcpy_chk)
+        cmp         r2, r3
+        bgt         fortify_check_failed
+
+        // Fall through to memcpy...
+END(__memcpy_chk)
+
 ENTRY(memcpy)
         /* The stack must always be 64-bits aligned to be compliant with the
          * ARM ABI. Since we have to save R0, we might as well save R4
@@ -377,4 +385,20 @@
         add         sp,  sp, #28
         ldmfd       sp!, {r0, r4, lr}
         bx          lr
+
+        // Only reached when the __memcpy_chk check fails.
+fortify_check_failed:
+        ldr     r0, error_message
+        ldr     r1, error_code
+1:
+        add     r0, pc
+        bl      __fortify_chk_fail
+error_code:
+        .word   BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
+error_message:
+        .word   error_string-(1b+8)
 END(memcpy)
+
+        .data
+error_string:
+        .string     "memcpy buffer overflow"
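
Note: __memcpy_chk receives the usual memcpy arguments in r0-r2 plus the destination object size in r3, so the two added instructions reject any copy longer than the destination before falling through to the unchanged memcpy body; the PC-relative error_code/error_message words feed __fortify_chk_fail. A rough C counterpart of that prologue (a sketch of the convention, not code from this patch):

    #include <string.h>
    #include "libc_events.h"    // BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
    #include "libc_logging.h"   // __fortify_chk_fail

    void* __memcpy_chk(void* dst, const void* src, size_t n, size_t dst_len) {
        if (n > dst_len) {
            __fortify_chk_fail("memcpy buffer overflow",
                               BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW);
        }
        return memcpy(dst, src, n);
    }
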
diff --git a/libc/arch-arm/generic/bionic/memset.S b/libc/arch-arm/generic/bionic/memset.S
index 3c034e0..399bae9 100644
--- a/libc/arch-arm/generic/bionic/memset.S
+++ b/libc/arch-arm/generic/bionic/memset.S
@@ -27,6 +27,7 @@
  */
 
 #include <machine/asm.h>
+#include "libc_events.h"
 
         /*
          * Optimized memset() for ARM.
@@ -34,9 +35,28 @@
          * memset() returns its first argument.
          */
 
+ENTRY(__memset_chk)
+        cmp         r2, r3
+        bls         done
+
+        ldr         r0, error_message
+        ldr         r1, error_code
+1:
+        add         r0, pc
+        bl          __fortify_chk_fail
+error_code:
+        .word       BIONIC_EVENT_MEMSET_BUFFER_OVERFLOW
+error_message:
+        .word       error_string-(1b+8)
+
+END(__memset_chk)
+
 ENTRY(bzero)
         mov     r2, r1
         mov     r1, #0
+
+done:
+        // Fall through to memset...
 END(bzero)
 
 ENTRY(memset)
@@ -107,3 +127,7 @@
         ldmfd       sp!, {r0, r4-r7, lr}
         bx          lr
 END(memset)
+
+        .data
+error_string:
+        .string     "memset buffer overflow"
diff --git a/libc/arch-arm/bionic/strcpy.S b/libc/arch-arm/generic/bionic/strcpy.S
similarity index 100%
rename from libc/arch-arm/bionic/strcpy.S
rename to libc/arch-arm/generic/bionic/strcpy.S
diff --git a/libc/arch-arm/generic/generic.mk b/libc/arch-arm/generic/generic.mk
index 18cad9d..c3a5aa5 100644
--- a/libc/arch-arm/generic/generic.mk
+++ b/libc/arch-arm/generic/generic.mk
@@ -1,4 +1,6 @@
 $(call libc-add-cpu-variant-src,MEMCPY,arch-arm/generic/bionic/memcpy.S)
 $(call libc-add-cpu-variant-src,MEMSET,arch-arm/generic/bionic/memset.S)
+$(call libc-add-cpu-variant-src,STRCAT,string/strcat.c)
 $(call libc-add-cpu-variant-src,STRCMP,arch-arm/generic/bionic/strcmp.S)
+$(call libc-add-cpu-variant-src,STRCPY,arch-arm/generic/bionic/strcpy.S)
 $(call libc-add-cpu-variant-src,STRLEN,arch-arm/generic/bionic/strlen.c)
diff --git a/libc/arch-arm/krait/bionic/memcpy.S b/libc/arch-arm/krait/bionic/memcpy.S
index 0cd4d44..3afe18c 100644
--- a/libc/arch-arm/krait/bionic/memcpy.S
+++ b/libc/arch-arm/krait/bionic/memcpy.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -30,6 +30,7 @@
 
 #include <machine/cpu-features.h>
 #include <machine/asm.h>
+#include "libc_events.h"
 
 /*
  * This code assumes it is running on a processor that supports all arm v7
@@ -37,17 +38,23 @@
  * cache line.
  */
 
+#define CACHE_LINE_SIZE     32
+
         .text
         .fpu    neon
 
-#define CACHE_LINE_SIZE     32
+ENTRY(__memcpy_chk)
+        cmp         r2, r3
+        bgt         fortify_check_failed
+
+        // Fall through to memcpy...
+END(__memcpy_chk)
 
 ENTRY(memcpy)
         .save       {r0, lr}
         /* start preloading as early as possible */
-        pld         [r1, #(CACHE_LINE_SIZE*0)]
+        pld         [r1, #(CACHE_LINE_SIZE*4)]
         stmfd       sp!, {r0, lr}
-        pld         [r1, #(CACHE_LINE_SIZE*2)]
 
         /* do we have at least 16-bytes to copy (needed for alignment below) */
         cmp         r2, #16
@@ -56,7 +63,7 @@
         /* align destination to cache-line for the write-buffer */
         rsb         r3, r0, #0
         ands        r3, r3, #0xF
-        beq         0f
+        beq         2f
 
         /* copy up to 15-bytes (count in r3) */
         sub         r2, r2, r3
@@ -76,47 +83,29 @@
         // copies 8 bytes, destination 64-bits aligned
         vld1.8      {d0}, [r1]!
         vst1.8      {d0}, [r0, :64]!
-2:
 
-0:      /* preload immediately the next cache line, which we may need */
-        pld         [r1, #(CACHE_LINE_SIZE*0)]
-        pld         [r1, #(CACHE_LINE_SIZE*2)]
-
-        /* make sure we have at least 64 bytes to copy */
+2:      /* make sure we have at least 64 bytes to copy */
         subs        r2, r2, #64
         blo         2f
 
-        /* Preload all the cache lines we need.
-         * NOTE: The number of pld below depends on CACHE_LINE_SIZE,
-         * ideally we would increase the distance in the main loop to
-         * avoid the goofy code below. In practice this doesn't seem to make
-         * a big difference.
-         * NOTE: The value CACHE_LINE_SIZE * 8 was chosen through
-         * experimentation.
-         */
-        pld         [r1, #(CACHE_LINE_SIZE*4)]
-        pld         [r1, #(CACHE_LINE_SIZE*6)]
-        pld         [r1, #(CACHE_LINE_SIZE*8)]
-
 1:      /* The main loop copies 64 bytes at a time */
         vld1.8      {d0  - d3},   [r1]!
         vld1.8      {d4  - d7},   [r1]!
-        pld         [r1, #(CACHE_LINE_SIZE*8)]
+        pld         [r1, #(CACHE_LINE_SIZE*2)]
         subs        r2, r2, #64
         vst1.8      {d0  - d3},   [r0, :128]!
         vst1.8      {d4  - d7},   [r0, :128]!
         bhs         1b
 
 2:      /* fix-up the remaining count and make sure we have >= 32 bytes left */
-        add         r2, r2, #64
-        subs        r2, r2, #32
+        adds        r2, r2, #32
         blo         4f
 
-3:      /* 32 bytes at a time. These cache lines were already preloaded */
+        /* Copy 32 bytes. These cache lines were already preloaded */
         vld1.8      {d0 - d3},  [r1]!
-        subs        r2, r2, #32
+        sub         r2, r2, #32
         vst1.8      {d0 - d3},  [r0, :128]!
-        bhs         3b
+
 4:      /* less than 32 left */
         add         r2, r2, #32
         tst         r2, #0x10
@@ -143,4 +132,20 @@
 
         ldmfd       sp!, {r0, lr}
         bx          lr
+
+        // Only reached when the __memcpy_chk check fails.
+fortify_check_failed:
+        ldr     r0, error_message
+        ldr     r1, error_code
+1:
+        add     r0, pc
+        bl      __fortify_chk_fail
+error_code:
+        .word   BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW
+error_message:
+        .word   error_string-(1b+8)
 END(memcpy)
+
+        .data
+error_string:
+        .string     "memcpy buffer overflow"
diff --git a/libc/arch-arm/krait/bionic/memset.S b/libc/arch-arm/krait/bionic/memset.S
index a2e2d80..4e4788b 100644
--- a/libc/arch-arm/krait/bionic/memset.S
+++ b/libc/arch-arm/krait/bionic/memset.S
@@ -28,6 +28,7 @@
 
 #include <machine/cpu-features.h>
 #include <machine/asm.h>
+#include "libc_events.h"
 
 /*
  * This code assumes it is running on a processor that supports all arm v7
@@ -37,9 +38,28 @@
 
     .fpu    neon
 
+ENTRY(__memset_chk)
+        cmp         r2, r3
+        bls         done
+
+        ldr         r0, error_message
+        ldr         r1, error_code
+1:
+        add         r0, pc
+        bl          __fortify_chk_fail
+error_code:
+        .word       BIONIC_EVENT_MEMSET_BUFFER_OVERFLOW
+error_message:
+        .word       error_string-(1b+8)
+
+END(__memset_chk)
+
 ENTRY(bzero)
         mov     r2, r1
         mov     r1, #0
+
+done:
+        // Fall through to memset...
 END(bzero)
 
 /* memset() returns its first argument.  */
@@ -79,3 +99,7 @@
         ldmfd       sp!, {r0}
         bx          lr
 END(memset)
+
+        .data
+error_string:
+        .string     "memset buffer overflow"
diff --git a/libc/arch-arm/krait/krait.mk b/libc/arch-arm/krait/krait.mk
index 288afbb..1ff18e9 100644
--- a/libc/arch-arm/krait/krait.mk
+++ b/libc/arch-arm/krait/krait.mk
@@ -1,7 +1,9 @@
 $(call libc-add-cpu-variant-src,MEMCPY,arch-arm/krait/bionic/memcpy.S)
 $(call libc-add-cpu-variant-src,MEMSET,arch-arm/krait/bionic/memset.S)
 $(call libc-add-cpu-variant-src,STRCMP,arch-arm/krait/bionic/strcmp.S)
-# Use cortex-a15 version of strlen.
+# Use cortex-a15 versions of strcat/strcpy/strlen.
+$(call libc-add-cpu-variant-src,STRCAT,arch-arm/cortex-a15/bionic/strcat.S)
+$(call libc-add-cpu-variant-src,STRCPY,arch-arm/cortex-a15/bionic/strcpy.S)
 $(call libc-add-cpu-variant-src,STRLEN,arch-arm/cortex-a15/bionic/strlen.S)
 
 include bionic/libc/arch-arm/generic/generic.mk
diff --git a/libc/string/__strrchr_chk.c b/libc/bionic/__strrchr_chk.cpp
similarity index 82%
rename from libc/string/__strrchr_chk.c
rename to libc/bionic/__strrchr_chk.cpp
index c1e5d66..14100f7 100644
--- a/libc/string/__strrchr_chk.c
+++ b/libc/bionic/__strrchr_chk.cpp
@@ -31,18 +31,17 @@
 #include <string.h>
 #include "libc_logging.h"
 
-char *
-__strrchr_chk(const char *p, int ch, size_t s_len)
+extern "C" char* __strrchr_chk(const char *p, int ch, size_t s_len)
 {
-	char *save;
+    char *save;
 
-	for (save = NULL;; ++p, s_len--) {
-		if (s_len == 0)
-			__fortify_chk_fail("strrchr read beyond buffer", 0);
-		if (*p == (char) ch)
-			save = (char *)p;
-		if (!*p)
-			return(save);
-	}
-	/* NOTREACHED */
+    for (save = NULL;; ++p, s_len--) {
+        if (s_len == 0)
+            __fortify_chk_fail("strrchr read beyond buffer", 0);
+        if (*p == (char) ch)
+            save = (char *)p;
+        if (!*p)
+            return(save);
+    }
+    /* NOTREACHED */
 }
diff --git a/libc/bionic/dlmalloc.c b/libc/bionic/dlmalloc.c
index 78f2e1d..66a825b 100644
--- a/libc/bionic/dlmalloc.c
+++ b/libc/bionic/dlmalloc.c
@@ -16,6 +16,7 @@
 
 #include "dlmalloc.h"
 
+#include "private/bionic_name_mem.h"
 #include "private/libc_logging.h"
 
 // Send dlmalloc errors to the log.
@@ -25,6 +26,11 @@
 #define CORRUPTION_ERROR_ACTION(m) __bionic_heap_corruption_error(__FUNCTION__)
 #define USAGE_ERROR_ACTION(m,p) __bionic_heap_usage_error(__FUNCTION__, p)
 
+/* Bionic named anonymous memory declarations */
+static void* named_anonymous_mmap(size_t length);
+#define MMAP(s) named_anonymous_mmap(s)
+#define DIRECT_MMAP(s) named_anonymous_mmap(s)
+
 // Ugly inclusion of C file so that bionic specific #defines configure dlmalloc.
 #include "../upstream-dlmalloc/malloc.c"
 
@@ -42,3 +48,15 @@
   // TODO: improve the debuggerd protocol so we can tell it to dump an address when we abort.
   *((int**) 0xdeadbaad) = (int*) address;
 }
+
+static void* named_anonymous_mmap(size_t length)
+{
+    void* ret;
+    ret = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+    if (ret == MAP_FAILED)
+        return ret;
+
+    __bionic_name_mem(ret, length, "libc_malloc");
+
+    return ret;
+}
diff --git a/libc/bionic/name_mem.c b/libc/bionic/name_mem.c
new file mode 100644
index 0000000..69e10c2
--- /dev/null
+++ b/libc/bionic/name_mem.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "private/bionic_name_mem.h"
+
+/*
+ * Local definitions of custom prctl arguments to set a vma name in some kernels
+ */
+#define BIONIC_PR_SET_VMA               0x53564d41
+#define BIONIC_PR_SET_VMA_ANON_NAME     0
+
+/*
+ * Names a region of memory.  The name is expected to show up in /proc/pid/maps
+ * and /proc/pid/smaps.  There is no guarantee that it will work, and if it
+ * does work it is likely to only work on memory that was allocated with
+ * mmap(MAP_ANONYMOUS), and only on regions that are page aligned.  name should
+ * be a pointer to a string that is valid for as long as the memory is mapped,
+ * preferably a compile-time constant string.
+ *
+ * Returns -1 on error and sets errno.  If it fails when naming page-aligned
+ * anonymous memory, the kernel doesn't support naming, and an alternate
+ * method of naming memory should be used (like ashmem).
+ */
+int __bionic_name_mem(void *addr, size_t len, const char *name)
+{
+    return prctl(BIONIC_PR_SET_VMA, BIONIC_PR_SET_VMA_ANON_NAME,
+                 addr, len, name);
+}
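
Note: callers are expected to treat __bionic_name_mem as best-effort, exactly as dlmalloc's named_anonymous_mmap above does. A minimal usage sketch (the mapping size and label are illustrative):

    #include <sys/mman.h>
    #include "private/bionic_name_mem.h"

    void* p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p != MAP_FAILED) {
        // Result ignored on purpose: kernels without PR_SET_VMA return -1 with errno set.
        __bionic_name_mem(p, 4096, "example_label");
    }
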
diff --git a/libc/include/netdb.h b/libc/include/netdb.h
index 3ea512c..62a7a3c 100644
--- a/libc/include/netdb.h
+++ b/libc/include/netdb.h
@@ -207,13 +207,13 @@
 void endservent(void);
 void freehostent(struct hostent *);
 struct hostent	*gethostbyaddr(const void *, socklen_t, int);
-struct hostent	*android_gethostbyaddrforiface(const void *, socklen_t, int, const char*);
+struct hostent	*android_gethostbyaddrforiface(const void *, socklen_t, int, const char*, int);
 int gethostbyaddr_r(const void *, int, int, struct hostent *, char *, size_t, struct hostent **, int *);
 struct hostent	*gethostbyname(const char *);
 int gethostbyname_r(const char *, struct hostent *, char *, size_t, struct hostent **, int *);
 struct hostent	*gethostbyname2(const char *, int);
 int gethostbyname2_r(const char *, int, struct hostent *, char *, size_t, struct hostent **, int *);
-struct hostent	*android_gethostbynameforiface(const char *, int, const char *);
+struct hostent	*android_gethostbynameforiface(const char *, int, const char *, int);
 struct hostent	*gethostent(void);
 int gethostent_r(struct hostent *, char *, size_t, struct hostent **, int *);
 struct hostent	*getipnodebyaddr(const void *, size_t, int, int *);
@@ -241,9 +241,9 @@
 void setnetent(int);
 void setprotoent(int);
 int getaddrinfo(const char *, const char *, const struct addrinfo *, struct addrinfo **);
-int android_getaddrinfoforiface(const char *, const char *, const struct addrinfo *, const char *, struct addrinfo **);
+int android_getaddrinfoforiface(const char *, const char *, const struct addrinfo *, const char *, int, struct addrinfo **);
 int getnameinfo(const struct sockaddr *, socklen_t, char *, size_t, char *, size_t, int);
-int android_getnameinfoforiface(const struct sockaddr *, socklen_t, char *, size_t, char *, size_t, int, const char *);
+int android_getnameinfoforiface(const struct sockaddr *, socklen_t, char *, size_t, char *, size_t, int, const char *, int);
 void freeaddrinfo(struct addrinfo *);
 const char	*gai_strerror(int);
 void setnetgrent(const char *);
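
Note: each *foriface entry point gains a trailing int mark that is eventually applied to the resolver socket with SO_MARK (see res_send.c further below), letting policy routing steer DNS traffic per query. A hedged call-site sketch against the widened android_getaddrinfoforiface signature (the interface name and mark value are illustrative):

    #include <sys/socket.h>
    #include <netdb.h>

    struct addrinfo hints = { .ai_family = AF_UNSPEC, .ai_socktype = SOCK_STREAM };
    struct addrinfo* res = NULL;
    if (android_getaddrinfoforiface("www.example.com", "80", &hints,
                                    "wlan0", /*mark=*/0x10000, &res) == 0) {
        /* ... use res ... */
        freeaddrinfo(res);
    }
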
diff --git a/libc/kernel/common/linux/kexec.h b/libc/kernel/common/linux/kexec.h
deleted file mode 100644
index 1dfe07c..0000000
--- a/libc/kernel/common/linux/kexec.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/****************************************************************************
- ****************************************************************************
- ***
- ***   This header was automatically generated from a Linux kernel header
- ***   of the same name, to make information necessary for userspace to
- ***   call into the kernel available to libc.  It contains only constants,
- ***   structures, and macros generated from the original header, and thus,
- ***   contains no copyrightable information.
- ***
- ***   To edit the content of this header, modify the corresponding
- ***   source file (e.g. under external/kernel-headers/original/) then
- ***   run bionic/libc/kernel/tools/update_all.py
- ***
- ***   Any manual change here will be lost the next time this script will
- ***   be run. You've been warned!
- ***
- ****************************************************************************
- ****************************************************************************/
-#ifndef LINUX_KEXEC_H
-#define LINUX_KEXEC_H
-struct pt_regs;
-struct task_struct;
-/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
-#endif
diff --git a/libc/kernel/common/uapi/linux/kexec.h b/libc/kernel/common/uapi/linux/kexec.h
new file mode 100644
index 0000000..977fee6
--- /dev/null
+++ b/libc/kernel/common/uapi/linux/kexec.h
@@ -0,0 +1,49 @@
+/****************************************************************************
+ ****************************************************************************
+ ***
+ ***   This header was automatically generated from a Linux kernel header
+ ***   of the same name, to make information necessary for userspace to
+ ***   call into the kernel available to libc.  It contains only constants,
+ ***   structures, and macros generated from the original header, and thus,
+ ***   contains no copyrightable information.
+ ***
+ ***   To edit the content of this header, modify the corresponding
+ ***   source file (e.g. under external/kernel-headers/original/) then
+ ***   run bionic/libc/kernel/tools/update_all.py
+ ***
+ ***   Any manual change here will be lost the next time this script will
+ ***   be run. You've been warned!
+ ***
+ ****************************************************************************
+ ****************************************************************************/
+#ifndef _UAPILINUX_KEXEC_H
+#define _UAPILINUX_KEXEC_H
+#include <linux/types.h>
+#define KEXEC_ON_CRASH 0x00000001
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define KEXEC_PRESERVE_CONTEXT 0x00000002
+#define KEXEC_ARCH_MASK 0xffff0000
+#define KEXEC_ARCH_DEFAULT ( 0 << 16)
+#define KEXEC_ARCH_386 ( 3 << 16)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define KEXEC_ARCH_X86_64 (62 << 16)
+#define KEXEC_ARCH_PPC (20 << 16)
+#define KEXEC_ARCH_PPC64 (21 << 16)
+#define KEXEC_ARCH_IA_64 (50 << 16)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define KEXEC_ARCH_ARM (40 << 16)
+#define KEXEC_ARCH_S390 (22 << 16)
+#define KEXEC_ARCH_SH (42 << 16)
+#define KEXEC_ARCH_MIPS_LE (10 << 16)
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#define KEXEC_ARCH_MIPS ( 8 << 16)
+#define KEXEC_SEGMENT_MAX 16
+struct kexec_segment {
+ const void *buf;
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+ size_t bufsz;
+ const void *mem;
+ size_t memsz;
+};
+/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
+#endif
diff --git a/libc/netbsd/gethnamaddr.c b/libc/netbsd/gethnamaddr.c
index ee5052e..5b2f987 100644
--- a/libc/netbsd/gethnamaddr.c
+++ b/libc/netbsd/gethnamaddr.c
@@ -126,7 +126,7 @@
 static int _dns_gethtbyaddr(void *, void *, va_list);
 static int _dns_gethtbyname(void *, void *, va_list);
 
-static struct hostent *gethostbyname_internal(const char *, int, res_state, const char *);
+static struct hostent *gethostbyname_internal(const char *, int, res_state, const char *, int);
 
 static const ns_src default_dns_files[] = {
 	{ NSSRC_FILES, 	NS_SUCCESS },
@@ -497,13 +497,13 @@
 
 	/* try IPv6 first - if that fails do IPv4 */
 	if (res->options & RES_USE_INET6) {
-		hp = gethostbyname_internal(name, AF_INET6, res, NULL);
+		hp = gethostbyname_internal(name, AF_INET6, res, NULL, 0);
 		if (hp) {
 			__res_put_state(res);
 			return hp;
 		}
 	}
-	hp = gethostbyname_internal(name, AF_INET, res, NULL);
+	hp = gethostbyname_internal(name, AF_INET, res, NULL, 0);
 	__res_put_state(res);
 	return hp;
 }
@@ -511,18 +511,18 @@
 struct hostent *
 gethostbyname2(const char *name, int af)
 {
-	return android_gethostbynameforiface(name, af, NULL);
+	return android_gethostbynameforiface(name, af, NULL, 0);
 }
 
 struct hostent *
-android_gethostbynameforiface(const char *name, int af, const char *iface)
+android_gethostbynameforiface(const char *name, int af, const char *iface, int mark)
 {
 	struct hostent *hp;
 	res_state res = __res_get_state();
 
 	if (res == NULL)
 		return NULL;
-	hp = gethostbyname_internal(name, af, res, iface);
+	hp = gethostbyname_internal(name, af, res, iface, mark);
 	__res_put_state(res);
 	return hp;
 }
@@ -741,7 +741,7 @@
 
 // very similar in proxy-ness to android_getaddrinfo_proxy
 static struct hostent *
-gethostbyname_internal(const char *name, int af, res_state res, const char *iface)
+gethostbyname_internal(const char *name, int af, res_state res, const char *iface, int mark)
 {
 	const char *cache_mode = getenv("ANDROID_DNS_MODE");
 	FILE* proxy = NULL;
@@ -749,6 +749,7 @@
 
 	if (cache_mode != NULL && strcmp(cache_mode, "local") == 0) {
 		res_setiface(res, iface);
+		res_setmark(res, mark);
 		return gethostbyname_internal_real(name, af, res);
 	}
 
@@ -780,7 +781,7 @@
 
 struct hostent *
 android_gethostbyaddrforiface_proxy(const void *addr,
-    socklen_t len, int af, const char* iface)
+    socklen_t len, int af, const char* iface, int mark)
 {
 	struct hostent *result = NULL;
 	FILE* proxy = android_open_proxy();
@@ -810,7 +811,7 @@
 
 struct hostent *
 android_gethostbyaddrforiface_real(const void *addr,
-    socklen_t len, int af, const char* iface)
+    socklen_t len, int af, const char* iface, int mark)
 {
 	const u_char *uaddr = (const u_char *)addr;
 	socklen_t size;
@@ -858,28 +859,28 @@
 	hp = NULL;
 	h_errno = NETDB_INTERNAL;
 	if (nsdispatch(&hp, dtab, NSDB_HOSTS, "gethostbyaddr",
-	    default_dns_files, uaddr, len, af, iface) != NS_SUCCESS)
+		default_dns_files, uaddr, len, af, iface, mark) != NS_SUCCESS)
 		return NULL;
 	h_errno = NETDB_SUCCESS;
 	return hp;
 }
 
 struct hostent *
-android_gethostbyaddrforiface(const void *addr, socklen_t len, int af, const char* iface)
+android_gethostbyaddrforiface(const void *addr, socklen_t len, int af, const char* iface, int mark)
 {
 	const char *cache_mode = getenv("ANDROID_DNS_MODE");
 
 	if (cache_mode == NULL || strcmp(cache_mode, "local") != 0) {
-		return android_gethostbyaddrforiface_proxy(addr, len, af, iface);
+		return android_gethostbyaddrforiface_proxy(addr, len, af, iface, mark);
 	} else {
-		return android_gethostbyaddrforiface_real(addr,len, af,iface);
+		return android_gethostbyaddrforiface_real(addr,len, af, iface, mark);
 	}
 }
 
 struct hostent *
 gethostbyaddr(const void *addr, socklen_t len, int af)
 {
-	return android_gethostbyaddrforiface(addr, len, af, NULL);
+	return android_gethostbyaddrforiface(addr, len, af, NULL, 0);
 }
 
 
@@ -1315,6 +1316,7 @@
 	int len, af, advance;
 	res_state res;
 	const char* iface;
+	int mark;
 	res_static rs = __res_get_static();
 
 	assert(rv != NULL);
@@ -1323,6 +1325,7 @@
 	len = va_arg(ap, int);
 	af = va_arg(ap, int);
 	iface = va_arg(ap, char *);
+	mark = va_arg(ap, int);
 
 	switch (af) {
 	case AF_INET:
@@ -1365,6 +1368,7 @@
 		return NS_NOTFOUND;
 	}
 	res_setiface(res, iface);
+	res_setmark(res, mark);
 	n = res_nquery(res, qbuf, C_IN, T_PTR, buf->buf, sizeof(buf->buf));
 	if (n < 0) {
 		free(buf);
diff --git a/libc/netbsd/net/getaddrinfo.c b/libc/netbsd/net/getaddrinfo.c
index c4766e4..8c1a01b 100644
--- a/libc/netbsd/net/getaddrinfo.c
+++ b/libc/netbsd/net/getaddrinfo.c
@@ -215,7 +215,7 @@
 
 static int str2number(const char *);
 static int explore_fqdn(const struct addrinfo *, const char *,
-	const char *, struct addrinfo **, const char *iface);
+	const char *, struct addrinfo **, const char *iface, int mark);
 static int explore_null(const struct addrinfo *,
 	const char *, struct addrinfo **);
 static int explore_numeric(const struct addrinfo *, const char *,
@@ -578,12 +578,12 @@
 getaddrinfo(const char *hostname, const char *servname,
     const struct addrinfo *hints, struct addrinfo **res)
 {
-	return android_getaddrinfoforiface(hostname, servname, hints, NULL, res);
+	return android_getaddrinfoforiface(hostname, servname, hints, NULL, 0, res);
 }
 
 int
 android_getaddrinfoforiface(const char *hostname, const char *servname,
-    const struct addrinfo *hints, const char *iface, struct addrinfo **res)
+    const struct addrinfo *hints, const char *iface, int mark, struct addrinfo **res)
 {
 	struct addrinfo sentinel;
 	struct addrinfo *cur;
@@ -762,7 +762,7 @@
 			pai->ai_protocol = ex->e_protocol;
 
 		error = explore_fqdn(pai, hostname, servname,
-			&cur->ai_next, iface);
+			&cur->ai_next, iface, mark);
 
 		while (cur && cur->ai_next)
 			cur = cur->ai_next;
@@ -795,7 +795,7 @@
  */
 static int
 explore_fqdn(const struct addrinfo *pai, const char *hostname,
-    const char *servname, struct addrinfo **res, const char *iface)
+    const char *servname, struct addrinfo **res, const char *iface, int mark)
 {
 	struct addrinfo *result;
 	struct addrinfo *cur;
@@ -821,7 +821,7 @@
 		return 0;
 
 	switch (nsdispatch(&result, dtab, NSDB_HOSTS, "getaddrinfo",
-			default_dns_files, hostname, pai, iface)) {
+			default_dns_files, hostname, pai, iface, mark)) {
 	case NS_TRYAGAIN:
 		error = EAI_AGAIN;
 		goto free;
@@ -1892,10 +1892,12 @@
 	struct res_target q, q2;
 	res_state res;
 	const char* iface;
+	int mark;
 
 	name = va_arg(ap, char *);
 	pai = va_arg(ap, const struct addrinfo *);
 	iface = va_arg(ap, char *);
+	mark = va_arg(ap, int);
 	//fprintf(stderr, "_dns_getaddrinfo() name = '%s'\n", name);
 
 	memset(&q, 0, sizeof(q));
@@ -1983,6 +1985,7 @@
 	 * and have a cache hit that would be wasted, so we do the rest there on miss
 	 */
 	res_setiface(res, iface);
+	res_setmark(res, mark);
 	if (res_searchN(name, &q, res) < 0) {
 		__res_put_state(res);
 		free(buf);
diff --git a/libc/netbsd/net/getnameinfo.c b/libc/netbsd/net/getnameinfo.c
index ade5240..15d2675 100644
--- a/libc/netbsd/net/getnameinfo.c
+++ b/libc/netbsd/net/getnameinfo.c
@@ -93,7 +93,7 @@
 };
 
 static int getnameinfo_inet(const struct sockaddr *, socklen_t, char *,
-    socklen_t, char *, socklen_t, int, const char*);
+    socklen_t, char *, socklen_t, int, const char*, int);
 #ifdef INET6
 static int ip6_parsenumeric(const struct sockaddr *, const char *, char *,
 				 socklen_t, int);
@@ -108,16 +108,16 @@
  */
 int getnameinfo(const struct sockaddr* sa, socklen_t salen, char* host, size_t hostlen, char* serv, size_t servlen, int flags)
 {
-	return android_getnameinfoforiface(sa, salen, host, hostlen, serv, servlen, flags, NULL);
+	return android_getnameinfoforiface(sa, salen, host, hostlen, serv, servlen, flags, NULL, 0);
 }
 
-int android_getnameinfoforiface(const struct sockaddr* sa, socklen_t salen, char* host, size_t hostlen, char* serv, size_t servlen, int flags, const char* iface)
+int android_getnameinfoforiface(const struct sockaddr* sa, socklen_t salen, char* host, size_t hostlen, char* serv, size_t servlen, int flags, const char* iface, int mark)
 {
 	switch (sa->sa_family) {
 	case AF_INET:
 	case AF_INET6:
 		return getnameinfo_inet(sa, salen, host, hostlen,
-				serv, servlen, flags, iface);
+				serv, servlen, flags, iface, mark);
 	case AF_LOCAL:
 		return getnameinfo_local(sa, salen, host, hostlen,
 		    serv, servlen, flags);
@@ -158,10 +158,10 @@
  * the address. On failure -1 is returned in which case
  * normal execution flow shall continue. */
 static int
-android_gethostbyaddr_proxy(char* nameBuf, size_t nameBufLen, const void *addr, socklen_t addrLen, int addrFamily, const char* iface)
+android_gethostbyaddr_proxy(char* nameBuf, size_t nameBufLen, const void *addr, socklen_t addrLen, int addrFamily, const char* iface, int mark)
 {
 	struct hostent *hostResult =
-			android_gethostbyaddrforiface_proxy(addr, addrLen, addrFamily, iface);
+			android_gethostbyaddrforiface_proxy(addr, addrLen, addrFamily, iface, mark);
 
 	if (hostResult == NULL) return 0;
 
@@ -179,7 +179,7 @@
 getnameinfo_inet(const struct sockaddr* sa, socklen_t salen,
        char *host, socklen_t hostlen,
        char *serv, socklen_t servlen,
-       int flags, const char* iface)
+       int flags, const char* iface, int mark)
 {
 	const struct afd *afd;
 	struct servent *sp;
@@ -321,14 +321,15 @@
 		char android_proxy_buf[MAXDNAME];
 
 		int hostnamelen = android_gethostbyaddr_proxy(android_proxy_buf,
-				MAXDNAME, addr, afd->a_addrlen, afd->a_af, iface);
+				MAXDNAME, addr, afd->a_addrlen, afd->a_af, iface, mark);
 		if (hostnamelen > 0) {
 			hp = &android_proxy_hostent;
 			hp->h_name = android_proxy_buf;
 		} else if (!hostnamelen) {
 			hp = NULL;
 		} else {
-			hp = android_gethostbyaddrforiface(addr, afd->a_addrlen, afd->a_af, iface);
+			hp = android_gethostbyaddrforiface(addr, afd->a_addrlen, afd->a_af,
+					iface, mark);
 		}
 
 		if (hp) {
diff --git a/libc/netbsd/resolv/res_cache.c b/libc/netbsd/resolv/res_cache.c
index 829bf10..8e1bd14 100644
--- a/libc/netbsd/resolv/res_cache.c
+++ b/libc/netbsd/resolv/res_cache.c
@@ -1258,6 +1258,12 @@
     char                            ifname[IF_NAMESIZE + 1];
     struct resolv_pidiface_info*    next;
 } PidIfaceInfo;
+typedef struct resolv_uidiface_info {
+    int                             uid_start;
+    int                             uid_end;
+    char                            ifname[IF_NAMESIZE + 1];
+    struct resolv_uidiface_info*    next;
+} UidIfaceInfo;
 
 #define  HTABLE_VALID(x)  ((x) != NULL && (x) != HTABLE_DELETED)
 
@@ -1796,6 +1802,9 @@
 // List of pid iface pairs
 static struct resolv_pidiface_info _res_pidiface_list;
 
+// List of uid iface pairs
+static struct resolv_uidiface_info _res_uidiface_list;
+
 // name of the current default inteface
 static char            _res_default_ifname[IF_NAMESIZE + 1];
 
@@ -1805,6 +1814,9 @@
 // lock protecting the _res_pid_iface_list
 static pthread_mutex_t _res_pidiface_list_lock;
 
+// lock protecting the _res_uidiface_list
+static pthread_mutex_t _res_uidiface_list_lock;
+
 /* lookup the default interface name */
 static char *_get_default_iface_locked();
 /* find the first cache that has an associated interface and return the name of the interface */
@@ -1833,12 +1845,19 @@
 /* return 1 if the provided list of name servers differs from the list of name servers
  * currently attached to the provided cache_info */
 static int _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
-        char** servers, int numservers);
+        const char** servers, int numservers);
 /* remove a resolv_pidiface_info structure from _res_pidiface_list */
 static void _remove_pidiface_info_locked(int pid);
 /* get a resolv_pidiface_info structure from _res_pidiface_list with a certain pid */
 static struct resolv_pidiface_info* _get_pid_iface_info_locked(int pid);
 
+/* remove a resolv_uidiface_info structure from _res_uidiface_list */
+static int _remove_uidiface_info_locked(int uid_start, int uid_end);
+/* check if a range [uid_start,uid_end] overlaps with any existing ranges in the uid=>iface map */
+static int  _resolv_check_uid_range_overlap_locked(int uid_start, int uid_end);
+/* get a resolv_uidiface_info structure from _res_uidiface_list with a certain uid */
+static struct resolv_uidiface_info* _get_uid_iface_info_locked(int uid);
+
 static void
 _res_cache_init(void)
 {
@@ -1852,8 +1871,10 @@
     memset(&_res_default_ifname, 0, sizeof(_res_default_ifname));
     memset(&_res_cache_list, 0, sizeof(_res_cache_list));
     memset(&_res_pidiface_list, 0, sizeof(_res_pidiface_list));
+    memset(&_res_uidiface_list, 0, sizeof(_res_uidiface_list));
     pthread_mutex_init(&_res_cache_list_lock, NULL);
     pthread_mutex_init(&_res_pidiface_list_lock, NULL);
+    pthread_mutex_init(&_res_uidiface_list_lock, NULL);
 }
 
 struct resolv_cache*
@@ -2076,7 +2097,7 @@
 }
 
 void
-_resolv_set_nameservers_for_iface(const char* ifname, char** servers, int numservers,
+_resolv_set_nameservers_for_iface(const char* ifname, const char** servers, int numservers,
         const char *domains)
 {
     int i, rt, index;
@@ -2149,7 +2170,7 @@
 
 static int
 _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
-        char** servers, int numservers)
+        const char** servers, int numservers)
 {
     int i;
     char** ns;
@@ -2271,8 +2292,8 @@
         memcpy(&cache_info->ifaddr, addr, sizeof(*addr));
 
         if (DEBUG) {
-            char* addr_s = inet_ntoa(cache_info->ifaddr);
-            XLOG("address of interface %s is %s\n", ifname, addr_s);
+            XLOG("address of interface %s is %s\n",
+                    ifname, inet_ntoa(cache_info->ifaddr));
         }
     }
     pthread_mutex_unlock(&_res_cache_list_lock);
@@ -2411,26 +2432,183 @@
     return len;
 }
 
-int
-_resolv_get_default_iface(char* buff, int buffLen)
+static int
+_remove_uidiface_info_locked(int uid_start, int uid_end) {
+    struct resolv_uidiface_info* result = _res_uidiface_list.next;
+    struct resolv_uidiface_info* prev = &_res_uidiface_list;
+
+    while (result != NULL && !(result->uid_start == uid_start && result->uid_end == uid_end)) {
+        prev = result;
+        result = result->next;
+    }
+    if (prev != NULL && result != NULL) {
+        prev->next = result->next;
+        free(result);
+        return 0;
+    }
+    errno = EINVAL;
+    return -1;
+}
+
+static struct resolv_uidiface_info*
+_get_uid_iface_info_locked(int uid)
 {
-    char* ifname;
+    struct resolv_uidiface_info* result = _res_uidiface_list.next;
+    while (result != NULL && !(result->uid_start <= uid && result->uid_end >= uid)) {
+        result = result->next;
+    }
+
+    return result;
+}
+
+static int
+_resolv_check_uid_range_overlap_locked(int uid_start, int uid_end)
+{
+    struct resolv_uidiface_info* cur = _res_uidiface_list.next;
+    while (cur != NULL) {
+        if (cur->uid_start <= uid_end && cur->uid_end >= uid_start) {
+            return -1;
+        }
+        cur = cur->next;
+    }
+    return 0;
+}
+
+void
+_resolv_clear_iface_uid_range_mapping()
+{
+    pthread_once(&_res_cache_once, _res_cache_init);
+    pthread_mutex_lock(&_res_uidiface_list_lock);
+    struct resolv_uidiface_info *current = _res_uidiface_list.next;
+    struct resolv_uidiface_info *next;
+    while (current != NULL) {
+        next = current->next;
+        free(current);
+        current = next;
+    }
+    _res_uidiface_list.next = NULL;
+    pthread_mutex_unlock(&_res_uidiface_list_lock);
+}
+
+void
+_resolv_clear_iface_pid_mapping()
+{
+    pthread_once(&_res_cache_once, _res_cache_init);
+    pthread_mutex_lock(&_res_pidiface_list_lock);
+    struct resolv_pidiface_info *current = _res_pidiface_list.next;
+    struct resolv_pidiface_info *next;
+    while (current != NULL) {
+        next = current->next;
+        free(current);
+        current = next;
+    }
+    _res_pidiface_list.next = NULL;
+    pthread_mutex_unlock(&_res_pidiface_list_lock);
+}
+
+int
+_resolv_set_iface_for_uid_range(const char* ifname, int uid_start, int uid_end)
+{
+    int rv = 0;
+    struct resolv_uidiface_info* uidiface_info;
+    // make sure the uid iface list is created
+    pthread_once(&_res_cache_once, _res_cache_init);
+    if (uid_start > uid_end) {
+        errno = EINVAL;
+        return -1;
+    }
+    pthread_mutex_lock(&_res_uidiface_list_lock);
+    //check that we aren't adding an overlapping range
+    if (!_resolv_check_uid_range_overlap_locked(uid_start, uid_end)) {
+        uidiface_info = calloc(sizeof(*uidiface_info), 1);
+        if (uidiface_info) {
+            uidiface_info->uid_start = uid_start;
+            uidiface_info->uid_end = uid_end;
+            int len = sizeof(uidiface_info->ifname);
+            strncpy(uidiface_info->ifname, ifname, len - 1);
+            uidiface_info->ifname[len - 1] = '\0';
+
+            uidiface_info->next = _res_uidiface_list.next;
+            _res_uidiface_list.next = uidiface_info;
+
+            XLOG("_resolv_set_iface_for_uid_range: [%d,%d], iface %s\n", low, high, ifname);
+        } else {
+            XLOG("_resolv_set_iface_for_uid_range failing calloc\n");
+            rv = -1;
+            errno = EINVAL;
+        }
+    } else {
+        XLOG("_resolv_set_iface_for_uid_range range [%d,%d] overlaps\n", low, high);
+        rv = -1;
+        errno = EINVAL;
+    }
+
+    pthread_mutex_unlock(&_res_uidiface_list_lock);
+    return rv;
+}
+
+int
+_resolv_clear_iface_for_uid_range(int uid_start, int uid_end)
+{
+    pthread_once(&_res_cache_once, _res_cache_init);
+    pthread_mutex_lock(&_res_uidiface_list_lock);
+
+    int rv = _remove_uidiface_info_locked(uid_start, uid_end);
+
+    XLOG("_resolv_clear_iface_for_uid_range: [%d,%d]\n", uid_start, uid_end);
+
+    pthread_mutex_unlock(&_res_uidiface_list_lock);
+
+    return rv;
+}
+
+int
+_resolv_get_uids_associated_interface(int uid, char* buff, int buffLen)
+{
     int len = 0;
 
-    if (!buff || buffLen == 0) {
+    if (!buff) {
         return -1;
     }
 
     pthread_once(&_res_cache_once, _res_cache_init);
+    pthread_mutex_lock(&_res_uidiface_list_lock);
+
+    struct resolv_uidiface_info* uidiface_info = _get_uid_iface_info_locked(uid);
+    buff[0] = '\0';
+    if (uidiface_info) {
+        len = strlen(uidiface_info->ifname);
+        if (len < buffLen) {
+            strncpy(buff, uidiface_info->ifname, len);
+            buff[len] = '\0';
+        }
+    }
+
+    XLOG("_resolv_get_uids_associated_interface buff: %s\n", buff);
+
+    pthread_mutex_unlock(&_res_uidiface_list_lock);
+
+    return len;
+}
+
+size_t
+_resolv_get_default_iface(char* buff, size_t buffLen)
+{
+    if (!buff || buffLen == 0) {
+        return 0;
+    }
+
+    pthread_once(&_res_cache_once, _res_cache_init);
     pthread_mutex_lock(&_res_cache_list_lock);
 
-    ifname = _get_default_iface_locked(); // never null, but may be empty
+    char* ifname = _get_default_iface_locked(); // never null, but may be empty
 
     // if default interface not set. Get first cache with an interface
     if (ifname[0] == '\0') {
         ifname = _find_any_iface_name_locked(); // may be null
     }
 
+    size_t len = 0;
     // if we got the default iface or if (no-default) the find_any call gave an answer
     if (ifname) {
         len = strlen(ifname);
@@ -2447,28 +2625,32 @@
     return len;
 }
 
-int
+void
 _resolv_populate_res_for_iface(res_state statp)
 {
-    int nserv;
-    struct resolv_cache_info* info = NULL;
+    if (statp == NULL) {
+        return;
+    }
 
-    if (statp) {
+    if (statp->iface[0] == '\0') { // no interface set; assign the default
+        size_t if_len = _resolv_get_default_iface(statp->iface, sizeof(statp->iface));
+        if (if_len + 1 > sizeof(statp->iface)) {
+            XLOG("%s: INTERNAL_ERROR: can't fit interface name into statp->iface.\n", __FUNCTION__);
+            return;
+        }
+        if (if_len == 0) {
+            XLOG("%s: INTERNAL_ERROR: can't find any suitable interfaces.\n", __FUNCTION__);
+            return;
+        }
+    }
+
+    pthread_once(&_res_cache_once, _res_cache_init);
+    pthread_mutex_lock(&_res_cache_list_lock);
+
+    struct resolv_cache_info* info = _find_cache_info_locked(statp->iface);
+    if (info != NULL) {
+        int nserv;
         struct addrinfo* ai;
-
-        if (statp->iface[0] == '\0') { // no interface set assign default
-            _resolv_get_default_iface(statp->iface, sizeof(statp->iface));
-        }
-
-        pthread_once(&_res_cache_once, _res_cache_init);
-        pthread_mutex_lock(&_res_cache_list_lock);
-        info = _find_cache_info_locked(statp->iface);
-
-        if (info == NULL) {
-            pthread_mutex_unlock(&_res_cache_list_lock);
-            return 0;
-        }
-
         XLOG("_resolv_populate_res_for_iface: %s\n", statp->iface);
         for (nserv = 0; nserv < MAXNS; nserv++) {
             ai = info->nsaddrinfo[nserv];
@@ -2502,8 +2684,6 @@
         while (pp < statp->dnsrch + MAXDNSRCH && *p != -1) {
             *pp++ = &statp->defdname + *p++;
         }
-
-        pthread_mutex_unlock(&_res_cache_list_lock);
     }
-    return nserv;
+    pthread_mutex_unlock(&_res_cache_list_lock);
 }
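
A note on the overlap test used above: _resolv_set_iface_for_uid_range() refuses to add a range that overlaps an entry already in _res_uidiface_list, via _resolv_check_uid_range_overlap_locked(), whose body is not part of this hunk. A minimal sketch of such a check over a singly linked list, assuming a resolv_uidiface_info layout like the one used above (the 16-byte ifname and the helper name are illustrative), could look like this:

    /* Sketch only: the real _resolv_check_uid_range_overlap_locked() lives
     * elsewhere in res_cache.c and may differ in detail. */
    struct resolv_uidiface_info {
        char ifname[16];                  /* IF_NAMESIZE-sized in the real code */
        int uid_start;
        int uid_end;
        struct resolv_uidiface_info* next;
    };

    /* Returns 1 if the closed interval [uid_start, uid_end] intersects any
     * range already stored in the list headed (sentinel-style) at *head. */
    static int check_uid_range_overlap(const struct resolv_uidiface_info* head,
                                       int uid_start, int uid_end)
    {
        const struct resolv_uidiface_info* cur;
        for (cur = head->next; cur != NULL; cur = cur->next) {
            if (uid_start <= cur->uid_end && cur->uid_start <= uid_end) {
                return 1;
            }
        }
        return 0;
    }
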
diff --git a/libc/netbsd/resolv/res_init.c b/libc/netbsd/resolv/res_init.c
index ff65299..ceb412b 100644
--- a/libc/netbsd/resolv/res_init.c
+++ b/libc/netbsd/resolv/res_init.c
@@ -806,4 +806,11 @@
 		}
 	}
 }
+
+void res_setmark(res_state statp, int mark)
+{
+	if (statp != NULL) {
+		statp->_mark = mark;
+	}
+}
 #endif /* ANDROID_CHANGES */
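
res_setmark() only records the mark in the res_state; res_send.c (below) applies it with SO_MARK when the resolver sockets are created. An illustrative caller might look like the following sketch; the res_setiface() prototype is assumed from its definition elsewhere in res_init.c, and the fwmark value and interface name are placeholders.

    #include <resolv.h>

    /* Android-private extensions, normally declared in resolv_private.h.
     * Prototypes assumed from the definitions in res_init.c. */
    extern void res_setiface(res_state statp, const char* iface);
    extern void res_setmark(res_state statp, int mark);

    /* Sketch: bind a resolver context to an interface and a socket mark. */
    static int setup_resolver(res_state statp, const char* iface, int fwmark)
    {
        if (res_ninit(statp) != 0) {
            return -1;
        }
        res_setiface(statp, iface);   /* name servers from this interface's cache */
        res_setmark(statp, fwmark);   /* stored in statp->_mark, used by res_send.c */
        return 0;
    }
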
diff --git a/libc/netbsd/resolv/res_send.c b/libc/netbsd/resolv/res_send.c
index 0bb5b6b..f65b015 100644
--- a/libc/netbsd/resolv/res_send.c
+++ b/libc/netbsd/resolv/res_send.c
@@ -762,10 +762,13 @@
 	if (statp->_vcsock >= 0 && (statp->_flags & RES_F_VC) != 0) {
 		struct sockaddr_storage peer;
 		socklen_t size = sizeof peer;
-
+		int old_mark;
+		socklen_t mark_size = sizeof(old_mark);
 		if (getpeername(statp->_vcsock,
 				(struct sockaddr *)(void *)&peer, &size) < 0 ||
-		    !sock_eq((struct sockaddr *)(void *)&peer, nsap)) {
+		    !sock_eq((struct sockaddr *)(void *)&peer, nsap) ||
+			getsockopt(statp->_vcsock, SOL_SOCKET, SO_MARK, &old_mark, &mark_size) < 0 ||
+			old_mark != statp->_mark) {
 			res_nclose(statp);
 			statp->_flags &= ~RES_F_VC;
 		}
@@ -795,6 +798,14 @@
 				return (-1);
 			}
 		}
+		if (statp->_mark != 0) {
+			if (setsockopt(statp->_vcsock, SOL_SOCKET,
+				        SO_MARK, &statp->_mark, sizeof(statp->_mark)) < 0) {
+				*terrno = errno;
+				Perror(statp, stderr, "setsockopt", errno);
+				return -1;
+			}
+		}
 		errno = 0;
 		if (random_bind(statp->_vcsock,nsap->sa_family) < 0) {
 			*terrno = errno;
@@ -1070,6 +1081,14 @@
 				return (-1);
 			}
 		}
+
+		if (statp->_mark != 0) {
+			if (setsockopt(EXT(statp).nssocks[ns], SOL_SOCKET,
+					SO_MARK, &(statp->_mark), sizeof(statp->_mark)) < 0) {
+				res_nclose(statp);
+				return -1;
+			}
+		}
 #ifndef CANNOT_CONNECT_DGRAM
 		/*
 		 * On a 4.3BSD+ machine (client and server,
@@ -1097,6 +1116,7 @@
 #endif /* !CANNOT_CONNECT_DGRAM */
 		Dprint(statp->options & RES_DEBUG,
 		       (stdout, ";; new DG socket\n"))
+
 	}
 	s = EXT(statp).nssocks[ns];
 #ifndef CANNOT_CONNECT_DGRAM
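
The res_send.c changes apply statp->_mark to both the TCP (VC) and UDP (DG) name-server sockets, and force the cached TCP connection to be reopened when its current SO_MARK no longer matches. Setting SO_MARK tags outgoing packets for policy routing and normally requires CAP_NET_ADMIN. A standalone sketch of the pattern (the function name is illustrative):

    #include <stdio.h>
    #include <sys/socket.h>

    /* Sketch: apply a fwmark to a freshly created socket, mirroring the
     * treatment of the resolver sockets above.  A mark of 0 means "none". */
    static int apply_mark(int sock, int mark)
    {
        if (mark == 0) {
            return 0;
        }
        if (setsockopt(sock, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0) {
            perror("setsockopt(SO_MARK)");
            return -1;
        }
        return 0;
    }
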
diff --git a/libc/private/bionic_name_mem.h b/libc/private/bionic_name_mem.h
new file mode 100644
index 0000000..9f6163d
--- /dev/null
+++ b/libc/private/bionic_name_mem.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _BIONIC_NAME_MEM_H
+#define _BIONIC_NAME_MEM_H
+
+#include <sys/cdefs.h>
+#include <stddef.h>
+
+__BEGIN_DECLS
+
+int __bionic_name_mem(void *addr, size_t len, const char *name);
+
+__END_DECLS
+
+#endif
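
__bionic_name_mem() is declared here, but its implementation is not part of this section. A plausible implementation on Android kernels is the PR_SET_VMA / PR_SET_VMA_ANON_NAME prctl, which attaches a label to an anonymous mapping so it shows up in /proc/<pid>/maps; kernels without that patch simply fail the prctl. A hedged sketch of naming a private anonymous mapping (the constants below come from the Android kernel patch, and alloc_named() is illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_VMA
    #define PR_SET_VMA 0x53564d41            /* "SVMA" */
    #define PR_SET_VMA_ANON_NAME 0
    #endif

    /* Sketch: allocate an anonymous mapping and attach a name to it, best
     * effort (the prctl simply fails on kernels without the Android patch). */
    static void* alloc_named(size_t len, const char* name)
    {
        void* addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED) {
            return NULL;
        }
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
              (uintptr_t)addr, len, (uintptr_t)name);
        return addr;
    }
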
diff --git a/libc/private/libc_events.h b/libc/private/libc_events.h
new file mode 100644
index 0000000..5d20f4b
--- /dev/null
+++ b/libc/private/libc_events.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LIBC_EVENTS_H
+#define _LIBC_EVENTS_H
+
+
+// This is going to be included in assembler code so only allow #define
+// values instead of defining an enum.
+
+#define BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW   80100
+#define BIONIC_EVENT_STRCAT_BUFFER_OVERFLOW   80105
+#define BIONIC_EVENT_MEMMOVE_BUFFER_OVERFLOW  80110
+#define BIONIC_EVENT_STRNCAT_BUFFER_OVERFLOW  80115
+#define BIONIC_EVENT_STRNCPY_BUFFER_OVERFLOW  80120
+#define BIONIC_EVENT_MEMSET_BUFFER_OVERFLOW   80125
+#define BIONIC_EVENT_STRCPY_BUFFER_OVERFLOW   80130
+
+#define BIONIC_EVENT_RESOLVER_OLD_RESPONSE    80300
+#define BIONIC_EVENT_RESOLVER_WRONG_SERVER    80305
+#define BIONIC_EVENT_RESOLVER_WRONG_QUERY     80310
+
+#endif // _LIBC_EVENTS_H
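
Turning these values into plain #defines (rather than the enum they replace in libc_logging.h below) lets assembler sources include the header as well as C and C++ code. The sketch below shows one way the constants might be consumed from C; log_event() is a hypothetical stand-in for this sketch only, not bionic's real logging entry point.

    #include <stdio.h>

    /* Mirrors libc_events.h, which is private to bionic. */
    #define BIONIC_EVENT_STRCPY_BUFFER_OVERFLOW 80130

    /* Hypothetical logging hook used only for this sketch. */
    static void log_event(int tag, const char* msg)
    {
        fprintf(stderr, "event %d: %s\n", tag, msg);
    }

    /* Sketch of a fortify-style check: report the event and abort when the
     * copy would exceed the compiler-reported destination size. */
    static void check_fits(size_t needed, size_t dest_size)
    {
        if (needed > dest_size) {
            log_event(BIONIC_EVENT_STRCPY_BUFFER_OVERFLOW, "buffer overflow detected");
            __builtin_trap();
        }
    }
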
diff --git a/libc/private/libc_logging.h b/libc/private/libc_logging.h
index f69e2ed..1cdcb6e 100644
--- a/libc/private/libc_logging.h
+++ b/libc/private/libc_logging.h
@@ -36,19 +36,7 @@
 
 __BEGIN_DECLS
 
-enum {
-  BIONIC_EVENT_MEMCPY_BUFFER_OVERFLOW = 80100,
-  BIONIC_EVENT_STRCAT_BUFFER_OVERFLOW = 80105,
-  BIONIC_EVENT_MEMMOVE_BUFFER_OVERFLOW = 80110,
-  BIONIC_EVENT_STRNCAT_BUFFER_OVERFLOW = 80115,
-  BIONIC_EVENT_STRNCPY_BUFFER_OVERFLOW = 80120,
-  BIONIC_EVENT_MEMSET_BUFFER_OVERFLOW = 80125,
-  BIONIC_EVENT_STRCPY_BUFFER_OVERFLOW = 80130,
-
-  BIONIC_EVENT_RESOLVER_OLD_RESPONSE = 80300,
-  BIONIC_EVENT_RESOLVER_WRONG_SERVER = 80305,
-  BIONIC_EVENT_RESOLVER_WRONG_QUERY = 80310,
-};
+#include "libc_events.h"
 
 enum {
   ANDROID_LOG_UNKNOWN = 0,
diff --git a/libc/private/resolv_cache.h b/libc/private/resolv_cache.h
index d70857d..68a1180 100644
--- a/libc/private/resolv_cache.h
+++ b/libc/private/resolv_cache.h
@@ -28,6 +28,7 @@
 #ifndef _RESOLV_CACHE_H_
 #define _RESOLV_CACHE_H_
 
+#include <stddef.h>
 #include <sys/cdefs.h>
 
 struct __res_state;
@@ -77,16 +78,17 @@
 __LIBC_HIDDEN__
 extern struct in_addr* _resolv_get_addr_of_iface(const char* ifname);
 
-/* Copy the name of the default interface to provided buffer.
- * Return length of buffer on success on failure -1 is returned */
+/* Copy the name of the default interface to the provided buffer.
+ * Returns the string length of the default interface name, which may be
+ * larger than buffLen if the name was truncated, or 0 if nothing was written. */
 __LIBC_HIDDEN__
-extern int _resolv_get_default_iface(char* buff, int buffLen);
+extern size_t _resolv_get_default_iface(char* buff, size_t buffLen);
 
 /* sets the name server addresses to the provided res_state structure. The
  * name servers are retrieved from the cache which is associated
  * with the interface to which the res_state structure is associated */
 __LIBC_HIDDEN__
-extern int _resolv_populate_res_for_iface(struct __res_state* statp);
+extern void _resolv_populate_res_for_iface(struct __res_state* statp);
 
 typedef enum {
     RESOLV_CACHE_UNSUPPORTED,  /* the cache can't handle that kind of queries */
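
The return convention of _resolv_get_default_iface() changes with this hunk: it now reports the full name length even when the name does not fit in buffLen, so callers must detect truncation themselves, as _resolv_populate_res_for_iface() does above. A minimal caller-side sketch, assuming only the declaration in this header (copy_default_iface() is illustrative):

    #include <stddef.h>

    extern size_t _resolv_get_default_iface(char* buff, size_t buffLen);  /* resolv_cache.h */

    /* Sketch: 0 means "no default interface"; a value >= buf_size means the
     * name was longer than the buffer and its contents must not be trusted. */
    static int copy_default_iface(char* buf, size_t buf_size)
    {
        size_t len = _resolv_get_default_iface(buf, buf_size);
        if (len == 0 || len >= buf_size) {
            return -1;
        }
        return 0;   /* buf now holds a NUL-terminated interface name */
    }
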
diff --git a/libc/private/resolv_iface.h b/libc/private/resolv_iface.h
index bf5abad..ad42793 100644
--- a/libc/private/resolv_iface.h
+++ b/libc/private/resolv_iface.h
@@ -48,7 +48,7 @@
 extern void _resolv_set_default_iface(const char* ifname);
 
 /* set name servers for an interface */
-extern void _resolv_set_nameservers_for_iface(const char* ifname, char** servers, int numservers,
+extern void _resolv_set_nameservers_for_iface(const char* ifname, const char** servers, int numservers,
         const char *domains);
 
 /* tell resolver of the address of an interface */
@@ -66,6 +66,9 @@
 /* clear pid from being associated with an interface */
 extern void _resolv_clear_iface_for_pid(int pid);
 
+/* clear the entire mapping of pids to interfaces. */
+extern void _resolv_clear_iface_pid_mapping(void);
+
 /** Gets the name of the interface to which the pid is attached.
  *  On error, -1 is returned.
  *  If no interface is found, 0 is returned and buff is set to empty ('\0').
@@ -75,6 +78,27 @@
  *               buffLen Length of buff. An interface is at most IF_NAMESIZE in length */
 extern int _resolv_get_pids_associated_interface(int pid, char* buff, int buffLen);
 
+
+/** Set a uid range to use the name servers of the specified interface.
+ *  If [uid_start, uid_end] overlaps an already existing range, -1 is returned. */
+extern int _resolv_set_iface_for_uid_range(const char* ifname, int uid_start, int uid_end);
+
+/* Clear a uid range from being associated with an interface.
+ * If the given range is not mapped, -1 is returned. */
+extern int _resolv_clear_iface_for_uid_range(int uid_start, int uid_end);
+
+/* clear the entire mapping of uid ranges to interfaces. */
+extern void _resolv_clear_iface_uid_range_mapping(void);
+
+/** Gets the name of the interface to which the uid is attached.
+ *  On error, -1 is returned.
+ *  If no interface is found, 0 is returned and buff is set to empty ('\0').
+ *  If an interface is found, the name is copied to buff and the length of the name is returned.
+ *  Arguments:   uid The uid to find an interface for
+ *               buff A buffer to copy the result to
+ *               buffLen Length of buff. An interface is at most IF_NAMESIZE in length */
+extern int _resolv_get_uids_associated_interface(int uid, char* buff, int buffLen);
+
 #endif /* _BIONIC_RESOLV_IFACE_FUNCTIONS_DECLARED */
 
 __END_DECLS
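
For reference, a usage sketch of the new uid-range API declared above; the uid values, the buffer size and the function name are illustrative, and the error handling follows the header comments (-1 on overlap or an unknown range, 0 when no interface is associated).

    #include <stdio.h>

    /* Normally provided by the private resolv_iface.h. */
    extern int _resolv_set_iface_for_uid_range(const char* ifname, int uid_start, int uid_end);
    extern int _resolv_clear_iface_for_uid_range(int uid_start, int uid_end);
    extern int _resolv_get_uids_associated_interface(int uid, char* buff, int buffLen);

    /* Sketch: pin a uid range to wlan0, look one uid up, then clear the rule. */
    static void uid_range_example(void)
    {
        char iface[16];   /* an interface name is at most IF_NAMESIZE long */

        if (_resolv_set_iface_for_uid_range("wlan0", 10000, 10999) < 0) {
            fprintf(stderr, "range overlaps an existing rule\n");
            return;
        }
        if (_resolv_get_uids_associated_interface(10500, iface, sizeof(iface)) > 0) {
            printf("uid 10500 resolves via %s\n", iface);
        }
        if (_resolv_clear_iface_for_uid_range(10000, 10999) < 0) {
            fprintf(stderr, "range was not mapped\n");
        }
    }
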
diff --git a/libc/private/resolv_private.h b/libc/private/resolv_private.h
index 9648a8f..c7bcb89 100644
--- a/libc/private/resolv_private.h
+++ b/libc/private/resolv_private.h
@@ -175,6 +175,7 @@
 	res_send_qhook qhook;		/* query hook */
 	res_send_rhook rhook;		/* response hook */
 	int	res_h_errno;		/* last one set for this context */
+	int	_mark;			/* If non-zero, set SO_MARK to _mark on all request sockets */
 	int	_vcsock;		/* PRIVATE: for res_send VC i/o */
 	u_int	_flags;			/* PRIVATE: see below */
 	u_int	_pad;			/* make _u 64 bit aligned */
@@ -490,6 +491,7 @@
 				    union res_sockaddr_union *, int);
 
 void res_setiface();
+void res_setmark(res_state, int);
 u_int  res_randomid(void);
 
 __END_DECLS
diff --git a/tests/system_properties_test.cpp b/tests/system_properties_test.cpp
index 9602607..b9256c6 100644
--- a/tests/system_properties_test.cpp
+++ b/tests/system_properties_test.cpp
@@ -198,6 +198,84 @@
     ASSERT_EQ((const prop_info *)NULL, __system_property_find_nth(247));
 }
 
+static void hierarchical_test_callback(const prop_info *pi, void *cookie) {
+    bool (*ok)[8][8] = static_cast<bool (*)[8][8]>(cookie);
+
+    char name[PROP_NAME_MAX];
+    char value[PROP_VALUE_MAX];
+
+    __system_property_read(pi, name, value);
+
+    int name_i, name_j, name_k;
+    int value_i, value_j, value_k;
+    ASSERT_EQ(3, sscanf(name, "property_%d.%d.%d", &name_i, &name_j, &name_k));
+    ASSERT_EQ(3, sscanf(value, "value_%d.%d.%d", &value_i, &value_j, &value_k));
+    ASSERT_EQ(name_i, value_i);
+    ASSERT_GE(name_i, 0);
+    ASSERT_LT(name_i, 8);
+    ASSERT_EQ(name_j, value_j);
+    ASSERT_GE(name_j, 0);
+    ASSERT_LT(name_j, 8);
+    ASSERT_EQ(name_k, value_k);
+    ASSERT_GE(name_k, 0);
+    ASSERT_LT(name_k, 8);
+
+    ok[name_i][name_j][name_k] = true;
+}
+
+TEST(properties, fill_hierarchical) {
+    LocalPropertyTestState pa;
+    ASSERT_TRUE(pa.valid);
+    char prop_name[PROP_NAME_MAX];
+    char prop_value[PROP_VALUE_MAX];
+    char prop_value_ret[PROP_VALUE_MAX];
+    int ret;
+
+    for (int i = 0; i < 8; i++) {
+        for (int j = 0; j < 8; j++) {
+            for (int k = 0; k < 8; k++) {
+                ret = snprintf(prop_name, PROP_NAME_MAX - 1, "property_%d.%d.%d", i, j, k);
+                memset(prop_name + ret, 'a', PROP_NAME_MAX - 1 - ret);
+                ret = snprintf(prop_value, PROP_VALUE_MAX - 1, "value_%d.%d.%d", i, j, k);
+                memset(prop_value + ret, 'b', PROP_VALUE_MAX - 1 - ret);
+                prop_name[PROP_NAME_MAX - 1] = 0;
+                prop_value[PROP_VALUE_MAX - 1] = 0;
+
+                ASSERT_EQ(0, __system_property_add(prop_name, PROP_NAME_MAX - 1, prop_value, PROP_VALUE_MAX - 1));
+            }
+        }
+    }
+
+    for (int i = 0; i < 8; i++) {
+        for (int j = 0; j < 8; j++) {
+            for (int k = 0; k < 8; k++) {
+                ret = snprintf(prop_name, PROP_NAME_MAX - 1, "property_%d.%d.%d", i, j, k);
+                memset(prop_name + ret, 'a', PROP_NAME_MAX - 1 - ret);
+                ret = snprintf(prop_value, PROP_VALUE_MAX - 1, "value_%d.%d.%d", i, j, k);
+                memset(prop_value + ret, 'b', PROP_VALUE_MAX - 1 - ret);
+                prop_name[PROP_NAME_MAX - 1] = 0;
+                prop_value[PROP_VALUE_MAX - 1] = 0;
+                memset(prop_value_ret, '\0', PROP_VALUE_MAX);
+
+                ASSERT_EQ(PROP_VALUE_MAX - 1, __system_property_get(prop_name, prop_value_ret));
+                ASSERT_EQ(0, memcmp(prop_value, prop_value_ret, PROP_VALUE_MAX));
+            }
+        }
+    }
+
+    bool ok[8][8][8];
+    memset(ok, 0, sizeof(ok));
+    __system_property_foreach(hierarchical_test_callback, ok);
+
+    for (int i = 0; i < 8; i++) {
+        for (int j = 0; j < 8; j++) {
+            for (int k = 0; k < 8; k++) {
+                ASSERT_TRUE(ok[i][j][k]);
+            }
+        }
+    }
+}
+
 TEST(properties, errors) {
     LocalPropertyTestState pa;
     ASSERT_TRUE(pa.valid);