Remove denver64 from libc

Delete the denver64-specific memcpy/memmove/memset assembly, the
denver64 arch-variant block in libc/Android.bp that selected it, and
the matching Linaro/NVIDIA notice in libc/NOTICE. Without the
exclude_srcs override, denver64 builds fall back to the generic
arm64 implementations.

Test: compile
Change-Id: Ifcbe15c1682b4e1e18835e38915b2421196882f7
diff --git a/libc/Android.bp b/libc/Android.bp
index bc4dd9e..6b46ba0 100644
--- a/libc/Android.bp
+++ b/libc/Android.bp
@@ -841,18 +841,6 @@
"bionic/strchr.cpp",
"bionic/strnlen.c",
],
- denver64: {
- srcs: [
- "arch-arm64/denver64/bionic/memcpy.S",
- "arch-arm64/denver64/bionic/memmove.S",
- "arch-arm64/denver64/bionic/memset.S",
- ],
- exclude_srcs: [
- "arch-arm64/generic/bionic/memcpy.S",
- "arch-arm64/generic/bionic/memmove.S",
- "arch-arm64/generic/bionic/memset.S",
- ],
- },
},
mips: {
diff --git a/libc/NOTICE b/libc/NOTICE
index 5cce4d4..314b936 100644
--- a/libc/NOTICE
+++ b/libc/NOTICE
@@ -5348,35 +5348,6 @@
-------------------------------------------------------------------
-Copyright (c) 2012, Linaro Limited
- All rights reserved.
- Copyright (c) 2014, NVIDIA Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the Linaro nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------------
-
Copyright (c) 2012-2013, Linaro Limited
All rights reserved.
diff --git a/libc/arch-arm64/denver64/bionic/memcpy.S b/libc/arch-arm64/denver64/bionic/memcpy.S
deleted file mode 100644
index baadb92..0000000
--- a/libc/arch-arm64/denver64/bionic/memcpy.S
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-// Prototype: void *memcpy (void *dst, const void *src, size_t count).
-
-#include <private/bionic_asm.h>
-
-ENTRY(__memcpy)
- #include "memcpy_base.S"
-END(__memcpy)
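
The wrapper deleted above defines the internal symbol __memcpy rather than the public memcpy: in this variant, memmove.S (below) ends with ALIAS_SYMBOL(memcpy, memmove), so the public memcpy resolves to memmove, which branches to __memcpy once a harmful overlap has been ruled out. A minimal C sketch of that call graph, with stand-in bodies; the names only mirror the assembly, this is illustrative rather than bionic's actual code:

#include <stddef.h>
#include <string.h>

/* raw_memcpy stands in for __memcpy, the entry defined above. */
static void *raw_memcpy(void *dst, const void *src, size_t n) {
    return memcpy(dst, src, n);            /* stand-in for the assembly body */
}

/* memmove.S provides the public entry and then does
 * ALIAS_SYMBOL(memcpy, memmove), so memcpy == memmove publicly. */
void *memmove_entry(void *dst, const void *src, size_t n) {
    const char *s = (const char *)src;
    char *d = (char *)dst;
    if (d >= s + n || s >= d + n)          /* disjoint: take the fast path */
        return raw_memcpy(dst, src, n);
    return memmove(dst, src, n);           /* stand-in for the careful paths */
}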
diff --git a/libc/arch-arm64/denver64/bionic/memcpy_base.S b/libc/arch-arm64/denver64/bionic/memcpy_base.S
deleted file mode 100644
index 3d7e9dd..0000000
--- a/libc/arch-arm64/denver64/bionic/memcpy_base.S
+++ /dev/null
@@ -1,199 +0,0 @@
-/* Copyright (c) 2012, Linaro Limited
- All rights reserved.
- Copyright (c) 2014, NVIDIA Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the Linaro nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/* Assumptions:
- *
- * denver, ARMv8-a, AArch64
- * Unaligned accesses
- *
- */
-
-#define dstin x0
-#define src x1
-#define count x2
-#define tmp1 x3
-#define tmp1w w3
-#define tmp2 x4
-#define tmp2w w4
-#define tmp3 x5
-#define tmp3w w5
-#define dst x6
-
-#define A_l x7
-#define A_h x8
-#define B_l x9
-#define B_h x10
-#define C_l x11
-#define C_h x12
-#define D_l x13
-#define D_h x14
-
-#define QA_l q0
-#define QA_h q1
-#define QB_l q2
-#define QB_h q3
-
- mov dst, dstin
- cmp count, #64
- b.ge .Lcpy_not_short
- cmp count, #15
- b.le .Ltail15tiny
-
- /* Deal with small copies quickly by dropping straight into the
- * exit block. */
-.Ltail63:
- /* Copy up to 48 bytes of data. At this point we only need the
- * bottom 6 bits of count to be accurate. */
- ands tmp1, count, #0x30
- b.eq .Ltail15
- add dst, dst, tmp1
- add src, src, tmp1
- cmp tmp1w, #0x20
- b.eq 1f
- b.lt 2f
- ldp A_l, A_h, [src, #-48]
- stp A_l, A_h, [dst, #-48]
-1:
- ldp A_l, A_h, [src, #-32]
- stp A_l, A_h, [dst, #-32]
-2:
- ldp A_l, A_h, [src, #-16]
- stp A_l, A_h, [dst, #-16]
-
-.Ltail15:
- ands count, count, #15
- beq 1f
- add src, src, count
- ldp A_l, A_h, [src, #-16]
- add dst, dst, count
- stp A_l, A_h, [dst, #-16]
-1:
- ret
-
-.Ltail15tiny:
- /* Copy up to 15 bytes of data. Does not assume additional data
- being copied. */
- tbz count, #3, 1f
- ldr tmp1, [src], #8
- str tmp1, [dst], #8
-1:
- tbz count, #2, 1f
- ldr tmp1w, [src], #4
- str tmp1w, [dst], #4
-1:
- tbz count, #1, 1f
- ldrh tmp1w, [src], #2
- strh tmp1w, [dst], #2
-1:
- tbz count, #0, 1f
- ldrb tmp1w, [src]
- strb tmp1w, [dst]
-1:
- ret
-
-.Lcpy_not_short:
- /* We don't much care about the alignment of DST, but we want SRC
- * to be 128-bit (16 byte) aligned so that we don't cross cache line
- * boundaries on both loads and stores. */
- neg tmp2, src
- ands tmp2, tmp2, #15 /* Bytes to reach alignment. */
- b.eq 2f
- sub count, count, tmp2
- /* Copy more data than needed; it's faster than jumping
- * around copying sub-Quadword quantities. We know that
- * it can't overrun. */
- ldp A_l, A_h, [src]
- add src, src, tmp2
- stp A_l, A_h, [dst]
- add dst, dst, tmp2
- /* There may be less than 63 bytes to go now. */
- cmp count, #63
- b.le .Ltail63
-2:
- subs count, count, #128
- b.ge .Lcpy_body_large
- /* Less than 128 bytes to copy, so handle 64 here and then jump
- * to the tail. */
- ldp QA_l, QA_h, [src]
- ldp QB_l, QB_h, [src, #32]
- stp QA_l, QA_h, [dst]
- stp QB_l, QB_h, [dst, #32]
- tst count, #0x3f
- add src, src, #64
- add dst, dst, #64
- b.ne .Ltail63
- ret
-
- /* Critical loop. Start at a new cache line boundary. Assuming
- * 64 bytes per line this ensures the entire loop is in one line. */
- .p2align 6
-.Lcpy_body_large:
- cmp count, 65536
- bhi .Lcpy_body_huge
- /* There are at least 128 bytes to copy. */
- ldp QA_l, QA_h, [src, #0]
- sub dst, dst, #32 /* Pre-bias. */
- ldp QB_l, QB_h, [src, #32]! /* src += 64 - Pre-bias. */
-1:
- stp QA_l, QA_h, [dst, #32]
- ldp QA_l, QA_h, [src, #32]
- stp QB_l, QB_h, [dst, #64]!
- ldp QB_l, QB_h, [src, #64]!
-
- subs count, count, #64
- b.ge 1b
-
- stp QA_l, QA_h, [dst, #32]
- stp QB_l, QB_h, [dst, #64]
- add src, src, #32
- add dst, dst, #64 + 32
- tst count, #0x3f
- b.ne .Ltail63
- ret
-.Lcpy_body_huge:
- /* There are at least 128 bytes to copy. */
- ldp QA_l, QA_h, [src, #0]
- sub dst, dst, #32 /* Pre-bias. */
- ldp QB_l, QB_h, [src, #32]!
-1:
- stnp QA_l, QA_h, [dst, #32]
- stnp QB_l, QB_h, [dst, #64]
- ldp QA_l, QA_h, [src, #32]
- ldp QB_l, QB_h, [src, #64]!
- add dst, dst, #64
-
- subs count, count, #64
- b.ge 1b
-
- stnp QA_l, QA_h, [dst, #32]
- stnp QB_l, QB_h, [dst, #64]
- add src, src, #32
- add dst, dst, #64 + 32
- tst count, #0x3f
- b.ne .Ltail63
- ret
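
Two tricks from the comments in the deleted body are worth spelling out. To align SRC, the code copies a full 16-byte chunk and then advances both pointers by only the skew, deliberately re-copying a few bytes rather than branching over sub-word sizes; and the .Ltail15 exit re-copies the final 16 bytes from the end of the buffer for the same reason. A hedged C rendering, assuming (as the file does) that unaligned accesses are cheap; the real loop moves 64 bytes of q registers per iteration and switches to non-temporal stnp stores for copies over 64 KiB:

#include <stdint.h>
#include <string.h>

/* copy16() stands in for an ldp/stp pair of 16-byte accesses. */
static void copy16(char *d, const char *s) { memcpy(d, s, 16); }

void *memcpy_sketch(void *dstin, const void *srcin, size_t count) {
    char *d = (char *)dstin;
    const char *s = (const char *)srcin;
    if (count < 16) {                      /* .Ltail15tiny: byte-wise, no over-read */
        while (count--) *d++ = *s++;
        return dstin;
    }
    size_t skew = -(uintptr_t)s & 15;      /* bytes until SRC is 16-byte aligned */
    if (skew) {
        copy16(d, s);                      /* copy more than needed; it can't overrun */
        d += skew; s += skew; count -= skew;
    }
    while (count >= 16) {                  /* aligned bulk loop (64B-unrolled in asm) */
        copy16(d, s);
        d += 16; s += 16; count -= 16;
    }
    if (count)                             /* .Ltail15: re-copy the last 16 bytes */
        copy16(d + count - 16, s + count - 16);
    return dstin;
}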
diff --git a/libc/arch-arm64/denver64/bionic/memmove.S b/libc/arch-arm64/denver64/bionic/memmove.S
deleted file mode 100644
index 42271dc..0000000
--- a/libc/arch-arm64/denver64/bionic/memmove.S
+++ /dev/null
@@ -1,331 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the Linaro nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/* Assumptions:
- *
- * ARMv8-a, AArch64
- * Unaligned accesses
- * wchar_t is 4 bytes
- */
-
-#include <private/bionic_asm.h>
-
-/* Parameters and result. */
-#define dstin x0
-#define src x1
-#define count x2
-#define tmp1 x3
-#define tmp1w w3
-#define tmp2 x4
-#define tmp2w w4
-#define tmp3 x5
-#define tmp3w w5
-#define dst x6
-
-#define A_l x7
-#define A_h x8
-#define B_l x9
-#define B_h x10
-#define C_l x11
-#define C_h x12
-#define D_l x13
-#define D_h x14
-
-#if defined(WMEMMOVE)
-ENTRY(wmemmove)
- lsl count, count, #2
-#else
-ENTRY(memmove)
-#endif
- cmp dstin, src
- b.lo .Ldownwards
- add tmp1, src, count
- cmp dstin, tmp1
- b.hs __memcpy /* No overlap. */
-
- /* Upwards move with potential overlap.
- * Need to move from the tail backwards. SRC and DST point one
- * byte beyond the remaining data to move. */
- add dst, dstin, count
- add src, src, count
- cmp count, #64
- b.ge .Lmov_not_short_up
-
- /* Deal with small moves quickly by dropping straight into the
- * exit block. */
-.Ltail63up:
- /* Move up to 48 bytes of data. At this point we only need the
- * bottom 6 bits of count to be accurate. */
- ands tmp1, count, #0x30
- b.eq .Ltail15up
- sub dst, dst, tmp1
- sub src, src, tmp1
- cmp tmp1w, #0x20
- b.eq 1f
- b.lt 2f
- ldp A_l, A_h, [src, #32]
- stp A_l, A_h, [dst, #32]
-1:
- ldp A_l, A_h, [src, #16]
- stp A_l, A_h, [dst, #16]
-2:
- ldp A_l, A_h, [src]
- stp A_l, A_h, [dst]
-.Ltail15up:
- /* Move up to 15 bytes of data. Does not assume additional data
- * being moved. */
- tbz count, #3, 1f
- ldr tmp1, [src, #-8]!
- str tmp1, [dst, #-8]!
-1:
- tbz count, #2, 1f
- ldr tmp1w, [src, #-4]!
- str tmp1w, [dst, #-4]!
-1:
- tbz count, #1, 1f
- ldrh tmp1w, [src, #-2]!
- strh tmp1w, [dst, #-2]!
-1:
- tbz count, #0, 1f
- ldrb tmp1w, [src, #-1]
- strb tmp1w, [dst, #-1]
-1:
- ret
-
-.Lmov_not_short_up:
- /* We don't much care about the alignment of DST, but we want SRC
- * to be 128-bit (16 byte) aligned so that we don't cross cache line
- * boundaries on both loads and stores. */
- ands tmp2, src, #15 /* Bytes to reach alignment. */
- b.eq 2f
- sub count, count, tmp2
- /* Move enough data to reach alignment; unlike memcpy, we have to
- * be aware of the overlap, which means we can't move data twice. */
- tbz tmp2, #3, 1f
- ldr tmp1, [src, #-8]!
- str tmp1, [dst, #-8]!
-1:
- tbz tmp2, #2, 1f
- ldr tmp1w, [src, #-4]!
- str tmp1w, [dst, #-4]!
-1:
- tbz tmp2, #1, 1f
- ldrh tmp1w, [src, #-2]!
- strh tmp1w, [dst, #-2]!
-1:
- tbz tmp2, #0, 1f
- ldrb tmp1w, [src, #-1]!
- strb tmp1w, [dst, #-1]!
-1:
-
- /* There may be less than 63 bytes to go now. */
- cmp count, #63
- b.le .Ltail63up
-2:
- subs count, count, #128
- b.ge .Lmov_body_large_up
- /* Less than 128 bytes to move, so handle 64 here and then jump
- * to the tail. */
- ldp A_l, A_h, [src, #-64]!
- ldp B_l, B_h, [src, #16]
- ldp C_l, C_h, [src, #32]
- ldp D_l, D_h, [src, #48]
- stp A_l, A_h, [dst, #-64]!
- stp B_l, B_h, [dst, #16]
- stp C_l, C_h, [dst, #32]
- stp D_l, D_h, [dst, #48]
- tst count, #0x3f
- b.ne .Ltail63up
- ret
-
- /* Critical loop. Start at a new Icache line boundary. Assuming
- * 64 bytes per line this ensures the entire loop is in one line. */
- .p2align 6
-.Lmov_body_large_up:
- /* There are at least 128 bytes to move. */
- ldp A_l, A_h, [src, #-16]
- ldp B_l, B_h, [src, #-32]
- ldp C_l, C_h, [src, #-48]
- ldp D_l, D_h, [src, #-64]!
-1:
- stp A_l, A_h, [dst, #-16]
- ldp A_l, A_h, [src, #-16]
- stp B_l, B_h, [dst, #-32]
- ldp B_l, B_h, [src, #-32]
- stp C_l, C_h, [dst, #-48]
- ldp C_l, C_h, [src, #-48]
- stp D_l, D_h, [dst, #-64]!
- ldp D_l, D_h, [src, #-64]!
- subs count, count, #64
- b.ge 1b
- stp A_l, A_h, [dst, #-16]
- stp B_l, B_h, [dst, #-32]
- stp C_l, C_h, [dst, #-48]
- stp D_l, D_h, [dst, #-64]!
- tst count, #0x3f
- b.ne .Ltail63up
- ret
-
-
-.Ldownwards:
- /* For a downwards move we can safely use memcpy provided that
- * DST is more than 16 bytes away from SRC. */
- sub tmp1, src, #16
- cmp dstin, tmp1
- b.ls __memcpy /* May overlap, but not critically. */
-
- mov dst, dstin /* Preserve DSTIN for return value. */
- cmp count, #64
- b.ge .Lmov_not_short_down
-
- /* Deal with small moves quickly by dropping straight into the
- * exit block. */
-.Ltail63down:
- /* Move up to 48 bytes of data. At this point we only need the
- * bottom 6 bits of count to be accurate. */
- ands tmp1, count, #0x30
- b.eq .Ltail15down
- add dst, dst, tmp1
- add src, src, tmp1
- cmp tmp1w, #0x20
- b.eq 1f
- b.lt 2f
- ldp A_l, A_h, [src, #-48]
- stp A_l, A_h, [dst, #-48]
-1:
- ldp A_l, A_h, [src, #-32]
- stp A_l, A_h, [dst, #-32]
-2:
- ldp A_l, A_h, [src, #-16]
- stp A_l, A_h, [dst, #-16]
-.Ltail15down:
- /* Move up to 15 bytes of data. Does not assume additional data
- being moved. */
- tbz count, #3, 1f
- ldr tmp1, [src], #8
- str tmp1, [dst], #8
-1:
- tbz count, #2, 1f
- ldr tmp1w, [src], #4
- str tmp1w, [dst], #4
-1:
- tbz count, #1, 1f
- ldrh tmp1w, [src], #2
- strh tmp1w, [dst], #2
-1:
- tbz count, #0, 1f
- ldrb tmp1w, [src]
- strb tmp1w, [dst]
-1:
- ret
-
-.Lmov_not_short_down:
- /* We don't much care about the alignment of DST, but we want SRC
- * to be 128-bit (16 byte) aligned so that we don't cross cache line
- * boundaries on both loads and stores. */
- neg tmp2, src
- ands tmp2, tmp2, #15 /* Bytes to reach alignment. */
- b.eq 2f
- sub count, count, tmp2
- /* Move enough data to reach alignment; unlike memcpy, we have to
- * be aware of the overlap, which means we can't move data twice. */
- tbz tmp2, #3, 1f
- ldr tmp1, [src], #8
- str tmp1, [dst], #8
-1:
- tbz tmp2, #2, 1f
- ldr tmp1w, [src], #4
- str tmp1w, [dst], #4
-1:
- tbz tmp2, #1, 1f
- ldrh tmp1w, [src], #2
- strh tmp1w, [dst], #2
-1:
- tbz tmp2, #0, 1f
- ldrb tmp1w, [src], #1
- strb tmp1w, [dst], #1
-1:
-
- /* There may be less than 63 bytes to go now. */
- cmp count, #63
- b.le .Ltail63down
-2:
- subs count, count, #128
- b.ge .Lmov_body_large_down
- /* Less than 128 bytes to move, so handle 64 here and then jump
- * to the tail. */
- ldp A_l, A_h, [src]
- ldp B_l, B_h, [src, #16]
- ldp C_l, C_h, [src, #32]
- ldp D_l, D_h, [src, #48]
- stp A_l, A_h, [dst]
- stp B_l, B_h, [dst, #16]
- stp C_l, C_h, [dst, #32]
- stp D_l, D_h, [dst, #48]
- tst count, #0x3f
- add src, src, #64
- add dst, dst, #64
- b.ne .Ltail63down
- ret
-
- /* Critical loop. Start at a new cache line boundary. Assuming
- * 64 bytes per line this ensures the entire loop is in one line. */
- .p2align 6
-.Lmov_body_large_down:
- /* There are at least 128 bytes to move. */
- ldp A_l, A_h, [src, #0]
- sub dst, dst, #16 /* Pre-bias. */
- ldp B_l, B_h, [src, #16]
- ldp C_l, C_h, [src, #32]
- ldp D_l, D_h, [src, #48]! /* src += 64 - Pre-bias. */
-1:
- stp A_l, A_h, [dst, #16]
- ldp A_l, A_h, [src, #16]
- stp B_l, B_h, [dst, #32]
- ldp B_l, B_h, [src, #32]
- stp C_l, C_h, [dst, #48]
- ldp C_l, C_h, [src, #48]
- stp D_l, D_h, [dst, #64]!
- ldp D_l, D_h, [src, #64]!
- subs count, count, #64
- b.ge 1b
- stp A_l, A_h, [dst, #16]
- stp B_l, B_h, [dst, #32]
- stp C_l, C_h, [dst, #48]
- stp D_l, D_h, [dst, #64]
- add src, src, #16
- add dst, dst, #64 + 16
- tst count, #0x3f
- b.ne .Ltail63down
- ret
-#if defined(WMEMMOVE)
-END(wmemmove)
-#else
-END(memmove)
-
-ALIAS_SYMBOL(memcpy, memmove)
-#endif
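
The deleted memmove decides its copy direction once, up front: a move to a lower address is handed to __memcpy whenever DST sits at least 16 bytes below SRC (the "may overlap, but not critically" case, since the forward copier's over-copying tricks reach at most 16 bytes backwards), fully disjoint ranges always go to __memcpy, and only an upward overlap is walked from the tail backwards. A hedged C rendering of that dispatch, with byte loops standing in for the unrolled ldp/stp sequences:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Stands in for __memcpy, the forward copier from memcpy.S. */
static void *forward_copy(void *d, const void *s, size_t n) {
    return memcpy(d, s, n);
}

void *memmove_sketch(void *dstin, const void *srcin, size_t count) {
    char *d = (char *)dstin;
    const char *s = (const char *)srcin;
    if ((uintptr_t)d < (uintptr_t)s) {                 /* .Ldownwards */
        if ((uintptr_t)s - (uintptr_t)d >= 16)
            return forward_copy(dstin, srcin, count);  /* overlap not critical */
        for (size_t i = 0; i < count; i++) d[i] = s[i];  /* careful forward move */
        return dstin;
    }
    if ((uintptr_t)d >= (uintptr_t)s + count)
        return forward_copy(dstin, srcin, count);      /* no overlap at all */
    for (size_t i = count; i-- > 0; ) d[i] = s[i];     /* upward overlap: tail first */
    return dstin;
}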
diff --git a/libc/arch-arm64/denver64/bionic/memset.S b/libc/arch-arm64/denver64/bionic/memset.S
deleted file mode 100644
index bea5b26..0000000
--- a/libc/arch-arm64/denver64/bionic/memset.S
+++ /dev/null
@@ -1,285 +0,0 @@
-/* Copyright (c) 2012, Linaro Limited
- All rights reserved.
- Copyright (c) 2014, NVIDIA Corporation. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- * Neither the name of the Linaro nor the
- names of its contributors may be used to endorse or promote products
- derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-/* Assumptions:
- *
- * denver, ARMv8-a, AArch64
- * Unaligned accesses
- *
- */
-
-#include <private/bionic_asm.h>
-
-/* By default we assume that the DC instruction can be used to zero
- data blocks more efficiently. In some circumstances this might be
- unsafe, for example in an asymmetric multiprocessor environment with
- different DC clear lengths (neither the upper nor lower lengths are
- safe to use). The feature can be disabled by defining DONT_USE_DC.
-
- If code may be run in a virtualized environment, then define
- MAYBE_VIRT. This will cause the code to cache the system register
- values rather than re-reading them each call. */
-
-#define dstin x0
-#define val w1
-#define count x2
-#define dst_count x3 /* for __memset_chk */
-#define tmp1 x3
-#define tmp1w w3
-#define tmp2 x4
-#define tmp2w w4
-#define zva_len_x x5
-#define zva_len w5
-#define zva_bits_x x6
-
-#define A_l x7
-#define A_lw w7
-#define dst x8
-#define tmp3w w9
-
-#define QA_l q0
-
-ENTRY(__memset_chk)
- cmp count, dst_count
- bls memset
-
- // Preserve for accurate backtrace.
- stp x29, x30, [sp, -16]!
- .cfi_def_cfa_offset 16
- .cfi_rel_offset x29, 0
- .cfi_rel_offset x30, 8
-
- bl __memset_chk_fail
-END(__memset_chk)
-
-ENTRY(memset)
-
- mov dst, dstin /* Preserve return value. */
- ands A_lw, val, #255
-#ifndef DONT_USE_DC
-# b.eq .Lzero_mem
-#endif
- orr A_lw, A_lw, A_lw, lsl #8
- orr A_lw, A_lw, A_lw, lsl #16
- orr A_l, A_l, A_l, lsl #32
-.Ltail_maybe_long:
- cmp count, #256
- b.ge .Lnot_short
-.Ltail_maybe_tiny:
- cmp count, #15
- b.le .Ltail15tiny
-.Ltail255:
- ands tmp1, count, #0xC0
- b.eq .Ltail63
- dup v0.4s, A_lw
- cmp tmp1w, #0x80
- b.eq 1f
- b.lt 2f
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
-1:
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
-2:
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
-.Ltail63:
- ands tmp1, count, #0x30
- b.eq .Ltail15
- add dst, dst, tmp1
- cmp tmp1w, #0x20
- b.eq 1f
- b.lt 2f
- stp A_l, A_l, [dst, #-48]
-1:
- stp A_l, A_l, [dst, #-32]
-2:
- stp A_l, A_l, [dst, #-16]
-
-.Ltail15:
- and count, count, #15
- add dst, dst, count
- stp A_l, A_l, [dst, #-16] /* Repeat some/all of last store. */
- ret
-
-.Ltail15tiny:
- /* Set up to 15 bytes. Does not assume earlier memory
- being set. */
- tbz count, #3, 1f
- str A_l, [dst], #8
-1:
- tbz count, #2, 1f
- str A_lw, [dst], #4
-1:
- tbz count, #1, 1f
- strh A_lw, [dst], #2
-1:
- tbz count, #0, 1f
- strb A_lw, [dst]
-1:
- ret
-
- /* Critical loop. Start at a new cache line boundary. Assuming
- * 64 bytes per line, this ensures the entire loop is in one line. */
- .p2align 6
-.Lnot_short:
- dup v0.4s, A_lw
- neg tmp2, dst
- ands tmp2, tmp2, #15
- b.eq 2f
- /* Bring DST to 128-bit (16-byte) alignment. We know that there's
- * more than that to set, so we simply store 16 bytes and advance by
- * the amount required to reach alignment. */
- sub count, count, tmp2
- stp A_l, A_l, [dst]
- add dst, dst, tmp2
- /* There may be less than 63 bytes to go now. */
- cmp count, #255
- b.le .Ltail255
-2:
- cmp count, #2097152
- b.gt 3f
-1:
- sub count, count, #256
-2:
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
- stp QA_l, QA_l, [dst], #32
- subs count, count, #256
- b.ge 2b
- tst count, #0xff
- b.ne .Ltail255
- ret
-3:
- sub count, count, #64
-4:
- subs count, count, #64
- stnp QA_l, QA_l, [dst]
- stnp QA_l, QA_l, [dst, #32]
- add dst, dst, #64
- b.ge 4b
- tst count, #0x3f
- b.ne .Ltail63
- ret
-
-#ifndef DONT_USE_DC
- /* For zeroing memory, check to see if we can use the ZVA feature to
- * zero entire 'cache' lines. */
-.Lzero_mem:
- mov A_l, #0
- cmp count, #63
- b.le .Ltail_maybe_tiny
- neg tmp2, dst
- ands tmp2, tmp2, #15
- b.eq 1f
- sub count, count, tmp2
- stp A_l, A_l, [dst]
- add dst, dst, tmp2
- cmp count, #63
- b.le .Ltail63
-1:
- /* For zeroing small amounts of memory, it's not worth setting up
- * the line-clear code. */
- cmp count, #128
- b.lt .Lnot_short
-#ifdef MAYBE_VIRT
- /* For efficiency when virtualized, we cache the ZVA capability. */
- adrp tmp2, .Lcache_clear
- ldr zva_len, [tmp2, #:lo12:.Lcache_clear]
- tbnz zva_len, #31, .Lnot_short
- cbnz zva_len, .Lzero_by_line
- mrs tmp1, dczid_el0
- tbz tmp1, #4, 1f
- /* ZVA not available. Remember this for next time. */
- mov zva_len, #~0
- str zva_len, [tmp2, #:lo12:.Lcache_clear]
- b .Lnot_short
-1:
- mov tmp3w, #4
- and zva_len, tmp1w, #15 /* Safety: other bits reserved. */
- lsl zva_len, tmp3w, zva_len
- str zva_len, [tmp2, #:lo12:.Lcache_clear]
-#else
- mrs tmp1, dczid_el0
- tbnz tmp1, #4, .Lnot_short
- mov tmp3w, #4
- and zva_len, tmp1w, #15 /* Safety: other bits reserved. */
- lsl zva_len, tmp3w, zva_len
-#endif
-
-.Lzero_by_line:
- /* Compute how far we need to go to become suitably aligned. We're
- * already at quad-word alignment. */
- cmp count, zva_len_x
- b.lt .Lnot_short /* Not enough to reach alignment. */
- sub zva_bits_x, zva_len_x, #1
- neg tmp2, dst
- ands tmp2, tmp2, zva_bits_x
- b.eq 1f /* Already aligned. */
- /* Not aligned, check that there's enough to copy after alignment. */
- sub tmp1, count, tmp2
- cmp tmp1, #64
- ccmp tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */
- b.lt .Lnot_short
- /* We know that there's at least 64 bytes to zero and that it's safe
- * to overrun by 64 bytes. */
- mov count, tmp1
-2:
- stp A_l, A_l, [dst]
- stp A_l, A_l, [dst, #16]
- stp A_l, A_l, [dst, #32]
- subs tmp2, tmp2, #64
- stp A_l, A_l, [dst, #48]
- add dst, dst, #64
- b.ge 2b
- /* We've overrun a bit, so adjust dst downwards. */
- add dst, dst, tmp2
-1:
- sub count, count, zva_len_x
-3:
- dc zva, dst
- add dst, dst, zva_len_x
- subs count, count, zva_len_x
- b.ge 3b
- ands count, count, zva_bits_x
- b.ne .Ltail_maybe_long
- ret
-END(memset)
-
-#ifdef MAYBE_VIRT
- .bss
- .p2align 2
-.Lcache_clear:
- .space 4
-#endif
-#endif /* DONT_USE_DC */
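
The zeroing half of the deleted memset.S probes DCZID_EL0 once: if bit 4 (DZP) is set, DC ZVA is prohibited; otherwise the block size is 4 << BS bytes, where BS is the low four bits, and whole aligned blocks are cleared with dc zva (with an optional MAYBE_VIRT mode that caches the answer in .Lcache_clear). The hunk also deletes the fortified __memset_chk entry, which falls through to memset when the write fits the destination and calls __memset_chk_fail otherwise. A hedged C sketch of the probe-and-zero flow, using AArch64-only GCC/Clang inline assembly; the real code also handles small sizes and the various fall-backs to plain stores:

#include <stddef.h>
#include <stdint.h>

/* Read DCZID_EL0: returns the DC ZVA block size in bytes, or 0 if
 * bit 4 (DZP) says zeroing by block is prohibited. */
static size_t dc_zva_block_size(void) {
    uint64_t dczid;
    __asm__("mrs %0, dczid_el0" : "=r"(dczid));
    if (dczid & (1u << 4)) return 0;       /* DZP: DC ZVA not allowed */
    return (size_t)4 << (dczid & 0xf);     /* 4 << BS, as in the asm above */
}

/* Zero [dst, dst+count) with dc zva for whole aligned blocks and
 * plain stores for the unaligned head and the remainder. */
static void zero_mem_sketch(char *dst, size_t count) {
    size_t line = dc_zva_block_size();
    if (line == 0 || count < 2 * line) {   /* not worth setting up */
        for (size_t i = 0; i < count; i++) dst[i] = 0;
        return;
    }
    size_t head = -(uintptr_t)dst & (line - 1);   /* bytes to reach alignment */
    for (size_t i = 0; i < head; i++) dst[i] = 0;
    dst += head; count -= head;
    while (count >= line) {                /* one dc zva per block, as in 3: above */
        __asm__ volatile("dc zva, %0" : : "r"(dst) : "memory");
        dst += line; count -= line;
    }
    for (size_t i = 0; i < count; i++) dst[i] = 0;  /* tail */
}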