--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/README.android 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,6 @@
+This directory does not exist in the OpenSSL distribution.
+
+It has been added to import assembler code from OpenSSL 0.9.9-dev
+(ftp://ftp.openssl.org/snapshot/). The assembler files (.s) were
+generated by running the Perl files (.pl), with ".align 2" appended
+where needed to avoid assembler error messages.
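+
+For reference, the regeneration step can be sketched in Perl as below.
+This is a hypothetical helper, not part of the import; the exact
+invocation used is not recorded here.
+
+  for my $pl (glob "*/*.pl") {
+      (my $s = $pl) =~ s/\.pl\z/.s/;
+      open my $gen, "-|", "perl", $pl or die "perl $pl: $!";
+      open my $out, ">",  $s          or die "$s: $!";
+      print {$out} $_ while <$gen>;
+      print {$out} ".align 2\n";  # unconditional here; the import
+                                  # appended it only where needed
+      close $gen;
+      close $out;
+  }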
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/aes/aes-armv4.pl 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,1030 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for ARMv4
+
+# January 2007.
+#
+# The code uses a single 1K S-box and is >2 times faster than code
+# generated by gcc-3.4.1. This is thanks to a unique feature of the
+# ARMv4 ISA, which allows a logical or arithmetic operation to be
+# merged with a shift or rotate in one instruction, emitting the
+# combined result every cycle. The module is endian-neutral. The
+# performance is ~42 cycles/byte for a 128-bit key.
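+#
+# For illustration, the combined operations in question look like
+#
+#     eor   r0,r0,r7,ror#8   @ s0 ^= ROTATE(Te1[s1>>16],8)
+#
+# i.e. a single instruction where a separate rotate followed by an
+# XOR would otherwise be needed.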
+
+# May 2007.
+#
+# AES_set_[en|de]crypt_key is added.
+
+$s0="r0";
+$s1="r1";
+$s2="r2";
+$s3="r3";
+$t1="r4";
+$t2="r5";
+$t3="r6";
+$i1="r7";
+$i2="r8";
+$i3="r9";
+
+$tbl="r10";
+$key="r11";
+$rounds="r12";
+
+$code=<<___;
+.text
+.code 32
+
+.type AES_Te,%object
+.align 5
+AES_Te:
+.word 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+.word 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+.word 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+.word 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+.word 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+.word 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+.word 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+.word 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+.word 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+.word 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+.word 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+.word 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+.word 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+.word 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+.word 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+.word 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+.word 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+.word 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+.word 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+.word 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+.word 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+.word 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+.word 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+.word 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+.word 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+.word 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+.word 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+.word 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+.word 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+.word 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+.word 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+.word 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+.word 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+.word 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+.word 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+.word 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+.word 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+.word 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+.word 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+.word 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+.word 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+.word 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+.word 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+.word 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+.word 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+.word 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+.word 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+.word 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+.word 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+.word 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+.word 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+.word 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+.word 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+.word 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+.word 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+.word 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+.word 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+.word 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+.word 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+.word 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+.word 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+.word 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+.word 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+.word 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+@ Te4[256]
+.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+@ rcon[]
+.word 0x01000000, 0x02000000, 0x04000000, 0x08000000
+.word 0x10000000, 0x20000000, 0x40000000, 0x80000000
+.word 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
+.size AES_Te,.-AES_Te
+
+@ void AES_encrypt(const unsigned char *in, unsigned char *out,
+@ const AES_KEY *key) {
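+@
+@ Typical use from C, for illustration (the standard OpenSSL AES API):
+@     AES_KEY ks;
+@     AES_set_encrypt_key(user_key, 128, &ks);  /* user_key: 16 bytes */
+@     AES_encrypt(in_block, out_block, &ks);    /* one 16-byte block  */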
+.global AES_encrypt
+.type AES_encrypt,%function
+.align 5
+AES_encrypt:
+ sub r3,pc,#8 @ AES_encrypt
+ stmdb sp!,{r1,r4-r12,lr}
+ mov $rounds,r0 @ inp
+ mov $key,r2
+ sub $tbl,r3,#AES_encrypt-AES_Te @ Te
+
+ ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
+ ldrb $t1,[$rounds,#2] @ manner...
+ ldrb $t2,[$rounds,#1]
+ ldrb $t3,[$rounds,#0]
+ orr $s0,$s0,$t1,lsl#8
+ orr $s0,$s0,$t2,lsl#16
+ orr $s0,$s0,$t3,lsl#24
+ ldrb $s1,[$rounds,#7]
+ ldrb $t1,[$rounds,#6]
+ ldrb $t2,[$rounds,#5]
+ ldrb $t3,[$rounds,#4]
+ orr $s1,$s1,$t1,lsl#8
+ orr $s1,$s1,$t2,lsl#16
+ orr $s1,$s1,$t3,lsl#24
+ ldrb $s2,[$rounds,#11]
+ ldrb $t1,[$rounds,#10]
+ ldrb $t2,[$rounds,#9]
+ ldrb $t3,[$rounds,#8]
+ orr $s2,$s2,$t1,lsl#8
+ orr $s2,$s2,$t2,lsl#16
+ orr $s2,$s2,$t3,lsl#24
+ ldrb $s3,[$rounds,#15]
+ ldrb $t1,[$rounds,#14]
+ ldrb $t2,[$rounds,#13]
+ ldrb $t3,[$rounds,#12]
+ orr $s3,$s3,$t1,lsl#8
+ orr $s3,$s3,$t2,lsl#16
+ orr $s3,$s3,$t3,lsl#24
+
+ bl _armv4_AES_encrypt
+
+ ldr $rounds,[sp],#4 @ pop out
+ mov $t1,$s0,lsr#24 @ write output in endian-neutral
+ mov $t2,$s0,lsr#16 @ manner...
+ mov $t3,$s0,lsr#8
+ strb $t1,[$rounds,#0]
+ strb $t2,[$rounds,#1]
+ strb $t3,[$rounds,#2]
+ strb $s0,[$rounds,#3]
+ mov $t1,$s1,lsr#24
+ mov $t2,$s1,lsr#16
+ mov $t3,$s1,lsr#8
+ strb $t1,[$rounds,#4]
+ strb $t2,[$rounds,#5]
+ strb $t3,[$rounds,#6]
+ strb $s1,[$rounds,#7]
+ mov $t1,$s2,lsr#24
+ mov $t2,$s2,lsr#16
+ mov $t3,$s2,lsr#8
+ strb $t1,[$rounds,#8]
+ strb $t2,[$rounds,#9]
+ strb $t3,[$rounds,#10]
+ strb $s2,[$rounds,#11]
+ mov $t1,$s3,lsr#24
+ mov $t2,$s3,lsr#16
+ mov $t3,$s3,lsr#8
+ strb $t1,[$rounds,#12]
+ strb $t2,[$rounds,#13]
+ strb $t3,[$rounds,#14]
+ strb $s3,[$rounds,#15]
+
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+.size AES_encrypt,.-AES_encrypt
+
+.type _armv4_AES_encrypt,%function
+.align 2
+_armv4_AES_encrypt:
+ str lr,[sp,#-4]! @ push lr
+ ldr $t1,[$key],#16
+ ldr $t2,[$key,#-12]
+ ldr $t3,[$key,#-8]
+ ldr $i1,[$key,#-4]
+ ldr $rounds,[$key,#240-16]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
+ sub $rounds,$rounds,#1
+ mov lr,#255
+
+.Lenc_loop:
+ and $i2,lr,$s0,lsr#8
+ and $i3,lr,$s0,lsr#16
+ and $i1,lr,$s0
+ mov $s0,$s0,lsr#24
+ ldr $t1,[$tbl,$i1,lsl#2] @ Te3[s0>>0]
+ ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24]
+ ldr $t2,[$tbl,$i2,lsl#2] @ Te2[s0>>8]
+ ldr $t3,[$tbl,$i3,lsl#2] @ Te1[s0>>16]
+
+ and $i1,lr,$s1,lsr#16 @ i0
+ and $i2,lr,$s1
+ and $i3,lr,$s1,lsr#8
+ mov $s1,$s1,lsr#24
+ ldr $i1,[$tbl,$i1,lsl#2] @ Te1[s1>>16]
+ ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24]
+ ldr $i2,[$tbl,$i2,lsl#2] @ Te3[s1>>0]
+ ldr $i3,[$tbl,$i3,lsl#2] @ Te2[s1>>8]
+ eor $s0,$s0,$i1,ror#8
+ eor $s1,$s1,$t1,ror#24
+ eor $t2,$t2,$i2,ror#8
+ eor $t3,$t3,$i3,ror#8
+
+ and $i1,lr,$s2,lsr#8 @ i0
+ and $i2,lr,$s2,lsr#16 @ i1
+ and $i3,lr,$s2
+ mov $s2,$s2,lsr#24
+ ldr $i1,[$tbl,$i1,lsl#2] @ Te2[s2>>8]
+ ldr $i2,[$tbl,$i2,lsl#2] @ Te1[s2>>16]
+ ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24]
+ ldr $i3,[$tbl,$i3,lsl#2] @ Te3[s2>>0]
+ eor $s0,$s0,$i1,ror#16
+ eor $s1,$s1,$i2,ror#8
+ eor $s2,$s2,$t2,ror#16
+ eor $t3,$t3,$i3,ror#16
+
+ and $i1,lr,$s3 @ i0
+ and $i2,lr,$s3,lsr#8 @ i1
+ and $i3,lr,$s3,lsr#16 @ i2
+ mov $s3,$s3,lsr#24
+ ldr $i1,[$tbl,$i1,lsl#2] @ Te3[s3>>0]
+ ldr $i2,[$tbl,$i2,lsl#2] @ Te2[s3>>8]
+ ldr $i3,[$tbl,$i3,lsl#2] @ Te1[s3>>16]
+ ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
+ eor $s0,$s0,$i1,ror#24
+ eor $s1,$s1,$i2,ror#16
+ eor $s2,$s2,$i3,ror#8
+ eor $s3,$s3,$t3,ror#8
+
+ ldr $t1,[$key],#16
+ ldr $t2,[$key,#-12]
+ ldr $t3,[$key,#-8]
+ ldr $i1,[$key,#-4]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
+
+ subs $rounds,$rounds,#1
+ bne .Lenc_loop
+
+ add $tbl,$tbl,#2
+
+ and $i1,lr,$s0
+ and $i2,lr,$s0,lsr#8
+ and $i3,lr,$s0,lsr#16
+ mov $s0,$s0,lsr#24
+ ldrb $t1,[$tbl,$i1,lsl#2] @ Te4[s0>>0]
+ ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24]
+ ldrb $t2,[$tbl,$i2,lsl#2] @ Te4[s0>>8]
+ ldrb $t3,[$tbl,$i3,lsl#2] @ Te4[s0>>16]
+
+ and $i1,lr,$s1,lsr#16 @ i0
+ and $i2,lr,$s1
+ and $i3,lr,$s1,lsr#8
+ mov $s1,$s1,lsr#24
+ ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s1>>16]
+ ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24]
+ ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s1>>0]
+ ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s1>>8]
+ eor $s0,$i1,$s0,lsl#8
+ eor $s1,$t1,$s1,lsl#24
+ eor $t2,$i2,$t2,lsl#8
+ eor $t3,$i3,$t3,lsl#8
+
+ and $i1,lr,$s2,lsr#8 @ i0
+ and $i2,lr,$s2,lsr#16 @ i1
+ and $i3,lr,$s2
+ mov $s2,$s2,lsr#24
+ ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s2>>8]
+ ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s2>>16]
+ ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24]
+ ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s2>>0]
+ eor $s0,$i1,$s0,lsl#8
+ eor $s1,$s1,$i2,lsl#16
+ eor $s2,$t2,$s2,lsl#24
+ eor $t3,$i3,$t3,lsl#8
+
+ and $i1,lr,$s3 @ i0
+ and $i2,lr,$s3,lsr#8 @ i1
+ and $i3,lr,$s3,lsr#16 @ i2
+ mov $s3,$s3,lsr#24
+ ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s3>>0]
+ ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s3>>8]
+ ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s3>>16]
+ ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24]
+ eor $s0,$i1,$s0,lsl#8
+ eor $s1,$s1,$i2,lsl#8
+ eor $s2,$s2,$i3,lsl#16
+ eor $s3,$t3,$s3,lsl#24
+
+ ldr lr,[sp],#4 @ pop lr
+ ldr $t1,[$key,#0]
+ ldr $t2,[$key,#4]
+ ldr $t3,[$key,#8]
+ ldr $i1,[$key,#12]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
+
+ sub $tbl,$tbl,#2
+ mov pc,lr @ return
+.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
+
+.global AES_set_encrypt_key
+.type AES_set_encrypt_key,%function
+.align 5
+AES_set_encrypt_key:
+ sub r3,pc,#8 @ AES_set_encrypt_key
+ teq r0,#0
+ moveq r0,#-1
+ beq .Labrt
+ teq r2,#0
+ moveq r0,#-1
+ beq .Labrt
+
+ teq r1,#128
+ beq .Lok
+ teq r1,#192
+ beq .Lok
+ teq r1,#256
+ movne r0,#-1
+ bne .Labrt
+
+.Lok: stmdb sp!,{r4-r12,lr}
+ sub $tbl,r3,#AES_set_encrypt_key-AES_Te-1024 @ Te4
+
+ mov $rounds,r0 @ inp
+ mov lr,r1 @ bits
+ mov $key,r2 @ key
+
+ ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
+ ldrb $t1,[$rounds,#2] @ manner...
+ ldrb $t2,[$rounds,#1]
+ ldrb $t3,[$rounds,#0]
+ orr $s0,$s0,$t1,lsl#8
+ orr $s0,$s0,$t2,lsl#16
+ orr $s0,$s0,$t3,lsl#24
+ ldrb $s1,[$rounds,#7]
+ ldrb $t1,[$rounds,#6]
+ ldrb $t2,[$rounds,#5]
+ ldrb $t3,[$rounds,#4]
+ orr $s1,$s1,$t1,lsl#8
+ orr $s1,$s1,$t2,lsl#16
+ orr $s1,$s1,$t3,lsl#24
+ ldrb $s2,[$rounds,#11]
+ ldrb $t1,[$rounds,#10]
+ ldrb $t2,[$rounds,#9]
+ ldrb $t3,[$rounds,#8]
+ orr $s2,$s2,$t1,lsl#8
+ orr $s2,$s2,$t2,lsl#16
+ orr $s2,$s2,$t3,lsl#24
+ ldrb $s3,[$rounds,#15]
+ ldrb $t1,[$rounds,#14]
+ ldrb $t2,[$rounds,#13]
+ ldrb $t3,[$rounds,#12]
+ orr $s3,$s3,$t1,lsl#8
+ orr $s3,$s3,$t2,lsl#16
+ orr $s3,$s3,$t3,lsl#24
+ str $s0,[$key],#16
+ str $s1,[$key,#-12]
+ str $s2,[$key,#-8]
+ str $s3,[$key,#-4]
+
+ teq lr,#128
+ bne .Lnot128
+ mov $rounds,#10
+ str $rounds,[$key,#240-16]
+ add $t3,$tbl,#256 @ rcon
+ mov lr,#255
+
+.L128_loop:
+ and $t2,lr,$s3,lsr#24
+ and $i1,lr,$s3,lsr#16
+ and $i2,lr,$s3,lsr#8
+ and $i3,lr,$s3
+ ldrb $t2,[$tbl,$t2]
+ ldrb $i1,[$tbl,$i1]
+ ldrb $i2,[$tbl,$i2]
+ ldrb $i3,[$tbl,$i3]
+ ldr $t1,[$t3],#4 @ rcon[i++]
+ orr $t2,$t2,$i1,lsl#24
+ orr $t2,$t2,$i2,lsl#16
+ orr $t2,$t2,$i3,lsl#8
+ eor $t2,$t2,$t1
+ eor $s0,$s0,$t2 @ rk[4]=rk[0]^...
+ eor $s1,$s1,$s0 @ rk[5]=rk[1]^rk[4]
+ eor $s2,$s2,$s1 @ rk[6]=rk[2]^rk[5]
+ eor $s3,$s3,$s2 @ rk[7]=rk[3]^rk[6]
+ str $s0,[$key],#16
+ str $s1,[$key,#-12]
+ str $s2,[$key,#-8]
+ str $s3,[$key,#-4]
+
+ subs $rounds,$rounds,#1
+ bne .L128_loop
+ sub r2,$key,#176
+ b .Ldone
+
+.Lnot128:
+ ldrb $i2,[$rounds,#19]
+ ldrb $t1,[$rounds,#18]
+ ldrb $t2,[$rounds,#17]
+ ldrb $t3,[$rounds,#16]
+ orr $i2,$i2,$t1,lsl#8
+ orr $i2,$i2,$t2,lsl#16
+ orr $i2,$i2,$t3,lsl#24
+ ldrb $i3,[$rounds,#23]
+ ldrb $t1,[$rounds,#22]
+ ldrb $t2,[$rounds,#21]
+ ldrb $t3,[$rounds,#20]
+ orr $i3,$i3,$t1,lsl#8
+ orr $i3,$i3,$t2,lsl#16
+ orr $i3,$i3,$t3,lsl#24
+ str $i2,[$key],#8
+ str $i3,[$key,#-4]
+
+ teq lr,#192
+ bne .Lnot192
+ mov $rounds,#12
+ str $rounds,[$key,#240-24]
+ add $t3,$tbl,#256 @ rcon
+ mov lr,#255
+ mov $rounds,#8
+
+.L192_loop:
+ and $t2,lr,$i3,lsr#24
+ and $i1,lr,$i3,lsr#16
+ and $i2,lr,$i3,lsr#8
+ and $i3,lr,$i3
+ ldrb $t2,[$tbl,$t2]
+ ldrb $i1,[$tbl,$i1]
+ ldrb $i2,[$tbl,$i2]
+ ldrb $i3,[$tbl,$i3]
+ ldr $t1,[$t3],#4 @ rcon[i++]
+ orr $t2,$t2,$i1,lsl#24
+ orr $t2,$t2,$i2,lsl#16
+ orr $t2,$t2,$i3,lsl#8
+ eor $i3,$t2,$t1
+ eor $s0,$s0,$i3 @ rk[6]=rk[0]^...
+ eor $s1,$s1,$s0 @ rk[7]=rk[1]^rk[6]
+ eor $s2,$s2,$s1 @ rk[8]=rk[2]^rk[7]
+ eor $s3,$s3,$s2 @ rk[9]=rk[3]^rk[8]
+ str $s0,[$key],#24
+ str $s1,[$key,#-20]
+ str $s2,[$key,#-16]
+ str $s3,[$key,#-12]
+
+ subs $rounds,$rounds,#1
+ subeq r2,$key,#216
+ beq .Ldone
+
+ ldr $i1,[$key,#-32]
+ ldr $i2,[$key,#-28]
+ eor $i1,$i1,$s3 @ rk[10]=rk[4]^rk[9]
+ eor $i3,$i2,$i1 @ rk[11]=rk[5]^rk[10]
+ str $i1,[$key,#-8]
+ str $i3,[$key,#-4]
+ b .L192_loop
+
+.Lnot192:
+ ldrb $i2,[$rounds,#27]
+ ldrb $t1,[$rounds,#26]
+ ldrb $t2,[$rounds,#25]
+ ldrb $t3,[$rounds,#24]
+ orr $i2,$i2,$t1,lsl#8
+ orr $i2,$i2,$t2,lsl#16
+ orr $i2,$i2,$t3,lsl#24
+ ldrb $i3,[$rounds,#31]
+ ldrb $t1,[$rounds,#30]
+ ldrb $t2,[$rounds,#29]
+ ldrb $t3,[$rounds,#28]
+ orr $i3,$i3,$t1,lsl#8
+ orr $i3,$i3,$t2,lsl#16
+ orr $i3,$i3,$t3,lsl#24
+ str $i2,[$key],#8
+ str $i3,[$key,#-4]
+
+ mov $rounds,#14
+ str $rounds,[$key,#240-32]
+ add $t3,$tbl,#256 @ rcon
+ mov lr,#255
+ mov $rounds,#7
+
+.L256_loop:
+ and $t2,lr,$i3,lsr#24
+ and $i1,lr,$i3,lsr#16
+ and $i2,lr,$i3,lsr#8
+ and $i3,lr,$i3
+ ldrb $t2,[$tbl,$t2]
+ ldrb $i1,[$tbl,$i1]
+ ldrb $i2,[$tbl,$i2]
+ ldrb $i3,[$tbl,$i3]
+ ldr $t1,[$t3],#4 @ rcon[i++]
+ orr $t2,$t2,$i1,lsl#24
+ orr $t2,$t2,$i2,lsl#16
+ orr $t2,$t2,$i3,lsl#8
+ eor $i3,$t2,$t1
+ eor $s0,$s0,$i3 @ rk[8]=rk[0]^...
+ eor $s1,$s1,$s0 @ rk[9]=rk[1]^rk[8]
+ eor $s2,$s2,$s1 @ rk[10]=rk[2]^rk[9]
+ eor $s3,$s3,$s2 @ rk[11]=rk[3]^rk[10]
+ str $s0,[$key],#32
+ str $s1,[$key,#-28]
+ str $s2,[$key,#-24]
+ str $s3,[$key,#-20]
+
+ subs $rounds,$rounds,#1
+ subeq r2,$key,#256
+ beq .Ldone
+
+ and $t2,lr,$s3
+ and $i1,lr,$s3,lsr#8
+ and $i2,lr,$s3,lsr#16
+ and $i3,lr,$s3,lsr#24
+ ldrb $t2,[$tbl,$t2]
+ ldrb $i1,[$tbl,$i1]
+ ldrb $i2,[$tbl,$i2]
+ ldrb $i3,[$tbl,$i3]
+ orr $t2,$t2,$i1,lsl#8
+ orr $t2,$t2,$i2,lsl#16
+ orr $t2,$t2,$i3,lsl#24
+
+ ldr $t1,[$key,#-48]
+ ldr $i1,[$key,#-44]
+ ldr $i2,[$key,#-40]
+ ldr $i3,[$key,#-36]
+ eor $t1,$t1,$t2 @ rk[12]=rk[4]^...
+ eor $i1,$i1,$t1 @ rk[13]=rk[5]^rk[12]
+ eor $i2,$i2,$i1 @ rk[14]=rk[6]^rk[13]
+ eor $i3,$i3,$i2 @ rk[15]=rk[7]^rk[14]
+ str $t1,[$key,#-16]
+ str $i1,[$key,#-12]
+ str $i2,[$key,#-8]
+ str $i3,[$key,#-4]
+ b .L256_loop
+
+.Ldone: mov r0,#0
+ ldmia sp!,{r4-r12,lr}
+.Labrt: tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+.size AES_set_encrypt_key,.-AES_set_encrypt_key
+
+.global AES_set_decrypt_key
+.type AES_set_decrypt_key,%function
+.align 5
+AES_set_decrypt_key:
+ str lr,[sp,#-4]! @ push lr
+ bl AES_set_encrypt_key
+ teq r0,#0
+ ldrne lr,[sp],#4 @ pop lr
+ bne .Labrt
+
+ stmdb sp!,{r4-r12}
+
+ ldr $rounds,[r2,#240] @ AES_set_encrypt_key preserves r2,
+ mov $key,r2 @ which is AES_KEY *key
+ mov $i1,r2
+ add $i2,r2,$rounds,lsl#4
+
+.Linv: ldr $s0,[$i1]
+ ldr $s1,[$i1,#4]
+ ldr $s2,[$i1,#8]
+ ldr $s3,[$i1,#12]
+ ldr $t1,[$i2]
+ ldr $t2,[$i2,#4]
+ ldr $t3,[$i2,#8]
+ ldr $i3,[$i2,#12]
+ str $s0,[$i2],#-16
+ str $s1,[$i2,#16+4]
+ str $s2,[$i2,#16+8]
+ str $s3,[$i2,#16+12]
+ str $t1,[$i1],#16
+ str $t2,[$i1,#-12]
+ str $t3,[$i1,#-8]
+ str $i3,[$i1,#-4]
+ teq $i1,$i2
+ bne .Linv
+___
+$mask80=$i1;
+$mask1b=$i2;
+$mask7f=$i3;
+$code.=<<___;
+ ldr $s0,[$key,#16]! @ prefetch tp1
+ mov $mask80,#0x80
+ mov $mask1b,#0x1b
+ orr $mask80,$mask80,#0x8000
+ orr $mask1b,$mask1b,#0x1b00
+ orr $mask80,$mask80,$mask80,lsl#16
+ orr $mask1b,$mask1b,$mask1b,lsl#16
+ sub $rounds,$rounds,#1
+ mvn $mask7f,$mask80
+ mov $rounds,$rounds,lsl#2 @ (rounds-1)*4
+
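+@ Each and/sub/and/eor group below doubles all four bytes of a word
+@ in GF(2^8) ("xtime"): bytes with the top bit set are reduced with
+@ 0x1b. The results tp2, tp4 and tp8 are then combined with rotates
+@ to apply InvMixColumns to each round-key word.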
+.Lmix: and $t1,$s0,$mask80
+ and $s1,$s0,$mask7f
+ sub $t1,$t1,$t1,lsr#7
+ and $t1,$t1,$mask1b
+ eor $s1,$t1,$s1,lsl#1 @ tp2
+
+ and $t1,$s1,$mask80
+ and $s2,$s1,$mask7f
+ sub $t1,$t1,$t1,lsr#7
+ and $t1,$t1,$mask1b
+ eor $s2,$t1,$s2,lsl#1 @ tp4
+
+ and $t1,$s2,$mask80
+ and $s3,$s2,$mask7f
+ sub $t1,$t1,$t1,lsr#7
+ and $t1,$t1,$mask1b
+ eor $s3,$t1,$s3,lsl#1 @ tp8
+
+ eor $t1,$s1,$s2
+ eor $t2,$s0,$s3 @ tp9
+ eor $t1,$t1,$s3 @ tpe
+ eor $t1,$t1,$s1,ror#24
+ eor $t1,$t1,$t2,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8)
+ eor $t1,$t1,$s2,ror#16
+ eor $t1,$t1,$t2,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16)
+ eor $t1,$t1,$t2,ror#8 @ ^= ROTATE(tp9,24)
+
+ ldr $s0,[$key,#4] @ prefetch tp1
+ str $t1,[$key],#4
+ subs $rounds,$rounds,#1
+ bne .Lmix
+
+ mov r0,#0
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+.size AES_set_decrypt_key,.-AES_set_decrypt_key
+
+.type AES_Td,%object
+.align 5
+AES_Td:
+.word 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+.word 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+.word 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+.word 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+.word 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+.word 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+.word 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+.word 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+.word 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+.word 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+.word 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+.word 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+.word 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+.word 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+.word 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+.word 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+.word 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+.word 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+.word 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+.word 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+.word 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+.word 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+.word 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+.word 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+.word 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+.word 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+.word 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+.word 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+.word 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+.word 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+.word 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+.word 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+.word 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+.word 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+.word 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+.word 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+.word 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+.word 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+.word 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+.word 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+.word 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+.word 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+.word 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+.word 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+.word 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+.word 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+.word 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+.word 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+.word 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+.word 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+.word 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+.word 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+.word 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+.word 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+.word 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+.word 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+.word 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+.word 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+.word 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+.word 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+.word 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+.word 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+.word 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+.word 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+@ Td4[256]
+.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size AES_Td,.-AES_Td
+
+@ void AES_decrypt(const unsigned char *in, unsigned char *out,
+@ const AES_KEY *key) {
+.global AES_decrypt
+.type AES_decrypt,%function
+.align 5
+AES_decrypt:
+ sub r3,pc,#8 @ AES_decrypt
+ stmdb sp!,{r1,r4-r12,lr}
+ mov $rounds,r0 @ inp
+ mov $key,r2
+ sub $tbl,r3,#AES_decrypt-AES_Td @ Td
+
+ ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
+ ldrb $t1,[$rounds,#2] @ manner...
+ ldrb $t2,[$rounds,#1]
+ ldrb $t3,[$rounds,#0]
+ orr $s0,$s0,$t1,lsl#8
+ orr $s0,$s0,$t2,lsl#16
+ orr $s0,$s0,$t3,lsl#24
+ ldrb $s1,[$rounds,#7]
+ ldrb $t1,[$rounds,#6]
+ ldrb $t2,[$rounds,#5]
+ ldrb $t3,[$rounds,#4]
+ orr $s1,$s1,$t1,lsl#8
+ orr $s1,$s1,$t2,lsl#16
+ orr $s1,$s1,$t3,lsl#24
+ ldrb $s2,[$rounds,#11]
+ ldrb $t1,[$rounds,#10]
+ ldrb $t2,[$rounds,#9]
+ ldrb $t3,[$rounds,#8]
+ orr $s2,$s2,$t1,lsl#8
+ orr $s2,$s2,$t2,lsl#16
+ orr $s2,$s2,$t3,lsl#24
+ ldrb $s3,[$rounds,#15]
+ ldrb $t1,[$rounds,#14]
+ ldrb $t2,[$rounds,#13]
+ ldrb $t3,[$rounds,#12]
+ orr $s3,$s3,$t1,lsl#8
+ orr $s3,$s3,$t2,lsl#16
+ orr $s3,$s3,$t3,lsl#24
+
+ bl _armv4_AES_decrypt
+
+ ldr $rounds,[sp],#4 @ pop out
+ mov $t1,$s0,lsr#24 @ write output in endian-neutral
+ mov $t2,$s0,lsr#16 @ manner...
+ mov $t3,$s0,lsr#8
+ strb $t1,[$rounds,#0]
+ strb $t2,[$rounds,#1]
+ strb $t3,[$rounds,#2]
+ strb $s0,[$rounds,#3]
+ mov $t1,$s1,lsr#24
+ mov $t2,$s1,lsr#16
+ mov $t3,$s1,lsr#8
+ strb $t1,[$rounds,#4]
+ strb $t2,[$rounds,#5]
+ strb $t3,[$rounds,#6]
+ strb $s1,[$rounds,#7]
+ mov $t1,$s2,lsr#24
+ mov $t2,$s2,lsr#16
+ mov $t3,$s2,lsr#8
+ strb $t1,[$rounds,#8]
+ strb $t2,[$rounds,#9]
+ strb $t3,[$rounds,#10]
+ strb $s2,[$rounds,#11]
+ mov $t1,$s3,lsr#24
+ mov $t2,$s3,lsr#16
+ mov $t3,$s3,lsr#8
+ strb $t1,[$rounds,#12]
+ strb $t2,[$rounds,#13]
+ strb $t3,[$rounds,#14]
+ strb $s3,[$rounds,#15]
+
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+.size AES_decrypt,.-AES_decrypt
+
+.type _armv4_AES_decrypt,%function
+.align 2
+_armv4_AES_decrypt:
+ str lr,[sp,#-4]! @ push lr
+ ldr $t1,[$key],#16
+ ldr $t2,[$key,#-12]
+ ldr $t3,[$key,#-8]
+ ldr $i1,[$key,#-4]
+ ldr $rounds,[$key,#240-16]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
+ sub $rounds,$rounds,#1
+ mov lr,#255
+
+.Ldec_loop:
+ and $i1,lr,$s0,lsr#16
+ and $i2,lr,$s0,lsr#8
+ and $i3,lr,$s0
+ mov $s0,$s0,lsr#24
+ ldr $t1,[$tbl,$i1,lsl#2] @ Td1[s0>>16]
+ ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24]
+ ldr $t2,[$tbl,$i2,lsl#2] @ Td2[s0>>8]
+ ldr $t3,[$tbl,$i3,lsl#2] @ Td3[s0>>0]
+
+ and $i1,lr,$s1 @ i0
+ and $i2,lr,$s1,lsr#16
+ and $i3,lr,$s1,lsr#8
+ mov $s1,$s1,lsr#24
+ ldr $i1,[$tbl,$i1,lsl#2] @ Td3[s1>>0]
+ ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24]
+ ldr $i2,[$tbl,$i2,lsl#2] @ Td1[s1>>16]
+ ldr $i3,[$tbl,$i3,lsl#2] @ Td2[s1>>8]
+ eor $s0,$s0,$i1,ror#24
+ eor $s1,$s1,$t1,ror#8
+ eor $t2,$i2,$t2,ror#8
+ eor $t3,$i3,$t3,ror#8
+
+ and $i1,lr,$s2,lsr#8 @ i0
+ and $i2,lr,$s2 @ i1
+ and $i3,lr,$s2,lsr#16
+ mov $s2,$s2,lsr#24
+ ldr $i1,[$tbl,$i1,lsl#2] @ Td2[s2>>8]
+ ldr $i2,[$tbl,$i2,lsl#2] @ Td3[s2>>0]
+ ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24]
+ ldr $i3,[$tbl,$i3,lsl#2] @ Td1[s2>>16]
+ eor $s0,$s0,$i1,ror#16
+ eor $s1,$s1,$i2,ror#24
+ eor $s2,$s2,$t2,ror#8
+ eor $t3,$i3,$t3,ror#8
+
+ and $i1,lr,$s3,lsr#16 @ i0
+ and $i2,lr,$s3,lsr#8 @ i1
+ and $i3,lr,$s3 @ i2
+ mov $s3,$s3,lsr#24
+ ldr $i1,[$tbl,$i1,lsl#2] @ Td1[s3>>16]
+ ldr $i2,[$tbl,$i2,lsl#2] @ Td2[s3>>8]
+ ldr $i3,[$tbl,$i3,lsl#2] @ Td3[s3>>0]
+ ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
+ eor $s0,$s0,$i1,ror#8
+ eor $s1,$s1,$i2,ror#16
+ eor $s2,$s2,$i3,ror#24
+ eor $s3,$s3,$t3,ror#8
+
+ ldr $t1,[$key],#16
+ ldr $t2,[$key,#-12]
+ ldr $t3,[$key,#-8]
+ ldr $i1,[$key,#-4]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
+
+ subs $rounds,$rounds,#1
+ bne .Ldec_loop
+
+ add $tbl,$tbl,#1024
+
+ ldr $t1,[$tbl,#0] @ prefetch Td4
+ ldr $t2,[$tbl,#32]
+ ldr $t3,[$tbl,#64]
+ ldr $i1,[$tbl,#96]
+ ldr $i2,[$tbl,#128]
+ ldr $i3,[$tbl,#160]
+ ldr $t1,[$tbl,#192]
+ ldr $t2,[$tbl,#224]
+
+ and $i1,lr,$s0,lsr#16
+ and $i2,lr,$s0,lsr#8
+ and $i3,lr,$s0
+ ldrb $s0,[$tbl,$s0,lsr#24] @ Td4[s0>>24]
+ ldrb $t1,[$tbl,$i1] @ Td4[s0>>16]
+ ldrb $t2,[$tbl,$i2] @ Td4[s0>>8]
+ ldrb $t3,[$tbl,$i3] @ Td4[s0>>0]
+
+ and $i1,lr,$s1 @ i0
+ and $i2,lr,$s1,lsr#16
+ and $i3,lr,$s1,lsr#8
+ ldrb $i1,[$tbl,$i1] @ Td4[s1>>0]
+ ldrb $s1,[$tbl,$s1,lsr#24] @ Td4[s1>>24]
+ ldrb $i2,[$tbl,$i2] @ Td4[s1>>16]
+ ldrb $i3,[$tbl,$i3] @ Td4[s1>>8]
+ eor $s0,$i1,$s0,lsl#24
+ eor $s1,$t1,$s1,lsl#8
+ eor $t2,$t2,$i2,lsl#8
+ eor $t3,$t3,$i3,lsl#8
+
+ and $i1,lr,$s2,lsr#8 @ i0
+ and $i2,lr,$s2 @ i1
+ and $i3,lr,$s2,lsr#16
+ ldrb $i1,[$tbl,$i1] @ Td4[s2>>8]
+ ldrb $i2,[$tbl,$i2] @ Td4[s2>>0]
+ ldrb $s2,[$tbl,$s2,lsr#24] @ Td4[s2>>24]
+ ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
+ eor $s0,$s0,$i1,lsl#8
+ eor $s1,$i2,$s1,lsl#16
+ eor $s2,$t2,$s2,lsl#16
+ eor $t3,$t3,$i3,lsl#16
+
+ and $i1,lr,$s3,lsr#16 @ i0
+ and $i2,lr,$s3,lsr#8 @ i1
+ and $i3,lr,$s3 @ i2
+ ldrb $i1,[$tbl,$i1] @ Td4[s3>>16]
+ ldrb $i2,[$tbl,$i2] @ Td4[s3>>8]
+ ldrb $i3,[$tbl,$i3] @ Td4[s3>>0]
+ ldrb $s3,[$tbl,$s3,lsr#24] @ Td4[s3>>24]
+ eor $s0,$s0,$i1,lsl#16
+ eor $s1,$s1,$i2,lsl#8
+ eor $s2,$i3,$s2,lsl#8
+ eor $s3,$t3,$s3,lsl#24
+
+ ldr lr,[sp],#4 @ pop lr
+ ldr $t1,[$key,#0]
+ ldr $t2,[$key,#4]
+ ldr $t3,[$key,#8]
+ ldr $i1,[$key,#12]
+ eor $s0,$s0,$t1
+ eor $s1,$s1,$t2
+ eor $s2,$s2,$t3
+ eor $s3,$s3,$i1
+
+ sub $tbl,$tbl,#1024
+ mov pc,lr @ return
+.size _armv4_AES_decrypt,.-_armv4_AES_decrypt
+.asciz "AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
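+# (0xe12fff1e is the machine encoding of "bx lr"; emitting it as data
+# keeps an -march=armv4 assembler happy, while the preceding
+# "tst lr,#1; moveq pc,lr" ensures it is only reached on CPUs that
+# can execute it.)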
+print $code;
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/aes/aes-armv4.s 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,982 @@
+.text
+.code 32
+
+.type AES_Te,%object
+.align 5
+AES_Te:
+.word 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+.word 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+.word 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+.word 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+.word 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+.word 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+.word 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+.word 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+.word 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+.word 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+.word 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+.word 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+.word 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+.word 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+.word 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+.word 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+.word 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+.word 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+.word 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+.word 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+.word 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+.word 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+.word 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+.word 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+.word 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+.word 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+.word 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+.word 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+.word 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+.word 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+.word 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+.word 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+.word 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+.word 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+.word 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+.word 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+.word 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+.word 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+.word 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+.word 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+.word 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+.word 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+.word 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+.word 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+.word 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+.word 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+.word 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+.word 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+.word 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+.word 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+.word 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+.word 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+.word 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+.word 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+.word 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+.word 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+.word 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+.word 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+.word 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+.word 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+.word 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+.word 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+.word 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+.word 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+@ Te4[256]
+.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+@ rcon[]
+.word 0x01000000, 0x02000000, 0x04000000, 0x08000000
+.word 0x10000000, 0x20000000, 0x40000000, 0x80000000
+.word 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
+.size AES_Te,.-AES_Te
+
+@ void AES_encrypt(const unsigned char *in, unsigned char *out,
+@ const AES_KEY *key) {
+.global AES_encrypt
+.type AES_encrypt,%function
+.align 5
+AES_encrypt:
+ sub r3,pc,#8 @ AES_encrypt
+ stmdb sp!,{r1,r4-r12,lr}
+ mov r12,r0 @ inp
+ mov r11,r2
+ sub r10,r3,#AES_encrypt-AES_Te @ Te
+
+ ldrb r0,[r12,#3] @ load input data in endian-neutral
+ ldrb r4,[r12,#2] @ manner...
+ ldrb r5,[r12,#1]
+ ldrb r6,[r12,#0]
+ orr r0,r0,r4,lsl#8
+ orr r0,r0,r5,lsl#16
+ orr r0,r0,r6,lsl#24
+ ldrb r1,[r12,#7]
+ ldrb r4,[r12,#6]
+ ldrb r5,[r12,#5]
+ ldrb r6,[r12,#4]
+ orr r1,r1,r4,lsl#8
+ orr r1,r1,r5,lsl#16
+ orr r1,r1,r6,lsl#24
+ ldrb r2,[r12,#11]
+ ldrb r4,[r12,#10]
+ ldrb r5,[r12,#9]
+ ldrb r6,[r12,#8]
+ orr r2,r2,r4,lsl#8
+ orr r2,r2,r5,lsl#16
+ orr r2,r2,r6,lsl#24
+ ldrb r3,[r12,#15]
+ ldrb r4,[r12,#14]
+ ldrb r5,[r12,#13]
+ ldrb r6,[r12,#12]
+ orr r3,r3,r4,lsl#8
+ orr r3,r3,r5,lsl#16
+ orr r3,r3,r6,lsl#24
+
+ bl _armv4_AES_encrypt
+
+ ldr r12,[sp],#4 @ pop out
+ mov r4,r0,lsr#24 @ write output in endian-neutral
+ mov r5,r0,lsr#16 @ manner...
+ mov r6,r0,lsr#8
+ strb r4,[r12,#0]
+ strb r5,[r12,#1]
+ strb r6,[r12,#2]
+ strb r0,[r12,#3]
+ mov r4,r1,lsr#24
+ mov r5,r1,lsr#16
+ mov r6,r1,lsr#8
+ strb r4,[r12,#4]
+ strb r5,[r12,#5]
+ strb r6,[r12,#6]
+ strb r1,[r12,#7]
+ mov r4,r2,lsr#24
+ mov r5,r2,lsr#16
+ mov r6,r2,lsr#8
+ strb r4,[r12,#8]
+ strb r5,[r12,#9]
+ strb r6,[r12,#10]
+ strb r2,[r12,#11]
+ mov r4,r3,lsr#24
+ mov r5,r3,lsr#16
+ mov r6,r3,lsr#8
+ strb r4,[r12,#12]
+ strb r5,[r12,#13]
+ strb r6,[r12,#14]
+ strb r3,[r12,#15]
+
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.size AES_encrypt,.-AES_encrypt
+
+.type _armv4_AES_encrypt,%function
+.align 2
+_armv4_AES_encrypt:
+ str lr,[sp,#-4]! @ push lr
+ ldr r4,[r11],#16
+ ldr r5,[r11,#-12]
+ ldr r6,[r11,#-8]
+ ldr r7,[r11,#-4]
+ ldr r12,[r11,#240-16]
+ eor r0,r0,r4
+ eor r1,r1,r5
+ eor r2,r2,r6
+ eor r3,r3,r7
+ sub r12,r12,#1
+ mov lr,#255
+
+.Lenc_loop:
+ and r8,lr,r0,lsr#8
+ and r9,lr,r0,lsr#16
+ and r7,lr,r0
+ mov r0,r0,lsr#24
+ ldr r4,[r10,r7,lsl#2] @ Te3[s0>>0]
+ ldr r0,[r10,r0,lsl#2] @ Te0[s0>>24]
+ ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8]
+ ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16]
+
+ and r7,lr,r1,lsr#16 @ i0
+ and r8,lr,r1
+ and r9,lr,r1,lsr#8
+ mov r1,r1,lsr#24
+ ldr r7,[r10,r7,lsl#2] @ Te1[s1>>16]
+ ldr r1,[r10,r1,lsl#2] @ Te0[s1>>24]
+ ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0]
+ ldr r9,[r10,r9,lsl#2] @ Te2[s1>>8]
+ eor r0,r0,r7,ror#8
+ eor r1,r1,r4,ror#24
+ eor r5,r5,r8,ror#8
+ eor r6,r6,r9,ror#8
+
+ and r7,lr,r2,lsr#8 @ i0
+ and r8,lr,r2,lsr#16 @ i1
+ and r9,lr,r2
+ mov r2,r2,lsr#24
+ ldr r7,[r10,r7,lsl#2] @ Te2[s2>>8]
+ ldr r8,[r10,r8,lsl#2] @ Te1[s2>>16]
+ ldr r2,[r10,r2,lsl#2] @ Te0[s2>>24]
+ ldr r9,[r10,r9,lsl#2] @ Te3[s2>>0]
+ eor r0,r0,r7,ror#16
+ eor r1,r1,r8,ror#8
+ eor r2,r2,r5,ror#16
+ eor r6,r6,r9,ror#16
+
+ and r7,lr,r3 @ i0
+ and r8,lr,r3,lsr#8 @ i1
+ and r9,lr,r3,lsr#16 @ i2
+ mov r3,r3,lsr#24
+ ldr r7,[r10,r7,lsl#2] @ Te3[s3>>0]
+ ldr r8,[r10,r8,lsl#2] @ Te2[s3>>8]
+ ldr r9,[r10,r9,lsl#2] @ Te1[s3>>16]
+ ldr r3,[r10,r3,lsl#2] @ Te0[s3>>24]
+ eor r0,r0,r7,ror#24
+ eor r1,r1,r8,ror#16
+ eor r2,r2,r9,ror#8
+ eor r3,r3,r6,ror#8
+
+ ldr r4,[r11],#16
+ ldr r5,[r11,#-12]
+ ldr r6,[r11,#-8]
+ ldr r7,[r11,#-4]
+ eor r0,r0,r4
+ eor r1,r1,r5
+ eor r2,r2,r6
+ eor r3,r3,r7
+
+ subs r12,r12,#1
+ bne .Lenc_loop
+
+ add r10,r10,#2
+
+ and r7,lr,r0
+ and r8,lr,r0,lsr#8
+ and r9,lr,r0,lsr#16
+ mov r0,r0,lsr#24
+ ldrb r4,[r10,r7,lsl#2] @ Te4[s0>>0]
+ ldrb r0,[r10,r0,lsl#2] @ Te4[s0>>24]
+ ldrb r5,[r10,r8,lsl#2] @ Te4[s0>>8]
+ ldrb r6,[r10,r9,lsl#2] @ Te4[s0>>16]
+
+ and r7,lr,r1,lsr#16 @ i0
+ and r8,lr,r1
+ and r9,lr,r1,lsr#8
+ mov r1,r1,lsr#24
+ ldrb r7,[r10,r7,lsl#2] @ Te4[s1>>16]
+ ldrb r1,[r10,r1,lsl#2] @ Te4[s1>>24]
+ ldrb r8,[r10,r8,lsl#2] @ Te4[s1>>0]
+ ldrb r9,[r10,r9,lsl#2] @ Te4[s1>>8]
+ eor r0,r7,r0,lsl#8
+ eor r1,r4,r1,lsl#24
+ eor r5,r8,r5,lsl#8
+ eor r6,r9,r6,lsl#8
+
+ and r7,lr,r2,lsr#8 @ i0
+ and r8,lr,r2,lsr#16 @ i1
+ and r9,lr,r2
+ mov r2,r2,lsr#24
+ ldrb r7,[r10,r7,lsl#2] @ Te4[s2>>8]
+ ldrb r8,[r10,r8,lsl#2] @ Te4[s2>>16]
+ ldrb r2,[r10,r2,lsl#2] @ Te4[s2>>24]
+ ldrb r9,[r10,r9,lsl#2] @ Te4[s2>>0]
+ eor r0,r7,r0,lsl#8
+ eor r1,r1,r8,lsl#16
+ eor r2,r5,r2,lsl#24
+ eor r6,r9,r6,lsl#8
+
+ and r7,lr,r3 @ i0
+ and r8,lr,r3,lsr#8 @ i1
+ and r9,lr,r3,lsr#16 @ i2
+ mov r3,r3,lsr#24
+ ldrb r7,[r10,r7,lsl#2] @ Te4[s3>>0]
+ ldrb r8,[r10,r8,lsl#2] @ Te4[s3>>8]
+ ldrb r9,[r10,r9,lsl#2] @ Te4[s3>>16]
+ ldrb r3,[r10,r3,lsl#2] @ Te4[s3>>24]
+ eor r0,r7,r0,lsl#8
+ eor r1,r1,r8,lsl#8
+ eor r2,r2,r9,lsl#16
+ eor r3,r6,r3,lsl#24
+
+ ldr lr,[sp],#4 @ pop lr
+ ldr r4,[r11,#0]
+ ldr r5,[r11,#4]
+ ldr r6,[r11,#8]
+ ldr r7,[r11,#12]
+ eor r0,r0,r4
+ eor r1,r1,r5
+ eor r2,r2,r6
+ eor r3,r3,r7
+
+ sub r10,r10,#2
+ mov pc,lr @ return
+.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
+
+.global AES_set_encrypt_key
+.type AES_set_encrypt_key,%function
+.align 5
+AES_set_encrypt_key:
+ sub r3,pc,#8 @ AES_set_encrypt_key
+ teq r0,#0
+ moveq r0,#-1
+ beq .Labrt
+ teq r2,#0
+ moveq r0,#-1
+ beq .Labrt
+
+ teq r1,#128
+ beq .Lok
+ teq r1,#192
+ beq .Lok
+ teq r1,#256
+ movne r0,#-1
+ bne .Labrt
+
+.Lok: stmdb sp!,{r4-r12,lr}
+ sub r10,r3,#AES_set_encrypt_key-AES_Te-1024 @ Te4
+
+ mov r12,r0 @ inp
+ mov lr,r1 @ bits
+ mov r11,r2 @ key
+
+ ldrb r0,[r12,#3] @ load input data in endian-neutral
+ ldrb r4,[r12,#2] @ manner...
+ ldrb r5,[r12,#1]
+ ldrb r6,[r12,#0]
+ orr r0,r0,r4,lsl#8
+ orr r0,r0,r5,lsl#16
+ orr r0,r0,r6,lsl#24
+ ldrb r1,[r12,#7]
+ ldrb r4,[r12,#6]
+ ldrb r5,[r12,#5]
+ ldrb r6,[r12,#4]
+ orr r1,r1,r4,lsl#8
+ orr r1,r1,r5,lsl#16
+ orr r1,r1,r6,lsl#24
+ ldrb r2,[r12,#11]
+ ldrb r4,[r12,#10]
+ ldrb r5,[r12,#9]
+ ldrb r6,[r12,#8]
+ orr r2,r2,r4,lsl#8
+ orr r2,r2,r5,lsl#16
+ orr r2,r2,r6,lsl#24
+ ldrb r3,[r12,#15]
+ ldrb r4,[r12,#14]
+ ldrb r5,[r12,#13]
+ ldrb r6,[r12,#12]
+ orr r3,r3,r4,lsl#8
+ orr r3,r3,r5,lsl#16
+ orr r3,r3,r6,lsl#24
+ str r0,[r11],#16
+ str r1,[r11,#-12]
+ str r2,[r11,#-8]
+ str r3,[r11,#-4]
+
+ teq lr,#128
+ bne .Lnot128
+ mov r12,#10
+ str r12,[r11,#240-16]
+ add r6,r10,#256 @ rcon
+ mov lr,#255
+
+.L128_loop:
+ and r5,lr,r3,lsr#24
+ and r7,lr,r3,lsr#16
+ and r8,lr,r3,lsr#8
+ and r9,lr,r3
+ ldrb r5,[r10,r5]
+ ldrb r7,[r10,r7]
+ ldrb r8,[r10,r8]
+ ldrb r9,[r10,r9]
+ ldr r4,[r6],#4 @ rcon[i++]
+ orr r5,r5,r7,lsl#24
+ orr r5,r5,r8,lsl#16
+ orr r5,r5,r9,lsl#8
+ eor r5,r5,r4
+ eor r0,r0,r5 @ rk[4]=rk[0]^...
+ eor r1,r1,r0 @ rk[5]=rk[1]^rk[4]
+ eor r2,r2,r1 @ rk[6]=rk[2]^rk[5]
+ eor r3,r3,r2 @ rk[7]=rk[3]^rk[6]
+ str r0,[r11],#16
+ str r1,[r11,#-12]
+ str r2,[r11,#-8]
+ str r3,[r11,#-4]
+
+ subs r12,r12,#1
+ bne .L128_loop
+ sub r2,r11,#176
+ b .Ldone
+
+.Lnot128:
+ ldrb r8,[r12,#19]
+ ldrb r4,[r12,#18]
+ ldrb r5,[r12,#17]
+ ldrb r6,[r12,#16]
+ orr r8,r8,r4,lsl#8
+ orr r8,r8,r5,lsl#16
+ orr r8,r8,r6,lsl#24
+ ldrb r9,[r12,#23]
+ ldrb r4,[r12,#22]
+ ldrb r5,[r12,#21]
+ ldrb r6,[r12,#20]
+ orr r9,r9,r4,lsl#8
+ orr r9,r9,r5,lsl#16
+ orr r9,r9,r6,lsl#24
+ str r8,[r11],#8
+ str r9,[r11,#-4]
+
+ teq lr,#192
+ bne .Lnot192
+ mov r12,#12
+ str r12,[r11,#240-24]
+ add r6,r10,#256 @ rcon
+ mov lr,#255
+ mov r12,#8
+
+.L192_loop:
+ and r5,lr,r9,lsr#24
+ and r7,lr,r9,lsr#16
+ and r8,lr,r9,lsr#8
+ and r9,lr,r9
+ ldrb r5,[r10,r5]
+ ldrb r7,[r10,r7]
+ ldrb r8,[r10,r8]
+ ldrb r9,[r10,r9]
+ ldr r4,[r6],#4 @ rcon[i++]
+ orr r5,r5,r7,lsl#24
+ orr r5,r5,r8,lsl#16
+ orr r5,r5,r9,lsl#8
+ eor r9,r5,r4
+ eor r0,r0,r9 @ rk[6]=rk[0]^...
+ eor r1,r1,r0 @ rk[7]=rk[1]^rk[6]
+ eor r2,r2,r1 @ rk[8]=rk[2]^rk[7]
+ eor r3,r3,r2 @ rk[9]=rk[3]^rk[8]
+ str r0,[r11],#24
+ str r1,[r11,#-20]
+ str r2,[r11,#-16]
+ str r3,[r11,#-12]
+
+ subs r12,r12,#1
+ subeq r2,r11,#216
+ beq .Ldone
+
+ ldr r7,[r11,#-32]
+ ldr r8,[r11,#-28]
+ eor r7,r7,r3 @ rk[10]=rk[4]^rk[9]
+ eor r9,r8,r7 @ rk[11]=rk[5]^rk[10]
+ str r7,[r11,#-8]
+ str r9,[r11,#-4]
+ b .L192_loop
+
+.Lnot192:
+ ldrb r8,[r12,#27]
+ ldrb r4,[r12,#26]
+ ldrb r5,[r12,#25]
+ ldrb r6,[r12,#24]
+ orr r8,r8,r4,lsl#8
+ orr r8,r8,r5,lsl#16
+ orr r8,r8,r6,lsl#24
+ ldrb r9,[r12,#31]
+ ldrb r4,[r12,#30]
+ ldrb r5,[r12,#29]
+ ldrb r6,[r12,#28]
+ orr r9,r9,r4,lsl#8
+ orr r9,r9,r5,lsl#16
+ orr r9,r9,r6,lsl#24
+ str r8,[r11],#8
+ str r9,[r11,#-4]
+
+ mov r12,#14
+ str r12,[r11,#240-32]
+ add r6,r10,#256 @ rcon
+ mov lr,#255
+ mov r12,#7
+
+.L256_loop:
+ and r5,lr,r9,lsr#24
+ and r7,lr,r9,lsr#16
+ and r8,lr,r9,lsr#8
+ and r9,lr,r9
+ ldrb r5,[r10,r5]
+ ldrb r7,[r10,r7]
+ ldrb r8,[r10,r8]
+ ldrb r9,[r10,r9]
+ ldr r4,[r6],#4 @ rcon[i++]
+ orr r5,r5,r7,lsl#24
+ orr r5,r5,r8,lsl#16
+ orr r5,r5,r9,lsl#8
+ eor r9,r5,r4
+ eor r0,r0,r9 @ rk[8]=rk[0]^...
+ eor r1,r1,r0 @ rk[9]=rk[1]^rk[8]
+ eor r2,r2,r1 @ rk[10]=rk[2]^rk[9]
+ eor r3,r3,r2 @ rk[11]=rk[3]^rk[10]
+ str r0,[r11],#32
+ str r1,[r11,#-28]
+ str r2,[r11,#-24]
+ str r3,[r11,#-20]
+
+ subs r12,r12,#1
+ subeq r2,r11,#256
+ beq .Ldone
+
+ and r5,lr,r3
+ and r7,lr,r3,lsr#8
+ and r8,lr,r3,lsr#16
+ and r9,lr,r3,lsr#24
+ ldrb r5,[r10,r5]
+ ldrb r7,[r10,r7]
+ ldrb r8,[r10,r8]
+ ldrb r9,[r10,r9]
+ orr r5,r5,r7,lsl#8
+ orr r5,r5,r8,lsl#16
+ orr r5,r5,r9,lsl#24
+
+ ldr r4,[r11,#-48]
+ ldr r7,[r11,#-44]
+ ldr r8,[r11,#-40]
+ ldr r9,[r11,#-36]
+ eor r4,r4,r5 @ rk[12]=rk[4]^...
+ eor r7,r7,r4 @ rk[13]=rk[5]^rk[12]
+ eor r8,r8,r7 @ rk[14]=rk[6]^rk[13]
+ eor r9,r9,r8 @ rk[15]=rk[7]^rk[14]
+ str r4,[r11,#-16]
+ str r7,[r11,#-12]
+ str r8,[r11,#-8]
+ str r9,[r11,#-4]
+ b .L256_loop
+
+.Ldone: mov r0,#0
+ ldmia sp!,{r4-r12,lr}
+.Labrt: tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.size AES_set_encrypt_key,.-AES_set_encrypt_key
+
+.global AES_set_decrypt_key
+.type AES_set_decrypt_key,%function
+.align 5
+AES_set_decrypt_key:
+ str lr,[sp,#-4]! @ push lr
+ bl AES_set_encrypt_key
+ teq r0,#0
+ ldrne lr,[sp],#4 @ pop lr
+ bne .Labrt
+
+ stmdb sp!,{r4-r12}
+
+ ldr r12,[r2,#240] @ AES_set_encrypt_key preserves r2,
+ mov r11,r2 @ which is AES_KEY *key
+ mov r7,r2
+ add r8,r2,r12,lsl#4
+
+.Linv: ldr r0,[r7]
+ ldr r1,[r7,#4]
+ ldr r2,[r7,#8]
+ ldr r3,[r7,#12]
+ ldr r4,[r8]
+ ldr r5,[r8,#4]
+ ldr r6,[r8,#8]
+ ldr r9,[r8,#12]
+ str r0,[r8],#-16
+ str r1,[r8,#16+4]
+ str r2,[r8,#16+8]
+ str r3,[r8,#16+12]
+ str r4,[r7],#16
+ str r5,[r7,#-12]
+ str r6,[r7,#-8]
+ str r9,[r7,#-4]
+ teq r7,r8
+ bne .Linv
+ ldr r0,[r11,#16]! @ prefetch tp1
+ mov r7,#0x80
+ mov r8,#0x1b
+ orr r7,r7,#0x8000
+ orr r8,r8,#0x1b00
+ orr r7,r7,r7,lsl#16
+ orr r8,r8,r8,lsl#16
+ sub r12,r12,#1
+ mvn r9,r7
+ mov r12,r12,lsl#2 @ (rounds-1)*4
+
+.Lmix: and r4,r0,r7
+ and r1,r0,r9
+ sub r4,r4,r4,lsr#7
+ and r4,r4,r8
+ eor r1,r4,r1,lsl#1 @ tp2
+
+ and r4,r1,r7
+ and r2,r1,r9
+ sub r4,r4,r4,lsr#7
+ and r4,r4,r8
+ eor r2,r4,r2,lsl#1 @ tp4
+
+ and r4,r2,r7
+ and r3,r2,r9
+ sub r4,r4,r4,lsr#7
+ and r4,r4,r8
+ eor r3,r4,r3,lsl#1 @ tp8
+
+ eor r4,r1,r2
+ eor r5,r0,r3 @ tp9
+ eor r4,r4,r3 @ tpe
+ eor r4,r4,r1,ror#24
+ eor r4,r4,r5,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8)
+ eor r4,r4,r2,ror#16
+ eor r4,r4,r5,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16)
+ eor r4,r4,r5,ror#8 @ ^= ROTATE(tp9,24)
+
+ ldr r0,[r11,#4] @ prefetch tp1
+ str r4,[r11],#4
+ subs r12,r12,#1
+ bne .Lmix
+
+ mov r0,#0
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.size AES_set_decrypt_key,.-AES_set_decrypt_key
+
+.type AES_Td,%object
+.align 5
+AES_Td:
+.word 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+.word 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+.word 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+.word 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+.word 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+.word 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+.word 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+.word 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+.word 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+.word 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+.word 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+.word 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+.word 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+.word 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+.word 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+.word 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+.word 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+.word 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+.word 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+.word 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+.word 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+.word 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+.word 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+.word 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+.word 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+.word 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+.word 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+.word 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+.word 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+.word 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+.word 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+.word 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+.word 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+.word 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+.word 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+.word 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+.word 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+.word 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+.word 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+.word 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+.word 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+.word 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+.word 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+.word 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+.word 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+.word 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+.word 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+.word 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+.word 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+.word 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+.word 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+.word 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+.word 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+.word 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+.word 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+.word 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+.word 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+.word 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+.word 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+.word 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+.word 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+.word 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+.word 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+.word 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+@ Td4[256]
+.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size AES_Td,.-AES_Td
+
+@ void AES_decrypt(const unsigned char *in, unsigned char *out,
+@ const AES_KEY *key) {
+.global AES_decrypt
+.type AES_decrypt,%function
+.align 5
+AES_decrypt:
+ sub r3,pc,#8 @ AES_decrypt
+ stmdb sp!,{r1,r4-r12,lr}
+ mov r12,r0 @ inp
+ mov r11,r2
+ sub r10,r3,#AES_decrypt-AES_Td @ Td
+
+ ldrb r0,[r12,#3] @ load input data in endian-neutral
+ ldrb r4,[r12,#2] @ manner...
+ ldrb r5,[r12,#1]
+ ldrb r6,[r12,#0]
+ orr r0,r0,r4,lsl#8
+ orr r0,r0,r5,lsl#16
+ orr r0,r0,r6,lsl#24
+ ldrb r1,[r12,#7]
+ ldrb r4,[r12,#6]
+ ldrb r5,[r12,#5]
+ ldrb r6,[r12,#4]
+ orr r1,r1,r4,lsl#8
+ orr r1,r1,r5,lsl#16
+ orr r1,r1,r6,lsl#24
+ ldrb r2,[r12,#11]
+ ldrb r4,[r12,#10]
+ ldrb r5,[r12,#9]
+ ldrb r6,[r12,#8]
+ orr r2,r2,r4,lsl#8
+ orr r2,r2,r5,lsl#16
+ orr r2,r2,r6,lsl#24
+ ldrb r3,[r12,#15]
+ ldrb r4,[r12,#14]
+ ldrb r5,[r12,#13]
+ ldrb r6,[r12,#12]
+ orr r3,r3,r4,lsl#8
+ orr r3,r3,r5,lsl#16
+ orr r3,r3,r6,lsl#24
+
+ bl _armv4_AES_decrypt
+
+ ldr r12,[sp],#4 @ pop out
+ mov r4,r0,lsr#24 @ write output in endian-neutral
+ mov r5,r0,lsr#16 @ manner...
+ mov r6,r0,lsr#8
+ strb r4,[r12,#0]
+ strb r5,[r12,#1]
+ strb r6,[r12,#2]
+ strb r0,[r12,#3]
+ mov r4,r1,lsr#24
+ mov r5,r1,lsr#16
+ mov r6,r1,lsr#8
+ strb r4,[r12,#4]
+ strb r5,[r12,#5]
+ strb r6,[r12,#6]
+ strb r1,[r12,#7]
+ mov r4,r2,lsr#24
+ mov r5,r2,lsr#16
+ mov r6,r2,lsr#8
+ strb r4,[r12,#8]
+ strb r5,[r12,#9]
+ strb r6,[r12,#10]
+ strb r2,[r12,#11]
+ mov r4,r3,lsr#24
+ mov r5,r3,lsr#16
+ mov r6,r3,lsr#8
+ strb r4,[r12,#12]
+ strb r5,[r12,#13]
+ strb r6,[r12,#14]
+ strb r3,[r12,#15]
+
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.size AES_decrypt,.-AES_decrypt
+
+.type _armv4_AES_decrypt,%function
+.align 2
+_armv4_AES_decrypt:
+ str lr,[sp,#-4]! @ push lr
+ ldr r4,[r11],#16
+ ldr r5,[r11,#-12]
+ ldr r6,[r11,#-8]
+ ldr r7,[r11,#-4]
+ ldr r12,[r11,#240-16]
+ eor r0,r0,r4
+ eor r1,r1,r5
+ eor r2,r2,r6
+ eor r3,r3,r7
+ sub r12,r12,#1
+ mov lr,#255
+
+.Ldec_loop:
+ and r7,lr,r0,lsr#16
+ and r8,lr,r0,lsr#8
+ and r9,lr,r0
+ mov r0,r0,lsr#24
+ ldr r4,[r10,r7,lsl#2] @ Td1[s0>>16]
+ ldr r0,[r10,r0,lsl#2] @ Td0[s0>>24]
+ ldr r5,[r10,r8,lsl#2] @ Td2[s0>>8]
+ ldr r6,[r10,r9,lsl#2] @ Td3[s0>>0]
+
+ and r7,lr,r1 @ i0
+ and r8,lr,r1,lsr#16
+ and r9,lr,r1,lsr#8
+ mov r1,r1,lsr#24
+ ldr r7,[r10,r7,lsl#2] @ Td3[s1>>0]
+ ldr r1,[r10,r1,lsl#2] @ Td0[s1>>24]
+ ldr r8,[r10,r8,lsl#2] @ Td1[s1>>16]
+ ldr r9,[r10,r9,lsl#2] @ Td2[s1>>8]
+ eor r0,r0,r7,ror#24
+ eor r1,r1,r4,ror#8
+ eor r5,r8,r5,ror#8
+ eor r6,r9,r6,ror#8
+
+ and r7,lr,r2,lsr#8 @ i0
+ and r8,lr,r2 @ i1
+ and r9,lr,r2,lsr#16
+ mov r2,r2,lsr#24
+ ldr r7,[r10,r7,lsl#2] @ Td2[s2>>8]
+ ldr r8,[r10,r8,lsl#2] @ Td3[s2>>0]
+ ldr r2,[r10,r2,lsl#2] @ Td0[s2>>24]
+ ldr r9,[r10,r9,lsl#2] @ Td1[s2>>16]
+ eor r0,r0,r7,ror#16
+ eor r1,r1,r8,ror#24
+ eor r2,r2,r5,ror#8
+ eor r6,r9,r6,ror#8
+
+ and r7,lr,r3,lsr#16 @ i0
+ and r8,lr,r3,lsr#8 @ i1
+ and r9,lr,r3 @ i2
+ mov r3,r3,lsr#24
+ ldr r7,[r10,r7,lsl#2] @ Td1[s3>>16]
+ ldr r8,[r10,r8,lsl#2] @ Td2[s3>>8]
+ ldr r9,[r10,r9,lsl#2] @ Td3[s3>>0]
+ ldr r3,[r10,r3,lsl#2] @ Td0[s3>>24]
+ eor r0,r0,r7,ror#8
+ eor r1,r1,r8,ror#16
+ eor r2,r2,r9,ror#24
+ eor r3,r3,r6,ror#8
+
+ ldr r4,[r11],#16
+ ldr r5,[r11,#-12]
+ ldr r6,[r11,#-8]
+ ldr r7,[r11,#-4]
+ eor r0,r0,r4
+ eor r1,r1,r5
+ eor r2,r2,r6
+ eor r3,r3,r7
+
+ subs r12,r12,#1
+ bne .Ldec_loop
+
+ add r10,r10,#1024
+
+ ldr r4,[r10,#0] @ prefetch Td4
+ ldr r5,[r10,#32]
+ ldr r6,[r10,#64]
+ ldr r7,[r10,#96]
+ ldr r8,[r10,#128]
+ ldr r9,[r10,#160]
+ ldr r4,[r10,#192]
+ ldr r5,[r10,#224]
+
+ and r7,lr,r0,lsr#16
+ and r8,lr,r0,lsr#8
+ and r9,lr,r0
+ ldrb r0,[r10,r0,lsr#24] @ Td4[s0>>24]
+ ldrb r4,[r10,r7] @ Td4[s0>>16]
+ ldrb r5,[r10,r8] @ Td4[s0>>8]
+ ldrb r6,[r10,r9] @ Td4[s0>>0]
+
+ and r7,lr,r1 @ i0
+ and r8,lr,r1,lsr#16
+ and r9,lr,r1,lsr#8
+ ldrb r7,[r10,r7] @ Td4[s1>>0]
+ ldrb r1,[r10,r1,lsr#24] @ Td4[s1>>24]
+ ldrb r8,[r10,r8] @ Td4[s1>>16]
+ ldrb r9,[r10,r9] @ Td4[s1>>8]
+ eor r0,r7,r0,lsl#24
+ eor r1,r4,r1,lsl#8
+ eor r5,r5,r8,lsl#8
+ eor r6,r6,r9,lsl#8
+
+ and r7,lr,r2,lsr#8 @ i0
+ and r8,lr,r2 @ i1
+ and r9,lr,r2,lsr#16
+ ldrb r7,[r10,r7] @ Td4[s2>>8]
+ ldrb r8,[r10,r8] @ Td4[s2>>0]
+ ldrb r2,[r10,r2,lsr#24] @ Td4[s2>>24]
+ ldrb r9,[r10,r9] @ Td4[s2>>16]
+ eor r0,r0,r7,lsl#8
+ eor r1,r8,r1,lsl#16
+ eor r2,r5,r2,lsl#16
+ eor r6,r6,r9,lsl#16
+
+ and r7,lr,r3,lsr#16 @ i0
+ and r8,lr,r3,lsr#8 @ i1
+ and r9,lr,r3 @ i2
+ ldrb r7,[r10,r7] @ Td4[s3>>16]
+ ldrb r8,[r10,r8] @ Td4[s3>>8]
+ ldrb r9,[r10,r9] @ Td4[s3>>0]
+ ldrb r3,[r10,r3,lsr#24] @ Td4[s3>>24]
+ eor r0,r0,r7,lsl#16
+ eor r1,r1,r8,lsl#8
+ eor r2,r9,r2,lsl#8
+ eor r3,r6,r3,lsl#24
+
+ ldr lr,[sp],#4 @ pop lr
+ ldr r4,[r11,#0]
+ ldr r5,[r11,#4]
+ ldr r6,[r11,#8]
+ ldr r7,[r11,#12]
+ eor r0,r0,r4
+ eor r1,r1,r5
+ eor r2,r2,r6
+ eor r3,r3,r7
+
+ sub r10,r10,#1024
+ mov pc,lr @ return
+.size _armv4_AES_decrypt,.-_armv4_AES_decrypt
+.asciz "AES for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/bn/armv4-mont.pl 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,200 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# January 2007.
+
+# Montgomery multiplication for ARMv4.
+#
+# Performance improvement naturally varies among CPU implementations
+# and compilers. The code was observed to provide a +65-35% improvement
+# [depending on key length, less for longer keys] on ARM920T, and
+# +115-80% on Intel IXP425. The baseline was the pre-bn_mul_mont code
+# base with compiler-generated code that already in-lined umull and
+# even umlal instructions, i.e. this code gains no "advantage" from
+# utilizing some "secret" instruction the compiler could not emit.
+#
+# The code is interoperable with the Thumb ISA and is rather compact,
+# less than 1/2KB. A Windows CE port would be trivial: it is exclusively
+# a matter of decorations, as the ABI and instruction syntax are
+# identical.
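+
+# For reference, bn_mul_mont(rp,ap,bp,np,n0,num) computes the Montgomery
+# product rp = ap*bp*R^-1 mod np with R = 2^(32*num). Below is a minimal
+# bignum sketch of that contract (mont_mul_ref is an illustrative helper,
+# never called by this generator; core Math::BigInt is assumed):
+use Math::BigInt;
+sub mont_mul_ref {
+	my ($ap,$bp,$np,$num) = @_;	# Math::BigInt values, $num in 32-bit words
+	my $R = Math::BigInt->new(2)->bpow(32*$num);
+	# multiply, then divide by R modulo np
+	return $ap->copy->bmul($bp)->bmul($R->bmodinv($np))->bmod($np);
+}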
+
+$num="r0"; # starts as num argument, but holds &tp[num-1]
+$ap="r1";
+$bp="r2"; $bi="r2"; $rp="r2";
+$np="r3";
+$tp="r4";
+$aj="r5";
+$nj="r6";
+$tj="r7";
+$n0="r8";
+########### # r9 is reserved by ELF as platform specific, e.g. TLS pointer
+$alo="r10"; # sl, gcc uses it to keep @GOT
+$ahi="r11"; # fp
+$nlo="r12"; # ip
+########### # r13 is stack pointer
+$nhi="r14"; # lr
+########### # r15 is program counter
+
+#### argument block layout relative to &tp[num-1], a.k.a. $num
+$_rp="$num,#12*4";
+# ap permanently resides in r1
+$_bp="$num,#13*4";
+# np permanently resides in r3
+$_n0="$num,#14*4";
+$_num="$num,#15*4"; $_bpend=$_num;
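+#
+# i.e. the frame, relative to $num = &tp[num-1]:
+#	$num+#0			tp[num-1]
+#	$num+#4			tp[num], the extra dword
+#	$num+#2*4..#11*4	saved r4-r12,lr
+#	$num+#12*4,#13*4	{r0,r2} = {rp,bp} pushed on entry
+#	$num+#14*4,#15*4	caller's stack arguments, &n0 and num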
+
+$code=<<___;
+.text
+
+.global bn_mul_mont
+.type bn_mul_mont,%function
+
+.align 2
+bn_mul_mont:
+ stmdb sp!,{r0,r2} @ sp points at argument block
+ ldr $num,[sp,#3*4] @ load num
+ cmp $num,#2
+ movlt r0,#0
+ addlt sp,sp,#2*4
+ blt .Labrt
+
+ stmdb sp!,{r4-r12,lr} @ save 10 registers
+
+ mov $num,$num,lsl#2 @ rescale $num for byte count
+ sub sp,sp,$num @ alloca(4*num)
+ sub sp,sp,#4 @ +extra dword
+ sub $num,$num,#4 @ "num=num-1"
+ add $tp,$bp,$num @ &bp[num-1]
+
+ add $num,sp,$num @ $num to point at &tp[num-1]
+ ldr $n0,[$_n0] @ &n0
+ ldr $bi,[$bp] @ bp[0]
+ ldr $aj,[$ap],#4 @ ap[0],ap++
+ ldr $nj,[$np],#4 @ np[0],np++
+ ldr $n0,[$n0] @ *n0
+ str $tp,[$_bpend] @ save &bp[num]
+
+ umull $alo,$ahi,$aj,$bi @ ap[0]*bp[0]
+ str $n0,[$_n0] @ save n0 value
+ mul $n0,$alo,$n0 @ "tp[0]"*n0
+ mov $nlo,#0
+	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
+ mov $tp,sp
+
+.L1st:
+ ldr $aj,[$ap],#4 @ ap[j],ap++
+ mov $alo,$ahi
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[0]
+ ldr $nj,[$np],#4 @ np[j],np++
+ mov $nhi,#0
+ umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
+ adds $nlo,$nlo,$alo
+ str $nlo,[$tp],#4 @ tp[j-1]=,tp++
+ adc $nlo,$nhi,#0
+ cmp $tp,$num
+ bne .L1st
+
+ adds $nlo,$nlo,$ahi
+ mov $nhi,#0
+ adc $nhi,$nhi,#0
+ ldr $tp,[$_bp] @ restore bp
+ str $nlo,[$num] @ tp[num-1]=
+ ldr $n0,[$_n0] @ restore n0
+ str $nhi,[$num,#4] @ tp[num]=
+
+.Louter:
+ sub $tj,$num,sp @ "original" $num-1 value
+ sub $ap,$ap,$tj @ "rewind" ap to &ap[1]
+ sub $np,$np,$tj @ "rewind" np to &np[1]
+ ldr $bi,[$tp,#4]! @ *(++bp)
+ ldr $aj,[$ap,#-4] @ ap[0]
+ ldr $nj,[$np,#-4] @ np[0]
+ ldr $alo,[sp] @ tp[0]
+ ldr $tj,[sp,#4] @ tp[1]
+
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[0]*bp[i]+tp[0]
+ str $tp,[$_bp] @ save bp
+ mul $n0,$alo,$n0
+ mov $nlo,#0
+ umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]"
+ mov $tp,sp
+
+.Linner:
+ ldr $aj,[$ap],#4 @ ap[j],ap++
+ adds $alo,$ahi,$tj @ +=tp[j]
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[i]
+ ldr $nj,[$np],#4 @ np[j],np++
+ mov $nhi,#0
+ umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
+ ldr $tj,[$tp,#8] @ tp[j+1]
+ adc $ahi,$ahi,#0
+ adds $nlo,$nlo,$alo
+ str $nlo,[$tp],#4 @ tp[j-1]=,tp++
+ adc $nlo,$nhi,#0
+ cmp $tp,$num
+ bne .Linner
+
+ adds $nlo,$nlo,$ahi
+ mov $nhi,#0
+ adc $nhi,$nhi,#0
+ adds $nlo,$nlo,$tj
+ adc $nhi,$nhi,#0
+ ldr $tp,[$_bp] @ restore bp
+ ldr $tj,[$_bpend] @ restore &bp[num]
+ str $nlo,[$num] @ tp[num-1]=
+ ldr $n0,[$_n0] @ restore n0
+ str $nhi,[$num,#4] @ tp[num]=
+
+ cmp $tp,$tj
+ bne .Louter
+
+ ldr $rp,[$_rp] @ pull rp
+ add $num,$num,#4 @ $num to point at &tp[num]
+ sub $aj,$num,sp @ "original" num value
+ mov $tp,sp @ "rewind" $tp
+ mov $ap,$tp @ "borrow" $ap
+ sub $np,$np,$aj @ "rewind" $np to &np[0]
+
+ subs $tj,$tj,$tj @ "clear" carry flag
+.Lsub: ldr $tj,[$tp],#4
+ ldr $nj,[$np],#4
+ sbcs $tj,$tj,$nj @ tp[j]-np[j]
+ str $tj,[$rp],#4 @ rp[j]=
+ teq $tp,$num @ preserve carry
+ bne .Lsub
+ sbcs $nhi,$nhi,#0 @ upmost carry
+ mov $tp,sp @ "rewind" $tp
+ sub $rp,$rp,$aj @ "rewind" $rp
+
+ and $ap,$tp,$nhi
+ bic $np,$rp,$nhi
+ orr $ap,$ap,$np @ ap=borrow?tp:rp
+
+.Lcopy: ldr $tj,[$ap],#4 @ copy or in-place refresh
+ str sp,[$tp],#4 @ zap tp
+ str $tj,[$rp],#4
+ cmp $tp,$num
+ bne .Lcopy
+
+ add sp,$num,#4 @ skip over tp[num+1]
+ ldmia sp!,{r4-r12,lr} @ restore registers
+ add sp,sp,#2*4 @ skip over {r0,r2}
+ mov r0,#1
+.Labrt: tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+.size bn_mul_mont,.-bn_mul_mont
+.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+print $code;
+close STDOUT;
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/bn/armv4-mont.s 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,145 @@
+.text
+
+.global bn_mul_mont
+.type bn_mul_mont,%function
+
+.align 2
+bn_mul_mont:
+ stmdb sp!,{r0,r2} @ sp points at argument block
+ ldr r0,[sp,#3*4] @ load num
+ cmp r0,#2
+ movlt r0,#0
+ addlt sp,sp,#2*4
+ blt .Labrt
+
+ stmdb sp!,{r4-r12,lr} @ save 10 registers
+
+ mov r0,r0,lsl#2 @ rescale r0 for byte count
+ sub sp,sp,r0 @ alloca(4*num)
+ sub sp,sp,#4 @ +extra dword
+ sub r0,r0,#4 @ "num=num-1"
+ add r4,r2,r0 @ &bp[num-1]
+
+ add r0,sp,r0 @ r0 to point at &tp[num-1]
+ ldr r8,[r0,#14*4] @ &n0
+ ldr r2,[r2] @ bp[0]
+ ldr r5,[r1],#4 @ ap[0],ap++
+ ldr r6,[r3],#4 @ np[0],np++
+ ldr r8,[r8] @ *n0
+ str r4,[r0,#15*4] @ save &bp[num]
+
+ umull r10,r11,r5,r2 @ ap[0]*bp[0]
+ str r8,[r0,#14*4] @ save n0 value
+ mul r8,r10,r8 @ "tp[0]"*n0
+ mov r12,#0
+	umlal	r10,r12,r6,r8	@ np[0]*n0+"tp[0]"
+ mov r4,sp
+
+.L1st:
+ ldr r5,[r1],#4 @ ap[j],ap++
+ mov r10,r11
+ mov r11,#0
+ umlal r10,r11,r5,r2 @ ap[j]*bp[0]
+ ldr r6,[r3],#4 @ np[j],np++
+ mov r14,#0
+ umlal r12,r14,r6,r8 @ np[j]*n0
+ adds r12,r12,r10
+ str r12,[r4],#4 @ tp[j-1]=,tp++
+ adc r12,r14,#0
+ cmp r4,r0
+ bne .L1st
+
+ adds r12,r12,r11
+ mov r14,#0
+ adc r14,r14,#0
+ ldr r4,[r0,#13*4] @ restore bp
+ str r12,[r0] @ tp[num-1]=
+ ldr r8,[r0,#14*4] @ restore n0
+ str r14,[r0,#4] @ tp[num]=
+
+.Louter:
+ sub r7,r0,sp @ "original" r0-1 value
+ sub r1,r1,r7 @ "rewind" ap to &ap[1]
+ sub r3,r3,r7 @ "rewind" np to &np[1]
+ ldr r2,[r4,#4]! @ *(++bp)
+ ldr r5,[r1,#-4] @ ap[0]
+ ldr r6,[r3,#-4] @ np[0]
+ ldr r10,[sp] @ tp[0]
+ ldr r7,[sp,#4] @ tp[1]
+
+ mov r11,#0
+ umlal r10,r11,r5,r2 @ ap[0]*bp[i]+tp[0]
+ str r4,[r0,#13*4] @ save bp
+ mul r8,r10,r8
+ mov r12,#0
+ umlal r10,r12,r6,r8 @ np[0]*n0+"tp[0]"
+ mov r4,sp
+
+.Linner:
+ ldr r5,[r1],#4 @ ap[j],ap++
+ adds r10,r11,r7 @ +=tp[j]
+ mov r11,#0
+ umlal r10,r11,r5,r2 @ ap[j]*bp[i]
+ ldr r6,[r3],#4 @ np[j],np++
+ mov r14,#0
+ umlal r12,r14,r6,r8 @ np[j]*n0
+ ldr r7,[r4,#8] @ tp[j+1]
+ adc r11,r11,#0
+ adds r12,r12,r10
+ str r12,[r4],#4 @ tp[j-1]=,tp++
+ adc r12,r14,#0
+ cmp r4,r0
+ bne .Linner
+
+ adds r12,r12,r11
+ mov r14,#0
+ adc r14,r14,#0
+ adds r12,r12,r7
+ adc r14,r14,#0
+ ldr r4,[r0,#13*4] @ restore bp
+ ldr r7,[r0,#15*4] @ restore &bp[num]
+ str r12,[r0] @ tp[num-1]=
+ ldr r8,[r0,#14*4] @ restore n0
+ str r14,[r0,#4] @ tp[num]=
+
+ cmp r4,r7
+ bne .Louter
+
+ ldr r2,[r0,#12*4] @ pull rp
+ add r0,r0,#4 @ r0 to point at &tp[num]
+ sub r5,r0,sp @ "original" num value
+ mov r4,sp @ "rewind" r4
+ mov r1,r4 @ "borrow" r1
+ sub r3,r3,r5 @ "rewind" r3 to &np[0]
+
+ subs r7,r7,r7 @ "clear" carry flag
+.Lsub: ldr r7,[r4],#4
+ ldr r6,[r3],#4
+ sbcs r7,r7,r6 @ tp[j]-np[j]
+ str r7,[r2],#4 @ rp[j]=
+ teq r4,r0 @ preserve carry
+ bne .Lsub
+ sbcs r14,r14,#0 @ upmost carry
+ mov r4,sp @ "rewind" r4
+ sub r2,r2,r5 @ "rewind" r2
+
+ and r1,r4,r14
+ bic r3,r2,r14
+ orr r1,r1,r3 @ ap=borrow?tp:rp
+
+.Lcopy: ldr r7,[r1],#4 @ copy or in-place refresh
+ str sp,[r4],#4 @ zap tp
+ str r7,[r2],#4
+ cmp r4,r0
+ bne .Lcopy
+
+ add sp,r0,#4 @ skip over tp[num+1]
+ ldmia sp!,{r4-r12,lr} @ restore registers
+ add sp,sp,#2*4 @ skip over {r0,r2}
+ mov r0,#1
+.Labrt: tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.size bn_mul_mont,.-bn_mul_mont
+.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/sha/sha1-armv4-large.pl 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,231 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# sha1_block procedure for ARMv4.
+#
+# January 2007.
+
+# Size/performance trade-off
+# ====================================================================
+# impl size in bytes comp cycles[*] measured performance
+# ====================================================================
+# thumb 304 3212 4420
+# armv4-small 392/+29% 1958/+64% 2250/+96%
+# armv4-compact 740/+89% 1552/+26% 1840/+22%
+# armv4-large 1420/+92% 1307/+19% 1500/+23%
+# full unroll ~5100/+260% ~1260/+4% ~1500/+0%
+# ====================================================================
+# thumb = same as 'small' but in Thumb instructions[**] and
+# with recurring code in two private functions;
+# small = detached Xload/update, loops are folded;
+# compact = detached Xload/update, 5x unroll;
+# large = interleaved Xload/update, 5x unroll;
+# full unroll = interleaved Xload/update, full unroll, estimated[!];
+#
+# [*] Manually counted instructions in "grand" loop body. Measured
+# performance is affected by prologue and epilogue overhead,
+# i-cache availability, branch penalties, etc.
+# [**] While each Thumb instruction is half the size, Thumb
+# instructions are not as diverse as ARM ones: e.g., there are only
+# two arithmetic instructions with 3 arguments, no [fixed] rotate,
+# and addressing modes are limited. As a result it takes more
+# instructions to do the same job in Thumb, so the code is never
+# half the size and is always slower.
+
+$output=shift;
+open STDOUT,">$output";
+
+$ctx="r0";
+$inp="r1";
+$len="r2";
+$a="r3";
+$b="r4";
+$c="r5";
+$d="r6";
+$e="r7";
+$K="r8";
+$t0="r10";
+$t1="r11";
+$t2="r12";
+$Xi="r14";
+@V=($a,$b,$c,$d,$e);
+
+# One can optimize this for aligned access on a big-endian
+# architecture, but the code's endian neutrality makes it too
+# pretty:-)
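+# (Xload below amounts to X[i] = unpack("N",...), i.e. a big-endian
+# 32-bit fetch, assembled byte by byte so that unaligned input works
+# on either endianness.)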
+sub Xload {
+my ($a,$b,$c,$d,$e)=@_;
+$code.=<<___;
+ ldrb $t0,[$inp],#4
+ ldrb $t1,[$inp,#-3]
+ ldrb $t2,[$inp,#-2]
+ add $e,$K,$e,ror#2 @ E+=K_00_19
+ orr $t0,$t1,$t0,lsl#8
+ ldrb $t1,[$inp,#-1]
+ orr $t0,$t2,$t0,lsl#8
+ add $e,$e,$a,ror#27 @ E+=ROR(A,27)
+ orr $t0,$t1,$t0,lsl#8
+ add $e,$e,$t0 @ E+=X[i]
+ eor $t1,$c,$d @ F_xx_xx
+ str $t0,[$Xi,#-4]!
+___
+}
+sub Xupdate {
+my ($a,$b,$c,$d,$e,$flag)=@_;
+$code.=<<___;
+ ldr $t0,[$Xi,#15*4]
+ ldr $t1,[$Xi,#13*4]
+ ldr $t2,[$Xi,#7*4]
+ add $e,$K,$e,ror#2 @ E+=K_xx_xx
+ eor $t0,$t0,$t1
+ ldr $t1,[$Xi,#2*4]
+ add $e,$e,$a,ror#27 @ E+=ROR(A,27)
+ eor $t0,$t0,$t2
+ eor $t0,$t0,$t1
+___
+$code.=<<___ if (!defined($flag));
+ eor $t1,$c,$d @ F_xx_xx, but not in 40_59
+___
+$code.=<<___;
+ mov $t0,$t0,ror#31
+ add $e,$e,$t0 @ E+=X[i]
+ str $t0,[$Xi,#-4]!
+___
+}
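+# Xupdate implements the standard schedule recurrence
+#	X[i] = ROL(X[i-3] ^ X[i-8] ^ X[i-14] ^ X[i-16], 1)
+# (ror#31 being ROL by 1); $Xi walks down the stack frame, so the
+# four fixed offsets above address X[i-3..i-16] of rounds gone by.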
+
+sub BODY_00_15 {
+my ($a,$b,$c,$d,$e)=@_;
+ &Xload(@_);
+$code.=<<___;
+ and $t1,$b,$t1,ror#2
+ eor $t1,$t1,$d,ror#2 @ F_00_19(B,C,D)
+ add $e,$e,$t1 @ E+=F_00_19(B,C,D)
+___
+}
+
+sub BODY_16_19 {
+my ($a,$b,$c,$d,$e)=@_;
+ &Xupdate(@_);
+$code.=<<___;
+ and $t1,$b,$t1,ror#2
+ eor $t1,$t1,$d,ror#2 @ F_00_19(B,C,D)
+ add $e,$e,$t1 @ E+=F_00_19(B,C,D)
+___
+}
+
+sub BODY_20_39 {
+my ($a,$b,$c,$d,$e)=@_;
+ &Xupdate(@_);
+$code.=<<___;
+ eor $t1,$b,$t1,ror#2 @ F_20_39(B,C,D)
+ add $e,$e,$t1 @ E+=F_20_39(B,C,D)
+___
+}
+
+sub BODY_40_59 {
+my ($a,$b,$c,$d,$e)=@_;
+ &Xupdate(@_,1);
+$code.=<<___;
+ and $t1,$b,$c,ror#2
+ orr $t2,$b,$c,ror#2
+ and $t2,$t2,$d,ror#2
+ orr $t1,$t1,$t2 @ F_40_59(B,C,D)
+ add $e,$e,$t1 @ E+=F_40_59(B,C,D)
+___
+}
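+
+# The three round functions folded into the BODY_* subs above, written
+# out in plain Perl (the *_ref subs are illustrative only and are never
+# called by this generator):
+sub F_00_19_ref { my($b,$c,$d)=@_; ($b&($c^$d))^$d }		# Ch, as eor/and/eor above
+sub F_20_39_ref { my($b,$c,$d)=@_; $b^$c^$d }			# Parity
+sub F_40_59_ref { my($b,$c,$d)=@_; ($b&$c)|(($b|$c)&$d) }	# Maj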
+
+$code=<<___;
+.text
+
+.global sha1_block_data_order
+.type sha1_block_data_order,%function
+
+.align 2
+sha1_block_data_order:
+ stmdb sp!,{r4-r12,lr}
+ add $len,$inp,$len,lsl#6 @ $len to point at the end of $inp
+ ldmia $ctx,{$a,$b,$c,$d,$e}
+.Lloop:
+ ldr $K,.LK_00_19
+ mov $Xi,sp
+ sub sp,sp,#15*4
+ mov $c,$c,ror#30
+ mov $d,$d,ror#30
+ mov $e,$e,ror#30 @ [6]
+.L_00_15:
+___
+for($i=0;$i<5;$i++) {
+ &BODY_00_15(@V); unshift(@V,pop(@V));
+}
+$code.=<<___;
+ teq $Xi,sp
+ bne .L_00_15 @ [((11+4)*5+2)*3]
+___
+ &BODY_00_15(@V); unshift(@V,pop(@V));
+ &BODY_16_19(@V); unshift(@V,pop(@V));
+ &BODY_16_19(@V); unshift(@V,pop(@V));
+ &BODY_16_19(@V); unshift(@V,pop(@V));
+ &BODY_16_19(@V); unshift(@V,pop(@V));
+$code.=<<___;
+
+ ldr $K,.LK_20_39 @ [+15+16*4]
+ sub sp,sp,#25*4
+ cmn sp,#0 @ [+3], clear carry to denote 20_39
+.L_20_39_or_60_79:
+___
+for($i=0;$i<5;$i++) {
+ &BODY_20_39(@V); unshift(@V,pop(@V));
+}
+$code.=<<___;
+ teq $Xi,sp @ preserve carry
+ bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
+ bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
+
+ ldr $K,.LK_40_59
+ sub sp,sp,#20*4 @ [+2]
+.L_40_59:
+___
+for($i=0;$i<5;$i++) {
+ &BODY_40_59(@V); unshift(@V,pop(@V));
+}
+$code.=<<___;
+ teq $Xi,sp
+ bne .L_40_59 @ [+((12+5)*5+2)*4]
+
+ ldr $K,.LK_60_79
+ sub sp,sp,#20*4
+ cmp sp,#0 @ set carry to denote 60_79
+ b .L_20_39_or_60_79 @ [+4], spare 300 bytes
+.L_done:
+ add sp,sp,#80*4 @ "deallocate" stack frame
+ ldmia $ctx,{$K,$t0,$t1,$t2,$Xi}
+ add $a,$K,$a
+ add $b,$t0,$b
+ add $c,$t1,$c,ror#2
+ add $d,$t2,$d,ror#2
+ add $e,$Xi,$e,ror#2
+ stmia $ctx,{$a,$b,$c,$d,$e}
+ teq $inp,$len
+ bne .Lloop @ [+18], total 1307
+
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+.align 2
+.LK_00_19: .word 0x5a827999
+.LK_20_39: .word 0x6ed9eba1
+.LK_40_59: .word 0x8f1bbcdc
+.LK_60_79: .word 0xca62c1d6
+.size sha1_block_data_order,.-sha1_block_data_order
+.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+print $code;
+close STDOUT; # enforce flush
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/sha/sha1-armv4-large.s 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,376 @@
+.text
+
+.global sha1_block_data_order
+.type sha1_block_data_order,%function
+
+.align 2
+sha1_block_data_order:
+ stmdb sp!,{r4-r12,lr}
+ add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
+ ldmia r0,{r3,r4,r5,r6,r7}
+.Lloop:
+ ldr r8,.LK_00_19
+ mov r14,sp
+ sub sp,sp,#15*4
+ mov r5,r5,ror#30
+ mov r6,r6,ror#30
+ mov r7,r7,ror#30 @ [6]
+.L_00_15:
+ ldrb r10,[r1],#4
+ ldrb r11,[r1,#-3]
+ ldrb r12,[r1,#-2]
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ orr r10,r11,r10,lsl#8
+ ldrb r11,[r1,#-1]
+ orr r10,r12,r10,lsl#8
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ orr r10,r11,r10,lsl#8
+ add r7,r7,r10 @ E+=X[i]
+ eor r11,r5,r6 @ F_xx_xx
+ str r10,[r14,#-4]!
+ and r11,r4,r11,ror#2
+ eor r11,r11,r6,ror#2 @ F_00_19(B,C,D)
+ add r7,r7,r11 @ E+=F_00_19(B,C,D)
+ ldrb r10,[r1],#4
+ ldrb r11,[r1,#-3]
+ ldrb r12,[r1,#-2]
+ add r6,r8,r6,ror#2 @ E+=K_00_19
+ orr r10,r11,r10,lsl#8
+ ldrb r11,[r1,#-1]
+ orr r10,r12,r10,lsl#8
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ orr r10,r11,r10,lsl#8
+ add r6,r6,r10 @ E+=X[i]
+ eor r11,r4,r5 @ F_xx_xx
+ str r10,[r14,#-4]!
+ and r11,r3,r11,ror#2
+ eor r11,r11,r5,ror#2 @ F_00_19(B,C,D)
+ add r6,r6,r11 @ E+=F_00_19(B,C,D)
+ ldrb r10,[r1],#4
+ ldrb r11,[r1,#-3]
+ ldrb r12,[r1,#-2]
+ add r5,r8,r5,ror#2 @ E+=K_00_19
+ orr r10,r11,r10,lsl#8
+ ldrb r11,[r1,#-1]
+ orr r10,r12,r10,lsl#8
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ orr r10,r11,r10,lsl#8
+ add r5,r5,r10 @ E+=X[i]
+ eor r11,r3,r4 @ F_xx_xx
+ str r10,[r14,#-4]!
+ and r11,r7,r11,ror#2
+ eor r11,r11,r4,ror#2 @ F_00_19(B,C,D)
+ add r5,r5,r11 @ E+=F_00_19(B,C,D)
+ ldrb r10,[r1],#4
+ ldrb r11,[r1,#-3]
+ ldrb r12,[r1,#-2]
+ add r4,r8,r4,ror#2 @ E+=K_00_19
+ orr r10,r11,r10,lsl#8
+ ldrb r11,[r1,#-1]
+ orr r10,r12,r10,lsl#8
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ orr r10,r11,r10,lsl#8
+ add r4,r4,r10 @ E+=X[i]
+ eor r11,r7,r3 @ F_xx_xx
+ str r10,[r14,#-4]!
+ and r11,r6,r11,ror#2
+ eor r11,r11,r3,ror#2 @ F_00_19(B,C,D)
+ add r4,r4,r11 @ E+=F_00_19(B,C,D)
+ ldrb r10,[r1],#4
+ ldrb r11,[r1,#-3]
+ ldrb r12,[r1,#-2]
+ add r3,r8,r3,ror#2 @ E+=K_00_19
+ orr r10,r11,r10,lsl#8
+ ldrb r11,[r1,#-1]
+ orr r10,r12,r10,lsl#8
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ orr r10,r11,r10,lsl#8
+ add r3,r3,r10 @ E+=X[i]
+ eor r11,r6,r7 @ F_xx_xx
+ str r10,[r14,#-4]!
+ and r11,r5,r11,ror#2
+ eor r11,r11,r7,ror#2 @ F_00_19(B,C,D)
+ add r3,r3,r11 @ E+=F_00_19(B,C,D)
+ teq r14,sp
+ bne .L_00_15 @ [((11+4)*5+2)*3]
+ ldrb r10,[r1],#4
+ ldrb r11,[r1,#-3]
+ ldrb r12,[r1,#-2]
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ orr r10,r11,r10,lsl#8
+ ldrb r11,[r1,#-1]
+ orr r10,r12,r10,lsl#8
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ orr r10,r11,r10,lsl#8
+ add r7,r7,r10 @ E+=X[i]
+ eor r11,r5,r6 @ F_xx_xx
+ str r10,[r14,#-4]!
+ and r11,r4,r11,ror#2
+ eor r11,r11,r6,ror#2 @ F_00_19(B,C,D)
+ add r7,r7,r11 @ E+=F_00_19(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r6,r8,r6,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ eor r11,r4,r5 @ F_xx_xx, but not in 40_59
+ mov r10,r10,ror#31
+ add r6,r6,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ and r11,r3,r11,ror#2
+ eor r11,r11,r5,ror#2 @ F_00_19(B,C,D)
+ add r6,r6,r11 @ E+=F_00_19(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r5,r8,r5,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ eor r11,r3,r4 @ F_xx_xx, but not in 40_59
+ mov r10,r10,ror#31
+ add r5,r5,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ and r11,r7,r11,ror#2
+ eor r11,r11,r4,ror#2 @ F_00_19(B,C,D)
+ add r5,r5,r11 @ E+=F_00_19(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r4,r8,r4,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ eor r11,r7,r3 @ F_xx_xx, but not in 40_59
+ mov r10,r10,ror#31
+ add r4,r4,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ and r11,r6,r11,ror#2
+ eor r11,r11,r3,ror#2 @ F_00_19(B,C,D)
+ add r4,r4,r11 @ E+=F_00_19(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r3,r8,r3,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ eor r11,r6,r7 @ F_xx_xx, but not in 40_59
+ mov r10,r10,ror#31
+ add r3,r3,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ and r11,r5,r11,ror#2
+ eor r11,r11,r7,ror#2 @ F_00_19(B,C,D)
+ add r3,r3,r11 @ E+=F_00_19(B,C,D)
+
+ ldr r8,.LK_20_39 @ [+15+16*4]
+ sub sp,sp,#25*4
+ cmn sp,#0 @ [+3], clear carry to denote 20_39
+.L_20_39_or_60_79:
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r7,r8,r7,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ eor r11,r5,r6 @ F_xx_xx, but not in 40_59
+ mov r10,r10,ror#31
+ add r7,r7,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ eor r11,r4,r11,ror#2 @ F_20_39(B,C,D)
+ add r7,r7,r11 @ E+=F_20_39(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r6,r8,r6,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ eor r11,r4,r5 @ F_xx_xx, but not in 40_59
+ mov r10,r10,ror#31
+ add r6,r6,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ eor r11,r3,r11,ror#2 @ F_20_39(B,C,D)
+ add r6,r6,r11 @ E+=F_20_39(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r5,r8,r5,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ eor r11,r3,r4 @ F_xx_xx, but not in 40_59
+ mov r10,r10,ror#31
+ add r5,r5,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ eor r11,r7,r11,ror#2 @ F_20_39(B,C,D)
+ add r5,r5,r11 @ E+=F_20_39(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r4,r8,r4,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ eor r11,r7,r3 @ F_xx_xx, but not in 40_59
+ mov r10,r10,ror#31
+ add r4,r4,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ eor r11,r6,r11,ror#2 @ F_20_39(B,C,D)
+ add r4,r4,r11 @ E+=F_20_39(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r3,r8,r3,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ eor r11,r6,r7 @ F_xx_xx, but not in 40_59
+ mov r10,r10,ror#31
+ add r3,r3,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ eor r11,r5,r11,ror#2 @ F_20_39(B,C,D)
+ add r3,r3,r11 @ E+=F_20_39(B,C,D)
+ teq r14,sp @ preserve carry
+ bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
+ bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
+
+ ldr r8,.LK_40_59
+ sub sp,sp,#20*4 @ [+2]
+.L_40_59:
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r7,r8,r7,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ mov r10,r10,ror#31
+ add r7,r7,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ and r11,r4,r5,ror#2
+ orr r12,r4,r5,ror#2
+ and r12,r12,r6,ror#2
+ orr r11,r11,r12 @ F_40_59(B,C,D)
+ add r7,r7,r11 @ E+=F_40_59(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r6,r8,r6,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ mov r10,r10,ror#31
+ add r6,r6,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ and r11,r3,r4,ror#2
+ orr r12,r3,r4,ror#2
+ and r12,r12,r5,ror#2
+ orr r11,r11,r12 @ F_40_59(B,C,D)
+ add r6,r6,r11 @ E+=F_40_59(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r5,r8,r5,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ mov r10,r10,ror#31
+ add r5,r5,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ and r11,r7,r3,ror#2
+ orr r12,r7,r3,ror#2
+ and r12,r12,r4,ror#2
+ orr r11,r11,r12 @ F_40_59(B,C,D)
+ add r5,r5,r11 @ E+=F_40_59(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r4,r8,r4,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ mov r10,r10,ror#31
+ add r4,r4,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ and r11,r6,r7,ror#2
+ orr r12,r6,r7,ror#2
+ and r12,r12,r3,ror#2
+ orr r11,r11,r12 @ F_40_59(B,C,D)
+ add r4,r4,r11 @ E+=F_40_59(B,C,D)
+ ldr r10,[r14,#15*4]
+ ldr r11,[r14,#13*4]
+ ldr r12,[r14,#7*4]
+ add r3,r8,r3,ror#2 @ E+=K_xx_xx
+ eor r10,r10,r11
+ ldr r11,[r14,#2*4]
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ eor r10,r10,r12
+ eor r10,r10,r11
+ mov r10,r10,ror#31
+ add r3,r3,r10 @ E+=X[i]
+ str r10,[r14,#-4]!
+ and r11,r5,r6,ror#2
+ orr r12,r5,r6,ror#2
+ and r12,r12,r7,ror#2
+ orr r11,r11,r12 @ F_40_59(B,C,D)
+ add r3,r3,r11 @ E+=F_40_59(B,C,D)
+ teq r14,sp
+ bne .L_40_59 @ [+((12+5)*5+2)*4]
+
+ ldr r8,.LK_60_79
+ sub sp,sp,#20*4
+ cmp sp,#0 @ set carry to denote 60_79
+ b .L_20_39_or_60_79 @ [+4], spare 300 bytes
+.L_done:
+ add sp,sp,#80*4 @ "deallocate" stack frame
+ ldmia r0,{r8,r10,r11,r12,r14}
+ add r3,r8,r3
+ add r4,r10,r4
+ add r5,r11,r5,ror#2
+ add r6,r12,r6,ror#2
+ add r7,r14,r7,ror#2
+ stmia r0,{r3,r4,r5,r6,r7}
+ teq r1,r2
+ bne .Lloop @ [+18], total 1307
+
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.align 2
+.LK_00_19: .word 0x5a827999
+.LK_20_39: .word 0x6ed9eba1
+.LK_40_59: .word 0x8f1bbcdc
+.LK_60_79: .word 0xca62c1d6
+.size sha1_block_data_order,.-sha1_block_data_order
+.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/sha/sha256-armv4.pl 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,180 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# SHA256 block procedure for ARMv4. May 2007.
+
+# Performance is ~2x better than gcc 3.4 generated code and in
+# "absolute" terms is ~2250 cycles per 64-byte block or ~35 cycles
+# per byte.
+
+$output=shift;
+open STDOUT,">$output";
+
+$ctx="r0"; $t0="r0";
+$inp="r1";
+$len="r2"; $t1="r2";
+$T1="r3";
+$A="r4";
+$B="r5";
+$C="r6";
+$D="r7";
+$E="r8";
+$F="r9";
+$G="r10";
+$H="r11";
+@V=($A,$B,$C,$D,$E,$F,$G,$H);
+$t2="r12";
+$Ktbl="r14";
+
+@Sigma0=( 2,13,22);
+@Sigma1=( 6,11,25);
+@sigma0=( 7,18, 3);
+@sigma1=(17,19,10);
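+
+# Reference versions of the four shuffle functions encoded by the
+# rotation counts above (illustrative only, never called by this
+# generator; rotr32 is a local helper):
+sub rotr32     { my($x,$n)=@_; (($x>>$n)|($x<<(32-$n)))&0xffffffff }
+sub Sigma0_ref { my $x=shift; rotr32($x,2)^rotr32($x,13)^rotr32($x,22) }
+sub Sigma1_ref { my $x=shift; rotr32($x,6)^rotr32($x,11)^rotr32($x,25) }
+sub sigma0_ref { my $x=shift; rotr32($x,7)^rotr32($x,18)^($x>>3) }
+sub sigma1_ref { my $x=shift; rotr32($x,17)^rotr32($x,19)^($x>>10) }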
+
+sub BODY_00_15 {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___ if ($i<16);
+ ldrb $T1,[$inp,#3] @ $i
+ ldrb $t2,[$inp,#2]
+ ldrb $t1,[$inp,#1]
+ ldrb $t0,[$inp],#4
+ orr $T1,$T1,$t2,lsl#8
+ orr $T1,$T1,$t1,lsl#16
+ orr $T1,$T1,$t0,lsl#24
+ `"str $inp,[sp,#17*4]" if ($i==15)`
+___
+$code.=<<___;
+ ldr $t2,[$Ktbl],#4 @ *K256++
+ str $T1,[sp,#`$i%16`*4]
+ mov $t0,$e,ror#$Sigma1[0]
+ eor $t0,$t0,$e,ror#$Sigma1[1]
+ eor $t0,$t0,$e,ror#$Sigma1[2] @ Sigma1(e)
+ add $T1,$T1,$t0
+ eor $t1,$f,$g
+ and $t1,$t1,$e
+ eor $t1,$t1,$g @ Ch(e,f,g)
+ add $T1,$T1,$t1
+ add $T1,$T1,$h
+ add $T1,$T1,$t2
+ mov $h,$a,ror#$Sigma0[0]
+ eor $h,$h,$a,ror#$Sigma0[1]
+ eor $h,$h,$a,ror#$Sigma0[2] @ Sigma0(a)
+ orr $t0,$a,$b
+ and $t0,$t0,$c
+ and $t1,$a,$b
+ orr $t0,$t0,$t1 @ Maj(a,b,c)
+ add $h,$h,$t0
+ add $d,$d,$T1
+ add $h,$h,$T1
+___
+}
+
+sub BODY_16_XX {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___;
+ ldr $t1,[sp,#`($i+1)%16`*4] @ $i
+ ldr $t2,[sp,#`($i+14)%16`*4]
+ ldr $T1,[sp,#`($i+0)%16`*4]
+ ldr $inp,[sp,#`($i+9)%16`*4]
+ mov $t0,$t1,ror#$sigma0[0]
+ eor $t0,$t0,$t1,ror#$sigma0[1]
+ eor $t0,$t0,$t1,lsr#$sigma0[2] @ sigma0(X[i+1])
+ mov $t1,$t2,ror#$sigma1[0]
+ eor $t1,$t1,$t2,ror#$sigma1[1]
+ eor $t1,$t1,$t2,lsr#$sigma1[2] @ sigma1(X[i+14])
+ add $T1,$T1,$t0
+ add $T1,$T1,$t1
+ add $T1,$T1,$inp
+___
+ &BODY_00_15(@_);
+}
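+
+# i.e. the standard message expansion
+#	X[i] = sigma1(X[i-2]) + X[i-7] + sigma0(X[i-15]) + X[i-16]
+# (mod 2^32); X[] lives in a 16-word circular buffer on the stack,
+# hence the `%16' on every index above.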
+
+$code=<<___;
+.text
+.code 32
+
+.type K256,%object
+.align 5
+K256:
+.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size K256,.-K256
+
+.global sha256_block_data_order
+.type sha256_block_data_order,%function
+sha256_block_data_order:
+ sub r3,pc,#8 @ sha256_block_data_order
+ add $len,$inp,$len,lsl#6 @ len to point at the end of inp
+ stmdb sp!,{$ctx,$inp,$len,r4-r12,lr}
+ ldmia $ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
+ sub $Ktbl,r3,#256 @ K256
+ sub sp,sp,#16*4 @ alloca(X[16])
+.Loop:
+___
+for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=".Lrounds_16_xx:\n";
+for (;$i<32;$i++) { &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+ and $t2,$t2,#0xff
+ cmp $t2,#0xf2
+ bne .Lrounds_16_xx
+
+ ldr $T1,[sp,#16*4] @ pull ctx
+ ldr $t0,[$T1,#0]
+ ldr $t1,[$T1,#4]
+ ldr $t2,[$T1,#8]
+ add $A,$A,$t0
+ ldr $t0,[$T1,#12]
+ add $B,$B,$t1
+ ldr $t1,[$T1,#16]
+ add $C,$C,$t2
+ ldr $t2,[$T1,#20]
+ add $D,$D,$t0
+ ldr $t0,[$T1,#24]
+ add $E,$E,$t1
+ ldr $t1,[$T1,#28]
+ add $F,$F,$t2
+ ldr $inp,[sp,#17*4] @ pull inp
+ ldr $t2,[sp,#18*4] @ pull inp+len
+ add $G,$G,$t0
+ add $H,$H,$t1
+ stmia $T1,{$A,$B,$C,$D,$E,$F,$G,$H}
+ cmp $inp,$t2
+ sub $Ktbl,$Ktbl,#256 @ rewind Ktbl
+ bne .Loop
+
+ add sp,sp,#`16+3`*4 @ destroy frame
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+.size sha256_block_data_order,.-sha256_block_data_order
+.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+print $code;
+close STDOUT; # enforce flush
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/sha/sha256-armv4.s 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,1110 @@
+.text
+.code 32
+
+.type K256,%object
+.align 5
+K256:
+.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size K256,.-K256
+
+.global sha256_block_data_order
+.type sha256_block_data_order,%function
+sha256_block_data_order:
+ sub r3,pc,#8 @ sha256_block_data_order
+ add r2,r1,r2,lsl#6 @ len to point at the end of inp
+ stmdb sp!,{r0,r1,r2,r4-r12,lr}
+ ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
+ sub r14,r3,#256 @ K256
+ sub sp,sp,#16*4 @ alloca(X[16])
+.Loop:
+ ldrb r3,[r1,#3] @ 0
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#0*4]
+ mov r0,r8,ror#6
+ eor r0,r0,r8,ror#11
+ eor r0,r0,r8,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r9,r10
+ and r2,r2,r8
+ eor r2,r2,r10 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r11
+ add r3,r3,r12
+ mov r11,r4,ror#2
+ eor r11,r11,r4,ror#13
+ eor r11,r11,r4,ror#22 @ Sigma0(a)
+ orr r0,r4,r5
+ and r0,r0,r6
+ and r2,r4,r5
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r11,r11,r0
+ add r7,r7,r3
+ add r11,r11,r3
+ ldrb r3,[r1,#3] @ 1
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#1*4]
+ mov r0,r7,ror#6
+ eor r0,r0,r7,ror#11
+ eor r0,r0,r7,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r8,r9
+ and r2,r2,r7
+ eor r2,r2,r9 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r10
+ add r3,r3,r12
+ mov r10,r11,ror#2
+ eor r10,r10,r11,ror#13
+ eor r10,r10,r11,ror#22 @ Sigma0(a)
+ orr r0,r11,r4
+ and r0,r0,r5
+ and r2,r11,r4
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r10,r10,r0
+ add r6,r6,r3
+ add r10,r10,r3
+ ldrb r3,[r1,#3] @ 2
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#2*4]
+ mov r0,r6,ror#6
+ eor r0,r0,r6,ror#11
+ eor r0,r0,r6,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r7,r8
+ and r2,r2,r6
+ eor r2,r2,r8 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r9
+ add r3,r3,r12
+ mov r9,r10,ror#2
+ eor r9,r9,r10,ror#13
+ eor r9,r9,r10,ror#22 @ Sigma0(a)
+ orr r0,r10,r11
+ and r0,r0,r4
+ and r2,r10,r11
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r9,r9,r0
+ add r5,r5,r3
+ add r9,r9,r3
+ ldrb r3,[r1,#3] @ 3
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#3*4]
+ mov r0,r5,ror#6
+ eor r0,r0,r5,ror#11
+ eor r0,r0,r5,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r6,r7
+ and r2,r2,r5
+ eor r2,r2,r7 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r8
+ add r3,r3,r12
+ mov r8,r9,ror#2
+ eor r8,r8,r9,ror#13
+ eor r8,r8,r9,ror#22 @ Sigma0(a)
+ orr r0,r9,r10
+ and r0,r0,r11
+ and r2,r9,r10
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r8,r8,r0
+ add r4,r4,r3
+ add r8,r8,r3
+ ldrb r3,[r1,#3] @ 4
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#4*4]
+ mov r0,r4,ror#6
+ eor r0,r0,r4,ror#11
+ eor r0,r0,r4,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r5,r6
+ and r2,r2,r4
+ eor r2,r2,r6 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r7
+ add r3,r3,r12
+ mov r7,r8,ror#2
+ eor r7,r7,r8,ror#13
+ eor r7,r7,r8,ror#22 @ Sigma0(a)
+ orr r0,r8,r9
+ and r0,r0,r10
+ and r2,r8,r9
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r7,r7,r0
+ add r11,r11,r3
+ add r7,r7,r3
+ ldrb r3,[r1,#3] @ 5
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#5*4]
+ mov r0,r11,ror#6
+ eor r0,r0,r11,ror#11
+ eor r0,r0,r11,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r4,r5
+ and r2,r2,r11
+ eor r2,r2,r5 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r6
+ add r3,r3,r12
+ mov r6,r7,ror#2
+ eor r6,r6,r7,ror#13
+ eor r6,r6,r7,ror#22 @ Sigma0(a)
+ orr r0,r7,r8
+ and r0,r0,r9
+ and r2,r7,r8
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r6,r6,r0
+ add r10,r10,r3
+ add r6,r6,r3
+ ldrb r3,[r1,#3] @ 6
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#6*4]
+ mov r0,r10,ror#6
+ eor r0,r0,r10,ror#11
+ eor r0,r0,r10,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r11,r4
+ and r2,r2,r10
+ eor r2,r2,r4 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r5
+ add r3,r3,r12
+ mov r5,r6,ror#2
+ eor r5,r5,r6,ror#13
+ eor r5,r5,r6,ror#22 @ Sigma0(a)
+ orr r0,r6,r7
+ and r0,r0,r8
+ and r2,r6,r7
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r5,r5,r0
+ add r9,r9,r3
+ add r5,r5,r3
+ ldrb r3,[r1,#3] @ 7
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#7*4]
+ mov r0,r9,ror#6
+ eor r0,r0,r9,ror#11
+ eor r0,r0,r9,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r10,r11
+ and r2,r2,r9
+ eor r2,r2,r11 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r4
+ add r3,r3,r12
+ mov r4,r5,ror#2
+ eor r4,r4,r5,ror#13
+ eor r4,r4,r5,ror#22 @ Sigma0(a)
+ orr r0,r5,r6
+ and r0,r0,r7
+ and r2,r5,r6
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r4,r4,r0
+ add r8,r8,r3
+ add r4,r4,r3
+ ldrb r3,[r1,#3] @ 8
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#8*4]
+ mov r0,r8,ror#6
+ eor r0,r0,r8,ror#11
+ eor r0,r0,r8,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r9,r10
+ and r2,r2,r8
+ eor r2,r2,r10 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r11
+ add r3,r3,r12
+ mov r11,r4,ror#2
+ eor r11,r11,r4,ror#13
+ eor r11,r11,r4,ror#22 @ Sigma0(a)
+ orr r0,r4,r5
+ and r0,r0,r6
+ and r2,r4,r5
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r11,r11,r0
+ add r7,r7,r3
+ add r11,r11,r3
+ ldrb r3,[r1,#3] @ 9
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#9*4]
+ mov r0,r7,ror#6
+ eor r0,r0,r7,ror#11
+ eor r0,r0,r7,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r8,r9
+ and r2,r2,r7
+ eor r2,r2,r9 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r10
+ add r3,r3,r12
+ mov r10,r11,ror#2
+ eor r10,r10,r11,ror#13
+ eor r10,r10,r11,ror#22 @ Sigma0(a)
+ orr r0,r11,r4
+ and r0,r0,r5
+ and r2,r11,r4
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r10,r10,r0
+ add r6,r6,r3
+ add r10,r10,r3
+ ldrb r3,[r1,#3] @ 10
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#10*4]
+ mov r0,r6,ror#6
+ eor r0,r0,r6,ror#11
+ eor r0,r0,r6,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r7,r8
+ and r2,r2,r6
+ eor r2,r2,r8 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r9
+ add r3,r3,r12
+ mov r9,r10,ror#2
+ eor r9,r9,r10,ror#13
+ eor r9,r9,r10,ror#22 @ Sigma0(a)
+ orr r0,r10,r11
+ and r0,r0,r4
+ and r2,r10,r11
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r9,r9,r0
+ add r5,r5,r3
+ add r9,r9,r3
+ ldrb r3,[r1,#3] @ 11
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#11*4]
+ mov r0,r5,ror#6
+ eor r0,r0,r5,ror#11
+ eor r0,r0,r5,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r6,r7
+ and r2,r2,r5
+ eor r2,r2,r7 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r8
+ add r3,r3,r12
+ mov r8,r9,ror#2
+ eor r8,r8,r9,ror#13
+ eor r8,r8,r9,ror#22 @ Sigma0(a)
+ orr r0,r9,r10
+ and r0,r0,r11
+ and r2,r9,r10
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r8,r8,r0
+ add r4,r4,r3
+ add r8,r8,r3
+ ldrb r3,[r1,#3] @ 12
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#12*4]
+ mov r0,r4,ror#6
+ eor r0,r0,r4,ror#11
+ eor r0,r0,r4,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r5,r6
+ and r2,r2,r4
+ eor r2,r2,r6 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r7
+ add r3,r3,r12
+ mov r7,r8,ror#2
+ eor r7,r7,r8,ror#13
+ eor r7,r7,r8,ror#22 @ Sigma0(a)
+ orr r0,r8,r9
+ and r0,r0,r10
+ and r2,r8,r9
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r7,r7,r0
+ add r11,r11,r3
+ add r7,r7,r3
+ ldrb r3,[r1,#3] @ 13
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#13*4]
+ mov r0,r11,ror#6
+ eor r0,r0,r11,ror#11
+ eor r0,r0,r11,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r4,r5
+ and r2,r2,r11
+ eor r2,r2,r5 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r6
+ add r3,r3,r12
+ mov r6,r7,ror#2
+ eor r6,r6,r7,ror#13
+ eor r6,r6,r7,ror#22 @ Sigma0(a)
+ orr r0,r7,r8
+ and r0,r0,r9
+ and r2,r7,r8
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r6,r6,r0
+ add r10,r10,r3
+ add r6,r6,r3
+ ldrb r3,[r1,#3] @ 14
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#14*4]
+ mov r0,r10,ror#6
+ eor r0,r0,r10,ror#11
+ eor r0,r0,r10,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r11,r4
+ and r2,r2,r10
+ eor r2,r2,r4 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r5
+ add r3,r3,r12
+ mov r5,r6,ror#2
+ eor r5,r5,r6,ror#13
+ eor r5,r5,r6,ror#22 @ Sigma0(a)
+ orr r0,r6,r7
+ and r0,r0,r8
+ and r2,r6,r7
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r5,r5,r0
+ add r9,r9,r3
+ add r5,r5,r3
+ ldrb r3,[r1,#3] @ 15
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+ str r1,[sp,#17*4]
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#15*4]
+ mov r0,r9,ror#6
+ eor r0,r0,r9,ror#11
+ eor r0,r0,r9,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r10,r11
+ and r2,r2,r9
+ eor r2,r2,r11 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r4
+ add r3,r3,r12
+ mov r4,r5,ror#2
+ eor r4,r4,r5,ror#13
+ eor r4,r4,r5,ror#22 @ Sigma0(a)
+ orr r0,r5,r6
+ and r0,r0,r7
+ and r2,r5,r6
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r4,r4,r0
+ add r8,r8,r3
+ add r4,r4,r3
+.Lrounds_16_xx:
+ ldr r2,[sp,#1*4] @ 16
+ ldr r12,[sp,#14*4]
+ ldr r3,[sp,#0*4]
+ ldr r1,[sp,#9*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#0*4]
+ mov r0,r8,ror#6
+ eor r0,r0,r8,ror#11
+ eor r0,r0,r8,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r9,r10
+ and r2,r2,r8
+ eor r2,r2,r10 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r11
+ add r3,r3,r12
+ mov r11,r4,ror#2
+ eor r11,r11,r4,ror#13
+ eor r11,r11,r4,ror#22 @ Sigma0(a)
+ orr r0,r4,r5
+ and r0,r0,r6
+ and r2,r4,r5
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r11,r11,r0
+ add r7,r7,r3
+ add r11,r11,r3
+ ldr r2,[sp,#2*4] @ 17
+ ldr r12,[sp,#15*4]
+ ldr r3,[sp,#1*4]
+ ldr r1,[sp,#10*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#1*4]
+ mov r0,r7,ror#6
+ eor r0,r0,r7,ror#11
+ eor r0,r0,r7,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r8,r9
+ and r2,r2,r7
+ eor r2,r2,r9 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r10
+ add r3,r3,r12
+ mov r10,r11,ror#2
+ eor r10,r10,r11,ror#13
+ eor r10,r10,r11,ror#22 @ Sigma0(a)
+ orr r0,r11,r4
+ and r0,r0,r5
+ and r2,r11,r4
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r10,r10,r0
+ add r6,r6,r3
+ add r10,r10,r3
+ ldr r2,[sp,#3*4] @ 18
+ ldr r12,[sp,#0*4]
+ ldr r3,[sp,#2*4]
+ ldr r1,[sp,#11*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#2*4]
+ mov r0,r6,ror#6
+ eor r0,r0,r6,ror#11
+ eor r0,r0,r6,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r7,r8
+ and r2,r2,r6
+ eor r2,r2,r8 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r9
+ add r3,r3,r12
+ mov r9,r10,ror#2
+ eor r9,r9,r10,ror#13
+ eor r9,r9,r10,ror#22 @ Sigma0(a)
+ orr r0,r10,r11
+ and r0,r0,r4
+ and r2,r10,r11
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r9,r9,r0
+ add r5,r5,r3
+ add r9,r9,r3
+ ldr r2,[sp,#4*4] @ 19
+ ldr r12,[sp,#1*4]
+ ldr r3,[sp,#3*4]
+ ldr r1,[sp,#12*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#3*4]
+ mov r0,r5,ror#6
+ eor r0,r0,r5,ror#11
+ eor r0,r0,r5,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r6,r7
+ and r2,r2,r5
+ eor r2,r2,r7 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r8
+ add r3,r3,r12
+ mov r8,r9,ror#2
+ eor r8,r8,r9,ror#13
+ eor r8,r8,r9,ror#22 @ Sigma0(a)
+ orr r0,r9,r10
+ and r0,r0,r11
+ and r2,r9,r10
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r8,r8,r0
+ add r4,r4,r3
+ add r8,r8,r3
+ ldr r2,[sp,#5*4] @ 20
+ ldr r12,[sp,#2*4]
+ ldr r3,[sp,#4*4]
+ ldr r1,[sp,#13*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#4*4]
+ mov r0,r4,ror#6
+ eor r0,r0,r4,ror#11
+ eor r0,r0,r4,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r5,r6
+ and r2,r2,r4
+ eor r2,r2,r6 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r7
+ add r3,r3,r12
+ mov r7,r8,ror#2
+ eor r7,r7,r8,ror#13
+ eor r7,r7,r8,ror#22 @ Sigma0(a)
+ orr r0,r8,r9
+ and r0,r0,r10
+ and r2,r8,r9
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r7,r7,r0
+ add r11,r11,r3
+ add r7,r7,r3
+ ldr r2,[sp,#6*4] @ 21
+ ldr r12,[sp,#3*4]
+ ldr r3,[sp,#5*4]
+ ldr r1,[sp,#14*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#5*4]
+ mov r0,r11,ror#6
+ eor r0,r0,r11,ror#11
+ eor r0,r0,r11,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r4,r5
+ and r2,r2,r11
+ eor r2,r2,r5 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r6
+ add r3,r3,r12
+ mov r6,r7,ror#2
+ eor r6,r6,r7,ror#13
+ eor r6,r6,r7,ror#22 @ Sigma0(a)
+ orr r0,r7,r8
+ and r0,r0,r9
+ and r2,r7,r8
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r6,r6,r0
+ add r10,r10,r3
+ add r6,r6,r3
+ ldr r2,[sp,#7*4] @ 22
+ ldr r12,[sp,#4*4]
+ ldr r3,[sp,#6*4]
+ ldr r1,[sp,#15*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#6*4]
+ mov r0,r10,ror#6
+ eor r0,r0,r10,ror#11
+ eor r0,r0,r10,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r11,r4
+ and r2,r2,r10
+ eor r2,r2,r4 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r5
+ add r3,r3,r12
+ mov r5,r6,ror#2
+ eor r5,r5,r6,ror#13
+ eor r5,r5,r6,ror#22 @ Sigma0(a)
+ orr r0,r6,r7
+ and r0,r0,r8
+ and r2,r6,r7
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r5,r5,r0
+ add r9,r9,r3
+ add r5,r5,r3
+ ldr r2,[sp,#8*4] @ 23
+ ldr r12,[sp,#5*4]
+ ldr r3,[sp,#7*4]
+ ldr r1,[sp,#0*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#7*4]
+ mov r0,r9,ror#6
+ eor r0,r0,r9,ror#11
+ eor r0,r0,r9,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r10,r11
+ and r2,r2,r9
+ eor r2,r2,r11 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r4
+ add r3,r3,r12
+ mov r4,r5,ror#2
+ eor r4,r4,r5,ror#13
+ eor r4,r4,r5,ror#22 @ Sigma0(a)
+ orr r0,r5,r6
+ and r0,r0,r7
+ and r2,r5,r6
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r4,r4,r0
+ add r8,r8,r3
+ add r4,r4,r3
+ ldr r2,[sp,#9*4] @ 24
+ ldr r12,[sp,#6*4]
+ ldr r3,[sp,#8*4]
+ ldr r1,[sp,#1*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#8*4]
+ mov r0,r8,ror#6
+ eor r0,r0,r8,ror#11
+ eor r0,r0,r8,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r9,r10
+ and r2,r2,r8
+ eor r2,r2,r10 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r11
+ add r3,r3,r12
+ mov r11,r4,ror#2
+ eor r11,r11,r4,ror#13
+ eor r11,r11,r4,ror#22 @ Sigma0(a)
+ orr r0,r4,r5
+ and r0,r0,r6
+ and r2,r4,r5
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r11,r11,r0
+ add r7,r7,r3
+ add r11,r11,r3
+ ldr r2,[sp,#10*4] @ 25
+ ldr r12,[sp,#7*4]
+ ldr r3,[sp,#9*4]
+ ldr r1,[sp,#2*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#9*4]
+ mov r0,r7,ror#6
+ eor r0,r0,r7,ror#11
+ eor r0,r0,r7,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r8,r9
+ and r2,r2,r7
+ eor r2,r2,r9 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r10
+ add r3,r3,r12
+ mov r10,r11,ror#2
+ eor r10,r10,r11,ror#13
+ eor r10,r10,r11,ror#22 @ Sigma0(a)
+ orr r0,r11,r4
+ and r0,r0,r5
+ and r2,r11,r4
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r10,r10,r0
+ add r6,r6,r3
+ add r10,r10,r3
+ ldr r2,[sp,#11*4] @ 26
+ ldr r12,[sp,#8*4]
+ ldr r3,[sp,#10*4]
+ ldr r1,[sp,#3*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#10*4]
+ mov r0,r6,ror#6
+ eor r0,r0,r6,ror#11
+ eor r0,r0,r6,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r7,r8
+ and r2,r2,r6
+ eor r2,r2,r8 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r9
+ add r3,r3,r12
+ mov r9,r10,ror#2
+ eor r9,r9,r10,ror#13
+ eor r9,r9,r10,ror#22 @ Sigma0(a)
+ orr r0,r10,r11
+ and r0,r0,r4
+ and r2,r10,r11
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r9,r9,r0
+ add r5,r5,r3
+ add r9,r9,r3
+ ldr r2,[sp,#12*4] @ 27
+ ldr r12,[sp,#9*4]
+ ldr r3,[sp,#11*4]
+ ldr r1,[sp,#4*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#11*4]
+ mov r0,r5,ror#6
+ eor r0,r0,r5,ror#11
+ eor r0,r0,r5,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r6,r7
+ and r2,r2,r5
+ eor r2,r2,r7 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r8
+ add r3,r3,r12
+ mov r8,r9,ror#2
+ eor r8,r8,r9,ror#13
+ eor r8,r8,r9,ror#22 @ Sigma0(a)
+ orr r0,r9,r10
+ and r0,r0,r11
+ and r2,r9,r10
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r8,r8,r0
+ add r4,r4,r3
+ add r8,r8,r3
+ ldr r2,[sp,#13*4] @ 28
+ ldr r12,[sp,#10*4]
+ ldr r3,[sp,#12*4]
+ ldr r1,[sp,#5*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#12*4]
+ mov r0,r4,ror#6
+ eor r0,r0,r4,ror#11
+ eor r0,r0,r4,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r5,r6
+ and r2,r2,r4
+ eor r2,r2,r6 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r7
+ add r3,r3,r12
+ mov r7,r8,ror#2
+ eor r7,r7,r8,ror#13
+ eor r7,r7,r8,ror#22 @ Sigma0(a)
+ orr r0,r8,r9
+ and r0,r0,r10
+ and r2,r8,r9
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r7,r7,r0
+ add r11,r11,r3
+ add r7,r7,r3
+ ldr r2,[sp,#14*4] @ 29
+ ldr r12,[sp,#11*4]
+ ldr r3,[sp,#13*4]
+ ldr r1,[sp,#6*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#13*4]
+ mov r0,r11,ror#6
+ eor r0,r0,r11,ror#11
+ eor r0,r0,r11,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r4,r5
+ and r2,r2,r11
+ eor r2,r2,r5 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r6
+ add r3,r3,r12
+ mov r6,r7,ror#2
+ eor r6,r6,r7,ror#13
+ eor r6,r6,r7,ror#22 @ Sigma0(a)
+ orr r0,r7,r8
+ and r0,r0,r9
+ and r2,r7,r8
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r6,r6,r0
+ add r10,r10,r3
+ add r6,r6,r3
+ ldr r2,[sp,#15*4] @ 30
+ ldr r12,[sp,#12*4]
+ ldr r3,[sp,#14*4]
+ ldr r1,[sp,#7*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#14*4]
+ mov r0,r10,ror#6
+ eor r0,r0,r10,ror#11
+ eor r0,r0,r10,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r11,r4
+ and r2,r2,r10
+ eor r2,r2,r4 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r5
+ add r3,r3,r12
+ mov r5,r6,ror#2
+ eor r5,r5,r6,ror#13
+ eor r5,r5,r6,ror#22 @ Sigma0(a)
+ orr r0,r6,r7
+ and r0,r0,r8
+ and r2,r6,r7
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r5,r5,r0
+ add r9,r9,r3
+ add r5,r5,r3
+ ldr r2,[sp,#0*4] @ 31
+ ldr r12,[sp,#13*4]
+ ldr r3,[sp,#15*4]
+ ldr r1,[sp,#8*4]
+ mov r0,r2,ror#7
+ eor r0,r0,r2,ror#18
+ eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
+ mov r2,r12,ror#17
+ eor r2,r2,r12,ror#19
+ eor r2,r2,r12,lsr#10 @ sigma1(X[i+14])
+ add r3,r3,r0
+ add r3,r3,r2
+ add r3,r3,r1
+ ldr r12,[r14],#4 @ *K256++
+ str r3,[sp,#15*4]
+ mov r0,r9,ror#6
+ eor r0,r0,r9,ror#11
+ eor r0,r0,r9,ror#25 @ Sigma1(e)
+ add r3,r3,r0
+ eor r2,r10,r11
+ and r2,r2,r9
+ eor r2,r2,r11 @ Ch(e,f,g)
+ add r3,r3,r2
+ add r3,r3,r4
+ add r3,r3,r12
+ mov r4,r5,ror#2
+ eor r4,r4,r5,ror#13
+ eor r4,r4,r5,ror#22 @ Sigma0(a)
+ orr r0,r5,r6
+ and r0,r0,r7
+ and r2,r5,r6
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r4,r4,r0
+ add r8,r8,r3
+ add r4,r4,r3
+ and r12,r12,#0xff
+ cmp r12,#0xf2
+ bne .Lrounds_16_xx
+
+ ldr r3,[sp,#16*4] @ pull ctx
+ ldr r0,[r3,#0]
+ ldr r2,[r3,#4]
+ ldr r12,[r3,#8]
+ add r4,r4,r0
+ ldr r0,[r3,#12]
+ add r5,r5,r2
+ ldr r2,[r3,#16]
+ add r6,r6,r12
+ ldr r12,[r3,#20]
+ add r7,r7,r0
+ ldr r0,[r3,#24]
+ add r8,r8,r2
+ ldr r2,[r3,#28]
+ add r9,r9,r12
+ ldr r1,[sp,#17*4] @ pull inp
+ ldr r12,[sp,#18*4] @ pull inp+len
+ add r10,r10,r0
+ add r11,r11,r2
+ stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
+ cmp r1,r12
+ sub r14,r14,#256 @ rewind Ktbl
+ bne .Loop
+
+ add sp,sp,#19*4 @ destroy frame
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.size sha256_block_data_order,.-sha256_block_data_order
+.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/sha/sha512-armv4.pl 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,398 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# SHA512 block procedure for ARMv4. September 2007.
+
+# This code is ~4.5 (four and a half) times faster than code generated
+# by gcc 3.4 and it spends ~72 clock cycles per byte.
+
+# Byte order [in]dependence. =========================================
+#
+# The caller is expected to maintain a specific *dword* order in
+# h[0-7], namely with the most significant dword at the *lower*
+# address, which is reflected in the two parameters below. The *byte*
+# order within these dwords is in turn whatever the *native* byte
+# order of the current platform is.
+$hi=0;
+$lo=4;
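+# E.g. h[0]=0x6a09e667f3bcc908 is expected as the two 32-bit words
+# {0x6a09e667,0xf3bcc908} at offsets {$hi,$lo}={+0,+4}, each word in
+# native byte order.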
+# ====================================================================
+
+$output=shift;
+open STDOUT,">$output";
+
+$ctx="r0";
+$inp="r1";
+$len="r2";
+$Tlo="r3";
+$Thi="r4";
+$Alo="r5";
+$Ahi="r6";
+$Elo="r7";
+$Ehi="r8";
+$t0="r9";
+$t1="r10";
+$t2="r11";
+$t3="r12";
+############ r13 is stack pointer
+$Ktbl="r14";
+############ r15 is program counter
+
+$Aoff=8*0;
+$Boff=8*1;
+$Coff=8*2;
+$Doff=8*3;
+$Eoff=8*4;
+$Foff=8*5;
+$Goff=8*6;
+$Hoff=8*7;
+$Xoff=8*8;
+
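+# Every 64-bit ROTR below is synthesized from 32-bit shifts on a
+# lo/hi register pair: for n<32,
+#	res.lo = lo>>n | hi<<(32-n)
+#	res.hi = hi>>n | lo<<(32-n)
+# and for n>32 the two halves swap roles with n-32 as the shift count;
+# the disjoint pieces are then merged with eor instead of orr. This is
+# what the LO/HI comment lines inside BODY_00_15 spell out.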
+sub BODY_00_15() {
+my $magic = shift;
+$code.=<<___;
+ ldr $t2,[sp,#$Hoff+0] @ h.lo
+ ldr $t3,[sp,#$Hoff+4] @ h.hi
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov $t0,$Elo,lsr#14
+ mov $t1,$Ehi,lsr#14
+ eor $t0,$t0,$Ehi,lsl#18
+ eor $t1,$t1,$Elo,lsl#18
+ eor $t0,$t0,$Elo,lsr#18
+ eor $t1,$t1,$Ehi,lsr#18
+ eor $t0,$t0,$Ehi,lsl#14
+ eor $t1,$t1,$Elo,lsl#14
+ eor $t0,$t0,$Ehi,lsr#9
+ eor $t1,$t1,$Elo,lsr#9
+ eor $t0,$t0,$Elo,lsl#23
+ eor $t1,$t1,$Ehi,lsl#23 @ Sigma1(e)
+ adds $Tlo,$Tlo,$t0
+ adc $Thi,$Thi,$t1 @ T += Sigma1(e)
+ adds $Tlo,$Tlo,$t2
+ adc $Thi,$Thi,$t3 @ T += h
+
+ ldr $t0,[sp,#$Foff+0] @ f.lo
+ ldr $t1,[sp,#$Foff+4] @ f.hi
+ ldr $t2,[sp,#$Goff+0] @ g.lo
+ ldr $t3,[sp,#$Goff+4] @ g.hi
+ str $Elo,[sp,#$Eoff+0]
+ str $Ehi,[sp,#$Eoff+4]
+ str $Alo,[sp,#$Aoff+0]
+ str $Ahi,[sp,#$Aoff+4]
+
+ eor $t0,$t0,$t2
+ eor $t1,$t1,$t3
+ and $t0,$t0,$Elo
+ and $t1,$t1,$Ehi
+ eor $t0,$t0,$t2
+ eor $t1,$t1,$t3 @ Ch(e,f,g)
+
+ ldr $t2,[$Ktbl,#4] @ K[i].lo
+ ldr $t3,[$Ktbl,#0] @ K[i].hi
+ ldr $Elo,[sp,#$Doff+0] @ d.lo
+ ldr $Ehi,[sp,#$Doff+4] @ d.hi
+
+ adds $Tlo,$Tlo,$t0
+ adc $Thi,$Thi,$t1 @ T += Ch(e,f,g)
+ adds $Tlo,$Tlo,$t2
+ adc $Thi,$Thi,$t3 @ T += K[i]
+ adds $Elo,$Elo,$Tlo
+ adc $Ehi,$Ehi,$Thi @ d += T
+
+ and $t0,$t2,#0xff
+ teq $t0,#$magic
+ orreq $Ktbl,$Ktbl,#1
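+	@ [Editor's note] $t2 still holds K[i].lo here; its low byte
+	@ identifies the final round of each phase: 0x94 is the low
+	@ byte of K[15].lo (0xcf692694) and 0x17 that of K[79].lo
+	@ (0x4a475817). Setting bit 0 of $Ktbl flags "done" without
+	@ spending a register on a loop counter.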
+
+ ldr $t2,[sp,#$Boff+0] @ b.lo
+ ldr $t3,[sp,#$Coff+0] @ c.lo
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov $t0,$Alo,lsr#28
+ mov $t1,$Ahi,lsr#28
+ eor $t0,$t0,$Ahi,lsl#4
+ eor $t1,$t1,$Alo,lsl#4
+ eor $t0,$t0,$Ahi,lsr#2
+ eor $t1,$t1,$Alo,lsr#2
+ eor $t0,$t0,$Alo,lsl#30
+ eor $t1,$t1,$Ahi,lsl#30
+ eor $t0,$t0,$Ahi,lsr#7
+ eor $t1,$t1,$Alo,lsr#7
+ eor $t0,$t0,$Alo,lsl#25
+ eor $t1,$t1,$Ahi,lsl#25 @ Sigma0(a)
+ adds $Tlo,$Tlo,$t0
+ adc $Thi,$Thi,$t1 @ T += Sigma0(a)
+
+ and $t0,$Alo,$t2
+ orr $Alo,$Alo,$t2
+ ldr $t1,[sp,#$Boff+4] @ b.hi
+ ldr $t2,[sp,#$Coff+4] @ c.hi
+ and $Alo,$Alo,$t3
+ orr $Alo,$Alo,$t0 @ Maj(a,b,c).lo
+ and $t3,$Ahi,$t1
+ orr $Ahi,$Ahi,$t1
+ and $Ahi,$Ahi,$t2
+ orr $Ahi,$Ahi,$t3 @ Maj(a,b,c).hi
+ adds $Alo,$Alo,$Tlo
+ adc $Ahi,$Ahi,$Thi @ h += T
+
+ sub sp,sp,#8
+ add $Ktbl,$Ktbl,#8
+___
+}
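+
+# [Editor's note] A minimal, optional cross-check of the Sigma1 hi/lo
+# decomposition used in BODY_00_15 above; a sketch assuming a Perl
+# built with 64-bit integers. It runs only when SHA512_SELFTEST is
+# set in the environment and prints to STDERR, so the generated
+# assembly on STDOUT is untouched. The $rotr helper and the sample
+# value are illustrative, not part of the original module.
+if ($ENV{SHA512_SELFTEST}) {
+	my $rotr = sub { my($x,$n)=@_; (($x>>$n)|($x<<(64-$n)))&0xffffffffffffffff };
+	my $e = 0x510e527fade682d1;	# SHA-512 initial e, used as sample input
+	my ($xlo,$xhi) = ($e&0xffffffff, $e>>32);
+	my $m = 0xffffffff;
+	# the two lines below transcribe the LO/HI comment lines verbatim
+	my $rlo = ($xlo>>14^($xhi<<18&$m)) ^ ($xlo>>18^($xhi<<14&$m)) ^ ($xhi>>9^($xlo<<23&$m));
+	my $rhi = ($xhi>>14^($xlo<<18&$m)) ^ ($xhi>>18^($xlo<<14&$m)) ^ ($xlo>>9^($xhi<<23&$m));
+	my $ref = $rotr->($e,14) ^ $rotr->($e,18) ^ $rotr->($e,41);
+	printf STDERR "Sigma1 split %s\n", ($ref>>32)==$rhi && ($ref&$m)==$rlo ? "OK" : "BROKEN";
+}
+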
+$code=<<___;
+.text
+.code 32
+.type K512,%object
+.align 5
+K512:
+.word 0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd
+.word 0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc
+.word 0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019
+.word 0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118
+.word 0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe
+.word 0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2
+.word 0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1
+.word 0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694
+.word 0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3
+.word 0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65
+.word 0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483
+.word 0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5
+.word 0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210
+.word 0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4
+.word 0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725
+.word 0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70
+.word 0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926
+.word 0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df
+.word 0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8
+.word 0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b
+.word 0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001
+.word 0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30
+.word 0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910
+.word 0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8
+.word 0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53
+.word 0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8
+.word 0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb
+.word 0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3
+.word 0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60
+.word 0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec
+.word 0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9
+.word 0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b
+.word 0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207
+.word 0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178
+.word 0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6
+.word 0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b
+.word 0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493
+.word 0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c
+.word 0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a
+.word 0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817
+.size K512,.-K512
+
+.global sha512_block_data_order
+.type sha512_block_data_order,%function
+sha512_block_data_order:
+ sub r3,pc,#8 @ sha512_block_data_order
+ add $len,$inp,$len,lsl#7 @ len to point at the end of inp
+ stmdb sp!,{r4-r12,lr}
+ sub $Ktbl,r3,#640 @ K512
+ sub sp,sp,#9*8
+
+ ldr $Elo,[$ctx,#$Eoff+$lo]
+ ldr $Ehi,[$ctx,#$Eoff+$hi]
+ ldr $t0, [$ctx,#$Goff+$lo]
+ ldr $t1, [$ctx,#$Goff+$hi]
+ ldr $t2, [$ctx,#$Hoff+$lo]
+ ldr $t3, [$ctx,#$Hoff+$hi]
+.Loop:
+ str $t0, [sp,#$Goff+0]
+ str $t1, [sp,#$Goff+4]
+ str $t2, [sp,#$Hoff+0]
+ str $t3, [sp,#$Hoff+4]
+ ldr $Alo,[$ctx,#$Aoff+$lo]
+ ldr $Ahi,[$ctx,#$Aoff+$hi]
+ ldr $Tlo,[$ctx,#$Boff+$lo]
+ ldr $Thi,[$ctx,#$Boff+$hi]
+ ldr $t0, [$ctx,#$Coff+$lo]
+ ldr $t1, [$ctx,#$Coff+$hi]
+ ldr $t2, [$ctx,#$Doff+$lo]
+ ldr $t3, [$ctx,#$Doff+$hi]
+ str $Tlo,[sp,#$Boff+0]
+ str $Thi,[sp,#$Boff+4]
+ str $t0, [sp,#$Coff+0]
+ str $t1, [sp,#$Coff+4]
+ str $t2, [sp,#$Doff+0]
+ str $t3, [sp,#$Doff+4]
+ ldr $Tlo,[$ctx,#$Foff+$lo]
+ ldr $Thi,[$ctx,#$Foff+$hi]
+ str $Tlo,[sp,#$Foff+0]
+ str $Thi,[sp,#$Foff+4]
+
+.L00_15:
+ ldrb $Tlo,[$inp,#7]
+ ldrb $t0, [$inp,#6]
+ ldrb $t1, [$inp,#5]
+ ldrb $t2, [$inp,#4]
+ ldrb $Thi,[$inp,#3]
+ ldrb $t3, [$inp,#2]
+ orr $Tlo,$Tlo,$t0,lsl#8
+ ldrb $t0, [$inp,#1]
+ orr $Tlo,$Tlo,$t1,lsl#16
+ ldrb $t1, [$inp],#8
+ orr $Tlo,$Tlo,$t2,lsl#24
+ orr $Thi,$Thi,$t3,lsl#8
+ orr $Thi,$Thi,$t0,lsl#16
+ orr $Thi,$Thi,$t1,lsl#24
+ str $Tlo,[sp,#$Xoff+0]
+ str $Thi,[sp,#$Xoff+4]
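+	@ [Editor's note] the eight ldrb-s above assemble X[i] byte-wise
+	@ in big-endian order, which is what keeps the module
+	@ endian-neutral.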
+___
+ &BODY_00_15(0x94);
+$code.=<<___;
+ tst $Ktbl,#1
+ beq .L00_15
+ bic $Ktbl,$Ktbl,#1
+
+.L16_79:
+ ldr $t0,[sp,#`$Xoff+8*(16-1)`+0]
+ ldr $t1,[sp,#`$Xoff+8*(16-1)`+4]
+ ldr $t2,[sp,#`$Xoff+8*(16-14)`+0]
+ ldr $t3,[sp,#`$Xoff+8*(16-14)`+4]
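+	@ [Editor's note] thanks to the sliding frame, [sp,#$Xoff+8*k]
+	@ is X[i-k]: the loads above fetch X[i-15] and X[i-2]; those
+	@ further below fetch X[i-7] and X[i-16], per the SHA-512
+	@ message schedule.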
+
+ @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+ @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+ @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
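+	@ (the (x)>>7 term is a plain shift, so it contributes no lo
+	@ bits to the high half, hence no lo<<25 in the HI line)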
+ mov $Tlo,$t0,lsr#1
+ mov $Thi,$t1,lsr#1
+ eor $Tlo,$Tlo,$t1,lsl#31
+ eor $Thi,$Thi,$t0,lsl#31
+ eor $Tlo,$Tlo,$t0,lsr#8
+ eor $Thi,$Thi,$t1,lsr#8
+ eor $Tlo,$Tlo,$t1,lsl#24
+ eor $Thi,$Thi,$t0,lsl#24
+ eor $Tlo,$Tlo,$t0,lsr#7
+ eor $Thi,$Thi,$t1,lsr#7
+ eor $Tlo,$Tlo,$t1,lsl#25
+
+ @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+ @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+ @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
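+	@ (likewise (x)>>6 is a plain shift, hence no lo<<26 in HI)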
+ mov $t0,$t2,lsr#19
+ mov $t1,$t3,lsr#19
+ eor $t0,$t0,$t3,lsl#13
+ eor $t1,$t1,$t2,lsl#13
+ eor $t0,$t0,$t3,lsr#29
+ eor $t1,$t1,$t2,lsr#29
+ eor $t0,$t0,$t2,lsl#3
+ eor $t1,$t1,$t3,lsl#3
+ eor $t0,$t0,$t2,lsr#6
+ eor $t1,$t1,$t3,lsr#6
+ eor $t0,$t0,$t3,lsl#26
+
+ ldr $t2,[sp,#`$Xoff+8*(16-9)`+0]
+ ldr $t3,[sp,#`$Xoff+8*(16-9)`+4]
+ adds $Tlo,$Tlo,$t0
+ adc $Thi,$Thi,$t1
+
+ ldr $t0,[sp,#`$Xoff+8*16`+0]
+ ldr $t1,[sp,#`$Xoff+8*16`+4]
+ adds $Tlo,$Tlo,$t2
+ adc $Thi,$Thi,$t3
+ adds $Tlo,$Tlo,$t0
+ adc $Thi,$Thi,$t1
+ str $Tlo,[sp,#$Xoff+0]
+ str $Thi,[sp,#$Xoff+4]
+___
+ &BODY_00_15(0x17);
+$code.=<<___;
+ tst $Ktbl,#1
+ beq .L16_79
+ bic $Ktbl,$Ktbl,#1
+
+ ldr $Tlo,[sp,#$Boff+0]
+ ldr $Thi,[sp,#$Boff+4]
+ ldr $t0, [$ctx,#$Aoff+$lo]
+ ldr $t1, [$ctx,#$Aoff+$hi]
+ ldr $t2, [$ctx,#$Boff+$lo]
+ ldr $t3, [$ctx,#$Boff+$hi]
+ adds $t0,$Alo,$t0
+ adc $t1,$Ahi,$t1
+ adds $t2,$Tlo,$t2
+ adc $t3,$Thi,$t3
+ str $t0, [$ctx,#$Aoff+$lo]
+ str $t1, [$ctx,#$Aoff+$hi]
+ str $t2, [$ctx,#$Boff+$lo]
+ str $t3, [$ctx,#$Boff+$hi]
+
+ ldr $Alo,[sp,#$Coff+0]
+ ldr $Ahi,[sp,#$Coff+4]
+ ldr $Tlo,[sp,#$Doff+0]
+ ldr $Thi,[sp,#$Doff+4]
+ ldr $t0, [$ctx,#$Coff+$lo]
+ ldr $t1, [$ctx,#$Coff+$hi]
+ ldr $t2, [$ctx,#$Doff+$lo]
+ ldr $t3, [$ctx,#$Doff+$hi]
+ adds $t0,$Alo,$t0
+ adc $t1,$Ahi,$t1
+ adds $t2,$Tlo,$t2
+ adc $t3,$Thi,$t3
+ str $t0, [$ctx,#$Coff+$lo]
+ str $t1, [$ctx,#$Coff+$hi]
+ str $t2, [$ctx,#$Doff+$lo]
+ str $t3, [$ctx,#$Doff+$hi]
+
+ ldr $Tlo,[sp,#$Foff+0]
+ ldr $Thi,[sp,#$Foff+4]
+ ldr $t0, [$ctx,#$Eoff+$lo]
+ ldr $t1, [$ctx,#$Eoff+$hi]
+ ldr $t2, [$ctx,#$Foff+$lo]
+ ldr $t3, [$ctx,#$Foff+$hi]
+ adds $Elo,$Elo,$t0
+ adc $Ehi,$Ehi,$t1
+ adds $t2,$Tlo,$t2
+ adc $t3,$Thi,$t3
+ str $Elo,[$ctx,#$Eoff+$lo]
+ str $Ehi,[$ctx,#$Eoff+$hi]
+ str $t2, [$ctx,#$Foff+$lo]
+ str $t3, [$ctx,#$Foff+$hi]
+
+ ldr $Alo,[sp,#$Goff+0]
+ ldr $Ahi,[sp,#$Goff+4]
+ ldr $Tlo,[sp,#$Hoff+0]
+ ldr $Thi,[sp,#$Hoff+4]
+ ldr $t0, [$ctx,#$Goff+$lo]
+ ldr $t1, [$ctx,#$Goff+$hi]
+ ldr $t2, [$ctx,#$Hoff+$lo]
+ ldr $t3, [$ctx,#$Hoff+$hi]
+ adds $t0,$Alo,$t0
+ adc $t1,$Ahi,$t1
+ adds $t2,$Tlo,$t2
+ adc $t3,$Thi,$t3
+ str $t0, [$ctx,#$Goff+$lo]
+ str $t1, [$ctx,#$Goff+$hi]
+ str $t2, [$ctx,#$Hoff+$lo]
+ str $t3, [$ctx,#$Hoff+$hi]
+
+ add sp,sp,#640
+ sub $Ktbl,$Ktbl,#640
+
+ teq $inp,$len
+ bne .Loop
+
+ add sp,sp,#8*9 @ destroy frame
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+.size sha512_block_data_order,.-sha512_block_data_order
+.asciz "SHA512 block transform for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+print $code;
+close STDOUT; # enforce flush
--- /dev/null 2009-04-24 06:09:48.000000000 -0700
+++ openssl-0.9.8h/crypto/0.9.9-dev/sha/sha512-armv4.s 2009-09-03 15:42:39.000000000 -0700
@@ -0,0 +1,415 @@
+.text
+.code 32
+.type K512,%object
+.align 5
+K512:
+.word 0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd
+.word 0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc
+.word 0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019
+.word 0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118
+.word 0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe
+.word 0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2
+.word 0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1
+.word 0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694
+.word 0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3
+.word 0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65
+.word 0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483
+.word 0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5
+.word 0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210
+.word 0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4
+.word 0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725
+.word 0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70
+.word 0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926
+.word 0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df
+.word 0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8
+.word 0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b
+.word 0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001
+.word 0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30
+.word 0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910
+.word 0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8
+.word 0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53
+.word 0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8
+.word 0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb
+.word 0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3
+.word 0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60
+.word 0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec
+.word 0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9
+.word 0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b
+.word 0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207
+.word 0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178
+.word 0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6
+.word 0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b
+.word 0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493
+.word 0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c
+.word 0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a
+.word 0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817
+.size K512,.-K512
+
+.global sha512_block_data_order
+.type sha512_block_data_order,%function
+sha512_block_data_order:
+ sub r3,pc,#8 @ sha512_block_data_order
+ add r2,r1,r2,lsl#7 @ len to point at the end of inp
+ stmdb sp!,{r4-r12,lr}
+ sub r14,r3,#640 @ K512
+ sub sp,sp,#9*8
+
+ ldr r7,[r0,#32+4]
+ ldr r8,[r0,#32+0]
+ ldr r9, [r0,#48+4]
+ ldr r10, [r0,#48+0]
+ ldr r11, [r0,#56+4]
+ ldr r12, [r0,#56+0]
+.Loop:
+ str r9, [sp,#48+0]
+ str r10, [sp,#48+4]
+ str r11, [sp,#56+0]
+ str r12, [sp,#56+4]
+ ldr r5,[r0,#0+4]
+ ldr r6,[r0,#0+0]
+ ldr r3,[r0,#8+4]
+ ldr r4,[r0,#8+0]
+ ldr r9, [r0,#16+4]
+ ldr r10, [r0,#16+0]
+ ldr r11, [r0,#24+4]
+ ldr r12, [r0,#24+0]
+ str r3,[sp,#8+0]
+ str r4,[sp,#8+4]
+ str r9, [sp,#16+0]
+ str r10, [sp,#16+4]
+ str r11, [sp,#24+0]
+ str r12, [sp,#24+4]
+ ldr r3,[r0,#40+4]
+ ldr r4,[r0,#40+0]
+ str r3,[sp,#40+0]
+ str r4,[sp,#40+4]
+
+.L00_15:
+ ldrb r3,[r1,#7]
+ ldrb r9, [r1,#6]
+ ldrb r10, [r1,#5]
+ ldrb r11, [r1,#4]
+ ldrb r4,[r1,#3]
+ ldrb r12, [r1,#2]
+ orr r3,r3,r9,lsl#8
+ ldrb r9, [r1,#1]
+ orr r3,r3,r10,lsl#16
+ ldrb r10, [r1],#8
+ orr r3,r3,r11,lsl#24
+ orr r4,r4,r12,lsl#8
+ orr r4,r4,r9,lsl#16
+ orr r4,r4,r10,lsl#24
+ str r3,[sp,#64+0]
+ str r4,[sp,#64+4]
+ ldr r11,[sp,#56+0] @ h.lo
+ ldr r12,[sp,#56+4] @ h.hi
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov r9,r7,lsr#14
+ mov r10,r8,lsr#14
+ eor r9,r9,r8,lsl#18
+ eor r10,r10,r7,lsl#18
+ eor r9,r9,r7,lsr#18
+ eor r10,r10,r8,lsr#18
+ eor r9,r9,r8,lsl#14
+ eor r10,r10,r7,lsl#14
+ eor r9,r9,r8,lsr#9
+ eor r10,r10,r7,lsr#9
+ eor r9,r9,r7,lsl#23
+ eor r10,r10,r8,lsl#23 @ Sigma1(e)
+ adds r3,r3,r9
+ adc r4,r4,r10 @ T += Sigma1(e)
+ adds r3,r3,r11
+ adc r4,r4,r12 @ T += h
+
+ ldr r9,[sp,#40+0] @ f.lo
+ ldr r10,[sp,#40+4] @ f.hi
+ ldr r11,[sp,#48+0] @ g.lo
+ ldr r12,[sp,#48+4] @ g.hi
+ str r7,[sp,#32+0]
+ str r8,[sp,#32+4]
+ str r5,[sp,#0+0]
+ str r6,[sp,#0+4]
+
+ eor r9,r9,r11
+ eor r10,r10,r12
+ and r9,r9,r7
+ and r10,r10,r8
+ eor r9,r9,r11
+ eor r10,r10,r12 @ Ch(e,f,g)
+
+ ldr r11,[r14,#4] @ K[i].lo
+ ldr r12,[r14,#0] @ K[i].hi
+ ldr r7,[sp,#24+0] @ d.lo
+ ldr r8,[sp,#24+4] @ d.hi
+
+ adds r3,r3,r9
+ adc r4,r4,r10 @ T += Ch(e,f,g)
+ adds r3,r3,r11
+ adc r4,r4,r12 @ T += K[i]
+ adds r7,r7,r3
+ adc r8,r8,r4 @ d += T
+
+ and r9,r11,#0xff
+ teq r9,#148
+ orreq r14,r14,#1
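+	@ 148 == 0x94, the low byte of K[15].lo (0xcf692694); bit 0 of
+	@ r14 flags the end of the first 16 rounds [Editor's note]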
+
+ ldr r11,[sp,#8+0] @ b.lo
+ ldr r12,[sp,#16+0] @ c.lo
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov r9,r5,lsr#28
+ mov r10,r6,lsr#28
+ eor r9,r9,r6,lsl#4
+ eor r10,r10,r5,lsl#4
+ eor r9,r9,r6,lsr#2
+ eor r10,r10,r5,lsr#2
+ eor r9,r9,r5,lsl#30
+ eor r10,r10,r6,lsl#30
+ eor r9,r9,r6,lsr#7
+ eor r10,r10,r5,lsr#7
+ eor r9,r9,r5,lsl#25
+ eor r10,r10,r6,lsl#25 @ Sigma0(a)
+ adds r3,r3,r9
+ adc r4,r4,r10 @ T += Sigma0(a)
+
+ and r9,r5,r11
+ orr r5,r5,r11
+ ldr r10,[sp,#8+4] @ b.hi
+ ldr r11,[sp,#16+4] @ c.hi
+ and r5,r5,r12
+ orr r5,r5,r9 @ Maj(a,b,c).lo
+ and r12,r6,r10
+ orr r6,r6,r10
+ and r6,r6,r11
+ orr r6,r6,r12 @ Maj(a,b,c).hi
+ adds r5,r5,r3
+ adc r6,r6,r4 @ h += T
+
+ sub sp,sp,#8
+ add r14,r14,#8
+ tst r14,#1
+ beq .L00_15
+ bic r14,r14,#1
+
+.L16_79:
+ ldr r9,[sp,#184+0]
+ ldr r10,[sp,#184+4]
+ ldr r11,[sp,#80+0]
+ ldr r12,[sp,#80+4]
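+	@ [Editor's note] the frame slides 8 bytes per round, so
+	@ [sp,#64+8*k] holds X[i-k]: 184 and 80 above are X[i-15] and
+	@ X[i-2]; 120 and 192 below are X[i-7] and X[i-16].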
+
+ @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+ @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+ @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
+ mov r3,r9,lsr#1
+ mov r4,r10,lsr#1
+ eor r3,r3,r10,lsl#31
+ eor r4,r4,r9,lsl#31
+ eor r3,r3,r9,lsr#8
+ eor r4,r4,r10,lsr#8
+ eor r3,r3,r10,lsl#24
+ eor r4,r4,r9,lsl#24
+ eor r3,r3,r9,lsr#7
+ eor r4,r4,r10,lsr#7
+ eor r3,r3,r10,lsl#25
+
+ @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+ @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+ @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+ mov r9,r11,lsr#19
+ mov r10,r12,lsr#19
+ eor r9,r9,r12,lsl#13
+ eor r10,r10,r11,lsl#13
+ eor r9,r9,r12,lsr#29
+ eor r10,r10,r11,lsr#29
+ eor r9,r9,r11,lsl#3
+ eor r10,r10,r12,lsl#3
+ eor r9,r9,r11,lsr#6
+ eor r10,r10,r12,lsr#6
+ eor r9,r9,r12,lsl#26
+
+ ldr r11,[sp,#120+0]
+ ldr r12,[sp,#120+4]
+ adds r3,r3,r9
+ adc r4,r4,r10
+
+ ldr r9,[sp,#192+0]
+ ldr r10,[sp,#192+4]
+ adds r3,r3,r11
+ adc r4,r4,r12
+ adds r3,r3,r9
+ adc r4,r4,r10
+ str r3,[sp,#64+0]
+ str r4,[sp,#64+4]
+ ldr r11,[sp,#56+0] @ h.lo
+ ldr r12,[sp,#56+4] @ h.hi
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov r9,r7,lsr#14
+ mov r10,r8,lsr#14
+ eor r9,r9,r8,lsl#18
+ eor r10,r10,r7,lsl#18
+ eor r9,r9,r7,lsr#18
+ eor r10,r10,r8,lsr#18
+ eor r9,r9,r8,lsl#14
+ eor r10,r10,r7,lsl#14
+ eor r9,r9,r8,lsr#9
+ eor r10,r10,r7,lsr#9
+ eor r9,r9,r7,lsl#23
+ eor r10,r10,r8,lsl#23 @ Sigma1(e)
+ adds r3,r3,r9
+ adc r4,r4,r10 @ T += Sigma1(e)
+ adds r3,r3,r11
+ adc r4,r4,r12 @ T += h
+
+ ldr r9,[sp,#40+0] @ f.lo
+ ldr r10,[sp,#40+4] @ f.hi
+ ldr r11,[sp,#48+0] @ g.lo
+ ldr r12,[sp,#48+4] @ g.hi
+ str r7,[sp,#32+0]
+ str r8,[sp,#32+4]
+ str r5,[sp,#0+0]
+ str r6,[sp,#0+4]
+
+ eor r9,r9,r11
+ eor r10,r10,r12
+ and r9,r9,r7
+ and r10,r10,r8
+ eor r9,r9,r11
+ eor r10,r10,r12 @ Ch(e,f,g)
+
+ ldr r11,[r14,#4] @ K[i].lo
+ ldr r12,[r14,#0] @ K[i].hi
+ ldr r7,[sp,#24+0] @ d.lo
+ ldr r8,[sp,#24+4] @ d.hi
+
+ adds r3,r3,r9
+ adc r4,r4,r10 @ T += Ch(e,f,g)
+ adds r3,r3,r11
+ adc r4,r4,r12 @ T += K[i]
+ adds r7,r7,r3
+ adc r8,r8,r4 @ d += T
+
+ and r9,r11,#0xff
+ teq r9,#23
+ orreq r14,r14,#1
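+	@ 23 == 0x17, the low byte of K[79].lo (0x4a475817); bit 0 of
+	@ r14 now flags the end of round 79 [Editor's note]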
+
+ ldr r11,[sp,#8+0] @ b.lo
+ ldr r12,[sp,#16+0] @ c.lo
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov r9,r5,lsr#28
+ mov r10,r6,lsr#28
+ eor r9,r9,r6,lsl#4
+ eor r10,r10,r5,lsl#4
+ eor r9,r9,r6,lsr#2
+ eor r10,r10,r5,lsr#2
+ eor r9,r9,r5,lsl#30
+ eor r10,r10,r6,lsl#30
+ eor r9,r9,r6,lsr#7
+ eor r10,r10,r5,lsr#7
+ eor r9,r9,r5,lsl#25
+ eor r10,r10,r6,lsl#25 @ Sigma0(a)
+ adds r3,r3,r9
+ adc r4,r4,r10 @ T += Sigma0(a)
+
+ and r9,r5,r11
+ orr r5,r5,r11
+ ldr r10,[sp,#8+4] @ b.hi
+ ldr r11,[sp,#16+4] @ c.hi
+ and r5,r5,r12
+ orr r5,r5,r9 @ Maj(a,b,c).lo
+ and r12,r6,r10
+ orr r6,r6,r10
+ and r6,r6,r11
+ orr r6,r6,r12 @ Maj(a,b,c).hi
+ adds r5,r5,r3
+ adc r6,r6,r4 @ h += T
+
+ sub sp,sp,#8
+ add r14,r14,#8
+ tst r14,#1
+ beq .L16_79
+ bic r14,r14,#1
+
+ ldr r3,[sp,#8+0]
+ ldr r4,[sp,#8+4]
+ ldr r9, [r0,#0+4]
+ ldr r10, [r0,#0+0]
+ ldr r11, [r0,#8+4]
+ ldr r12, [r0,#8+0]
+ adds r9,r5,r9
+ adc r10,r6,r10
+ adds r11,r3,r11
+ adc r12,r4,r12
+ str r9, [r0,#0+4]
+ str r10, [r0,#0+0]
+ str r11, [r0,#8+4]
+ str r12, [r0,#8+0]
+
+ ldr r5,[sp,#16+0]
+ ldr r6,[sp,#16+4]
+ ldr r3,[sp,#24+0]
+ ldr r4,[sp,#24+4]
+ ldr r9, [r0,#16+4]
+ ldr r10, [r0,#16+0]
+ ldr r11, [r0,#24+4]
+ ldr r12, [r0,#24+0]
+ adds r9,r5,r9
+ adc r10,r6,r10
+ adds r11,r3,r11
+ adc r12,r4,r12
+ str r9, [r0,#16+4]
+ str r10, [r0,#16+0]
+ str r11, [r0,#24+4]
+ str r12, [r0,#24+0]
+
+ ldr r3,[sp,#40+0]
+ ldr r4,[sp,#40+4]
+ ldr r9, [r0,#32+4]
+ ldr r10, [r0,#32+0]
+ ldr r11, [r0,#40+4]
+ ldr r12, [r0,#40+0]
+ adds r7,r7,r9
+ adc r8,r8,r10
+ adds r11,r3,r11
+ adc r12,r4,r12
+ str r7,[r0,#32+4]
+ str r8,[r0,#32+0]
+ str r11, [r0,#40+4]
+ str r12, [r0,#40+0]
+
+ ldr r5,[sp,#48+0]
+ ldr r6,[sp,#48+4]
+ ldr r3,[sp,#56+0]
+ ldr r4,[sp,#56+4]
+ ldr r9, [r0,#48+4]
+ ldr r10, [r0,#48+0]
+ ldr r11, [r0,#56+4]
+ ldr r12, [r0,#56+0]
+ adds r9,r5,r9
+ adc r10,r6,r10
+ adds r11,r3,r11
+ adc r12,r4,r12
+ str r9, [r0,#48+4]
+ str r10, [r0,#48+0]
+ str r11, [r0,#56+4]
+ str r12, [r0,#56+0]
+
+ add sp,sp,#640
+ sub r14,r14,#640
+
+ teq r1,r2
+ bne .Loop
+
+ add sp,sp,#8*9 @ destroy frame
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+.size sha512_block_data_order,.-sha512_block_data_order
+.asciz "SHA512 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"