| @/***************************************************************************** |
| @* |
| @* Copyright (C) 2012 Ittiam Systems Pvt Ltd, Bangalore |
| @* |
| @* Licensed under the Apache License, Version 2.0 (the "License"); |
| @* you may not use this file except in compliance with the License. |
| @* You may obtain a copy of the License at: |
| @* |
| @* http://www.apache.org/licenses/LICENSE-2.0 |
| @* |
| @* Unless required by applicable law or agreed to in writing, software |
| @* distributed under the License is distributed on an "AS IS" BASIS, |
| @* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| @* See the License for the specific language governing permissions and |
| @* limitations under the License. |
| @* |
| @*****************************************************************************/ |
| @/** |
| @******************************************************************************* |
| @* ,:file |
| @* ihevc_sao_edge_offset_class3_chroma.s |
| @* |
| @* ,:brief |
| @* Contains function definitions for SAO edge offset class 3 for chroma.
| @* Functions are coded in ARM NEON assembly and can be assembled using ARM
| @* RVCT
| @* |
| @* ,:author |
| @* Parthiban V |
| @* |
| @* ,:par List of Functions:
| @*  ihevc_sao_edge_offset_class3_chroma_a9q()
| @* |
| @* ,:remarks |
| @* None |
| @* |
| @******************************************************************************* |
| @*/ |
| @void ihevc_sao_edge_offset_class3_chroma(UWORD8 *pu1_src, |
| @ WORD32 src_strd, |
| @ UWORD8 *pu1_src_left, |
| @ UWORD8 *pu1_src_top, |
| @ UWORD8 *pu1_src_top_left, |
| @ UWORD8 *pu1_src_top_right, |
| @ UWORD8 *pu1_src_bot_left, |
| @ UWORD8 *pu1_avail, |
| @ WORD8 *pi1_sao_offset_u, |
| @ WORD8 *pi1_sao_offset_v, |
| @ WORD32 wd, |
| @ WORD32 ht) |
| @**************Variables Vs Registers***************************************** |
| @r0 => *pu1_src |
| @r1 => src_strd |
| @r2 => *pu1_src_left |
| @r3 => *pu1_src_top |
| @r4 => *pu1_src_top_left |
| @r5 => *pu1_avail |
| @r6 => *pi1_sao_offset_u |
| @r9 => *pi1_sao_offset_v |
| @r7 => wd |
| @r8 => ht
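| @
| @ A scalar sketch of the per-pixel operation implemented below (reference
| @ only; 8-bit samples assumed, names follow the comments in this file).
| @ SAO edge offset class 3 compares each pixel with its upper-right and
| @ lower-left diagonal neighbours; the chroma plane is interleaved UVUV...,
| @ so one horizontal step is +/- 2 bytes and U and V use separate offset
| @ tables.
| @
| @     sign_up   = SIGN(src[x] - src[x - src_strd + 2])
| @     sign_down = SIGN(src[x] - src[x + src_strd - 2])
| @     edge_idx  = gi1_table_edge_idx[2 + sign_up + sign_down]
| @     if(edge_idx != 0)
| @         src[x] = CLIP3(src[x] + offset[edge_idx], 0, (1 << bit_depth) - 1)
| @
| @ where offset is pi1_sao_offset_u for U samples (even x) and
| @ pi1_sao_offset_v for V samples (odd x).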
| |
| .text |
| .syntax unified |
| .p2align 2 |
| |
| .extern gi1_table_edge_idx |
| .globl ihevc_sao_edge_offset_class3_chroma_a9q |
| |
| gi1_table_edge_idx_addr_1: |
| .long gi1_table_edge_idx - ulbl1 - 8 |
| |
| gi1_table_edge_idx_addr_2: |
| .long gi1_table_edge_idx - ulbl2 - 8 |
| |
| gi1_table_edge_idx_addr_3: |
| .long gi1_table_edge_idx - ulbl3 - 8 |
| |
| gi1_table_edge_idx_addr_4: |
| .long gi1_table_edge_idx - ulbl4 - 8 |
| |
| gi1_table_edge_idx_addr_5: |
| .long gi1_table_edge_idx - ulbl5 - 8 |
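| @ The .long constants above are PC-relative offsets to gi1_table_edge_idx.
| @ Each is loaded with LDR and then added to PC at the matching ulblN label;
| @ in ARM state PC reads as the address of that ADD plus 8, so the
| @ "- ulblN - 8" correction makes the sum the absolute address of the table,
| @ keeping the access position independent.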
| |
| ihevc_sao_edge_offset_class3_chroma_a9q: |
| |
| |
| STMFD sp!,{r4-r12,r14} @stack stores the values of the arguments |
| |
| LDR r7,[sp,#0x40] @Loads wd |
| LDR r8,[sp,#0x44] @Loads ht |
| SUB r9,r7,#2 @wd - 2 |
| |
| LDR r4,[sp,#0x28] @Loads pu1_src_top_left |
| LDRH r10,[r3,r9] @pu1_src_top[wd - 2] |
| |
| MOV r9,r7 @Move width to r9 for loop count |
| |
| LDR r5,[sp,#0x34] @Loads pu1_avail |
| LDR r6,[sp,#0x38] @Loads pi1_sao_offset_u |
| |
| STR r3,[sp,#0x38] @Store pu1_src_top in sp |
| SUB sp,sp,#0xD4 @Decrement the stack pointer to store some temp array values
| |
| STRH r10,[sp] @u1_src_top_left_tmp = pu1_src_top[wd - 2] |
| SUB r10,r8,#1 @ht-1 |
| MLA r11,r10,r1,r0 @pu1_src[(ht - 1) * src_strd + col] |
| ADD r12,sp,#10 @temp array |
| |
| AU1_SRC_TOP_LOOP: |
| VLD1.8 D0,[r11]! @pu1_src[(ht - 1) * src_strd + col] |
| SUBS r9,r9,#8 @Decrement the loop count by 8 |
| VST1.8 D0,[r12]! @au1_src_top_tmp[col] = pu1_src[(ht - 1) * src_strd + col] |
| BNE AU1_SRC_TOP_LOOP |
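| @ SAO is applied in place, so the last row of the block (the top neighbour
| @ of the block below) is saved into au1_src_top_tmp on the stack before it
| @ can be overwritten; SRC_TOP_LOOP at the end of the function copies it back
| @ into pu1_src_top. pu1_src_top[wd - 2] is likewise parked in
| @ u1_src_top_left_tmp and later restored into *pu1_src_top_left.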
| |
| PU1_AVAIL_5_LOOP_U: |
| LDRB r9,[r5,#5] @pu1_avail[5] |
| CMP r9,#0 |
| SUB r14,r7,#2 @[wd - 2] |
| LDRB r9,[r0,r14] @u1_pos_0_0_tmp_u = pu1_src[wd - 2] |
| SUB r11,r7,#1 @[wd - 1] |
| LDRB r10,[r0,r11] @u1_pos_0_0_tmp_v = pu1_src[wd - 1] |
| BEQ PU1_AVAIL_6_LOOP_U |
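| @ When pu1_avail[5] flags the top-right neighbour as available, the top-right
| @ chroma pair pu1_src[wd - 2] (U) and pu1_src[wd - 1] (V) is filtered here in
| @ scalar code, using pu1_src_top_right as the upper diagonal neighbour. The
| @ results are parked on the stack (sp + 6 / sp + 7) and written back only in
| @ RE_ASSINING_LOOP, after the in-place vector pass has run. The same pattern
| @ handles the bottom-left pair under pu1_avail[6] (sp + 8 / sp + 9).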
| |
| LDR r11,[sp,#0x100] @Load pu1_src_top_right from sp |
| LDRB r11,[r11] @pu1_src_top_right[0] |
| SUB r12,r9,r11 @pu1_src[wd - 2] - pu1_src_top_right[0] |
| CMP r12,#0 |
| MVNLT r12,#0 |
| MOVGT r12,#1 @SIGN(pu1_src[wd - 2] - pu1_src_top_right[0]) |
| ADD r11,r0,r1 @pu1_src + src_strd |
| SUB r14,r14,#2 @[wd - 2 - 2] |
| LDRB r14,[r11,r14] @pu1_src[wd - 2 - 2 + src_strd] |
| SUB r11,r9,r14 @pu1_src[wd - 2] - pu1_src[wd - 2 - 2 + src_strd] |
| CMP r11,#0 |
| MVNLT r11,#0 |
| MOVGT r11,#1 @SIGN(pu1_src[wd - 2] - pu1_src[wd - 2 - 2 + src_strd]) |
| ADD r11,r12,r11 @SIGN(pu1_src[wd - 2] - pu1_src_top_right[0]) + SIGN(pu1_src[wd - 2] - pu1_src[wd - 2 - 2 + src_strd]) |
| ADD r11,r11,#2 @edge_idx |
| LDR r14, gi1_table_edge_idx_addr_1 @table pointer |
| ulbl1: |
| add r14,r14,pc |
| |
| LDRSB r12,[r14,r11] @edge_idx = gi1_table_edge_idx[edge_idx] |
| CMP r12,#0 @0 != edge_idx |
| BEQ PU1_AVAIL_5_LOOP_V |
| LDRSB r11,[r6,r12] @pi1_sao_offset_u[edge_idx] |
| ADD r9,r9,r11 @pu1_src[wd - 2] + pi1_sao_offset_u[edge_idx] |
| USAT r9,#8,r9 @u1_pos_0_0_tmp_u = CLIP3(pu1_src[wd - 2] + pi1_sao_offset_u[edge_idx], 0, (1 << bit_depth) - 1)
| |
| PU1_AVAIL_5_LOOP_V: |
| |
| LDR r11,[sp,#0x100] @Load pu1_src_top_right from sp |
| LDRB r11,[r11,#1] @pu1_src_top_right[1] |
| SUB r12,r10,r11 @pu1_src[wd - 1] - pu1_src_top_right[1] |
| CMP r12,#0 |
| MVNLT r12,#0 |
| MOVGT r12,#1 @SIGN(pu1_src[wd - 1] - pu1_src_top_right[1]) |
| ADD r11,r0,r1 @pu1_src + src_strd |
| SUB r14,r7,#3 @[wd - 1 - 2] |
| LDRB r14,[r11,r14] @pu1_src[wd - 1 - 2 + src_strd] |
| SUB r11,r10,r14 @pu1_src[wd - 1] - pu1_src[wd - 1 - 2 + src_strd] |
| CMP r11,#0 |
| MVNLT r11,#0 |
| MOVGT r11,#1 @SIGN(pu1_src[wd - 1] - pu1_src[wd - 1 - 2 + src_strd]) |
| ADD r11,r12,r11 @SIGN(pu1_src[wd - 1] - pu1_src_top_right[1]) + SIGN(pu1_src[wd - 1] - pu1_src[wd - 1 - 2 + src_strd]) |
| ADD r11,r11,#2 @edge_idx |
| LDR r14, gi1_table_edge_idx_addr_2 @table pointer |
| ulbl2: |
| add r14,r14,pc |
| |
| LDRSB r12,[r14,r11] @edge_idx = gi1_table_edge_idx[edge_idx] |
| CMP r12,#0 @0 != edge_idx |
| BEQ PU1_AVAIL_6_LOOP_U |
| LDR r11,[sp,#0x110] @Loads pi1_sao_offset_v |
| LDRSB r11,[r11,r12] @pi1_sao_offset_v[edge_idx] |
| ADD r10,r10,r11 @pu1_src[wd - 1] + pi1_sao_offset_v[edge_idx] |
| USAT r10,#8,r10 @u1_pos_0_0_tmp_v = CLIP3(pu1_src[wd - 1] + pi1_sao_offset_v[edge_idx], 0, (1 << bit_depth) - 1) |
| |
| PU1_AVAIL_6_LOOP_U: |
| STRB r9,[sp,#6] |
| STRB r10,[sp,#7] |
| STR r0,[sp,#0x100] @Store pu1_src in sp |
| |
| LDRB r10,[r5,#6] @pu1_avail[6] |
| CMP r10,#0 |
| SUB r11,r8,#1 @ht - 1 |
| MLA r12,r11,r1,r0 @pu1_src[(ht - 1) * src_strd] |
| LDRB r10,[r12] @u1_pos_wd_ht_tmp_u = pu1_src[(ht - 1) * src_strd] |
| LDRB r9,[r12,#1] @u1_pos_wd_ht_tmp_v = pu1_src[(ht - 1) * src_strd + 1] |
| BEQ PU1_AVAIL_3_LOOP |
| |
| SUB r11,r12,r1 @pu1_src[(ht - 1) * src_strd - src_strd] |
| ADD r11,r11,#2 @pu1_src[(ht - 1) * src_strd + 2 - src_strd] |
| LDRB r11,[r11] @Load pu1_src[(ht - 1) * src_strd + 2 - src_strd] |
| SUB r11,r10,r11 @pu1_src[(ht - 1) * src_strd] - pu1_src[(ht - 1) * src_strd + 2 - src_strd] |
| CMP r11,#0 |
| MVNLT r11,#0 |
| MOVGT r11,#1 @SIGN(pu1_src[(ht - 1) * src_strd] - pu1_src[(ht - 1) * src_strd + 2 - src_strd]) |
| |
| LDR r14,[sp,#0x104] @Load pu1_src_bot_left from sp |
| LDRB r14,[r14] @Load pu1_src_bot_left[0] |
| SUB r14,r10,r14 @pu1_src[(ht - 1) * src_strd] - pu1_src_bot_left[0] |
| CMP r14,#0 |
| MVNLT r14,#0 |
| MOVGT r14,#1 @SIGN(pu1_src[(ht - 1) * src_strd] - pu1_src_bot_left[0]) |
| |
| ADD r11,r11,r14 @Add 2 sign value |
| ADD r11,r11,#2 @edge_idx |
| LDR r14, gi1_table_edge_idx_addr_3 @table pointer |
| ulbl3: |
| add r14,r14,pc |
| |
| LDRSB r14,[r14,r11] @edge_idx = gi1_table_edge_idx[edge_idx] |
| CMP r14,#0 |
| BEQ PU1_AVAIL_6_LOOP_V |
| LDRSB r11,[r6,r14] @pi1_sao_offset_u[edge_idx] |
| ADD r10,r10,r11 @pu1_src[(ht - 1) * src_strd] + pi1_sao_offset_u[edge_idx]
| USAT r10,#8,r10 @u1_pos_wd_ht_tmp_u = CLIP3(pu1_src[(ht - 1) * src_strd] + pi1_sao_offset_u[edge_idx], 0, (1 << bit_depth) - 1)
| |
| PU1_AVAIL_6_LOOP_V: |
| ADD r12,r12,#1 @pu1_src[(ht - 1) * src_strd + 1] |
| SUB r11,r12,r1 @pu1_src[(ht - 1) * src_strd + 1 - src_strd]
| ADD r11,r11,#2 @pu1_src[(ht - 1) * src_strd + 2 - src_strd] |
| LDRB r11,[r11] @Load pu1_src[(ht - 1) * src_strd + 2 - src_strd] |
| SUB r11,r9,r11 @pu1_src[(ht - 1) * src_strd + 1] - pu1_src[(ht - 1) * src_strd + 1 + 2 - src_strd] |
| CMP r11,#0 |
| MVNLT r11,#0 |
| MOVGT r11,#1 @SIGN(pu1_src[(ht - 1) * src_strd + 1] - pu1_src[(ht - 1) * src_strd + 1 + 2 - src_strd]) |
| |
| LDR r14,[sp,#0x104] @Load pu1_src_bot_left from sp |
| LDRB r14,[r14,#1] @Load pu1_src_bot_left[1] |
| SUB r14,r9,r14 @pu1_src[(ht - 1) * src_strd + 1] - pu1_src_bot_left[1] |
| CMP r14,#0 |
| MVNLT r14,#0 |
| MOVGT r14,#1 @SIGN(pu1_src[(ht - 1) * src_strd + 1] - pu1_src_bot_left[1]) |
| |
| ADD r11,r11,r14 @Add 2 sign value |
| ADD r11,r11,#2 @edge_idx |
| LDR r14, gi1_table_edge_idx_addr_4 @table pointer |
| ulbl4: |
| add r14,r14,pc |
| |
| LDRSB r12,[r14,r11] @edge_idx = gi1_table_edge_idx[edge_idx] |
| CMP r12,#0 |
| BEQ PU1_AVAIL_3_LOOP |
| LDR r14,[sp,#0x110] @Loads pi1_sao_offset_v |
| LDRSB r11,[r14,r12] @pi1_sao_offset_v[edge_idx] |
| ADD r9,r9,r11 @pu1_src[(ht - 1) * src_strd + 1] + pi1_sao_offset_v[edge_idx]
| USAT r9,#8,r9 @u1_pos_wd_ht_tmp_v = CLIP3(pu1_src[(ht - 1) * src_strd + 1] + pi1_sao_offset_v[edge_idx], 0, (1 << bit_depth) - 1)
| |
| PU1_AVAIL_3_LOOP: |
| STRB r10,[sp,#8] |
| STRB r9,[sp,#9] |
| STR r2,[sp,#0x104] @Store pu1_src_left in sp |
| |
| MOV r12,r8 @Move ht |
| MOV r14,r2 @Move pu1_src_left to pu1_src_left_cpy |
| LDRB r11,[r5,#3] @pu1_avail[3] |
| CMP r11,#0 |
| BNE PU1_AVAIL_2_LOOP |
| SUB r12,r12,#1 @ht_tmp-- |
| |
| PU1_AVAIL_2_LOOP: |
| LDRB r5,[r5,#2] @pu1_avail[2] |
| CMP r5,#0 |
| BNE PU1_AVAIL_2_LOOP_END |
| |
| ADD r0,r0,r1 @pu1_src += src_strd |
| SUB r12,r12,#1 @ht_tmp-- |
| ADD r14,r14,#2 @pu1_src_left_cpy += 2 |
| |
| PU1_AVAIL_2_LOOP_END: |
| STR r0,[sp,#2] @Store pu1_src in sp |
| VMOV.I8 Q0,#2 @const_2 = vdupq_n_s8(2) |
| VMOV.I16 Q1,#0 @const_min_clip = vdupq_n_s16(0) |
| VMOV.I16 Q2,#255 @const_max_clip = vdupq_n_u16((1 << bit_depth) - 1) |
| VLD1.8 D6,[r6] @offset_tbl_u = vld1_s8(pi1_sao_offset_u) |
| LDR r6,[sp,#0x110] @Loads pi1_sao_offset_v |
| VLD1.8 D7,[r6] @offset_tbl_v = vld1_s8(pi1_sao_offset_v) |
| LDR r2, gi1_table_edge_idx_addr_5 @table pointer |
| ulbl5: |
| add r2,r2,pc |
| @VLD1.8 D6,[r6] @edge_idx_tbl = vld1_s8(gi1_table_edge_idx) |
| VMOV.S8 Q4,#0xFF @au1_mask = vdupq_n_s8(-1) |
| MOV r6,r7 @move wd to r6 loop_count |
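| @ Dispatch below: the block is processed in vertical stripes of 16 pixels
| @ (8 chroma pairs) by WIDTH_LOOP_16; widths below 16 go straight to
| @ WIDTH_RESIDUE, and heights of 4 or less use the simpler WD_16_HT_4_LOOP
| @ variant.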
| |
| CMP r7,#16 @Compare wd with 16 |
| BLT WIDTH_RESIDUE @If wd < 16 jump to WIDTH_RESIDUE where the loop is unrolled for the 8-pixel case
| CMP r8,#4 @Compare ht with 4 |
| BLE WD_16_HT_4_LOOP @If ht <= 4 jump to WD_16_HT_4_LOOP
| |
| WIDTH_LOOP_16: |
| LDR r7,[sp,#0x114] @Loads wd |
| CMP r6,r7 @col == wd |
| LDR r5,[sp,#0x108] @Loads pu1_avail |
| |
| LDRBEQ r8,[r5] @pu1_avail[0] |
| MOVNE r8,#-1 |
| |
| VMOV.8 D8[0],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0) |
| LDRB r11,[r5,#2] @pu1_avail[2] |
| |
| CMP r6,#16 @if(col == 16) |
| VMOV.8 D8[1],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 1)
| |
| BNE SKIP_AU1_MASK_VAL |
| LDRB r8,[r5,#1] @pu1_avail[1] |
| VMOV.8 D9[6],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 14)
| VMOV.8 D9[7],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15) |
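| @ au1_mask zeroes edge_idx for lanes that sit on an unavailable block edge:
| @ lanes 0-1 (first U/V pair) take pu1_avail[0] on the leftmost stripe and
| @ lanes 14-15 (last U/V pair) take pu1_avail[1] on the rightmost stripe,
| @ -1 (all ones) otherwise. A zeroed edge_idx selects offset index 0, whose
| @ offset is zero, so those boundary pixels pass through unchanged.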
| |
| SKIP_AU1_MASK_VAL: |
| CMP r11,#0 |
| VLD1.8 D12,[r0]! @pu1_cur_row = vld1q_u8(pu1_src) |
| VLD1.8 D13,[r0] @pu1_cur_row = vld1q_u8(pu1_src) |
| SUB r0,#8 |
| ADD r5,sp,#0x4B @*au1_src_left_tmp |
| |
| SUBEQ r8,r0,r1 @pu1_src - src_strd |
| VMOV.I8 Q9,#0 |
| MOVNE r8,r3 |
| |
| ADD r8,r8,#2 @pu1_src - src_strd + 2 |
| VLD1.8 D10,[r8]! @pu1_top_row = vld1q_u8(pu1_src - src_strd + 2) |
| VLD1.8 D11,[r8] @pu1_top_row = vld1q_u8(pu1_src - src_strd + 2) |
| SUB r8,#8 |
| ADD r3,r3,#16 |
| |
| LDR r4,[sp,#0x118] @Loads ht |
| VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row) |
| LDR r7,[sp,#0x114] @Loads wd |
| |
| SUB r7,r7,r6 @(wd - col) |
| VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row) |
| ADD r7,r7,#14 @15 + (wd - col) |
| |
| LDR r8,[sp,#0x100] @Loads *pu1_src |
| VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) |
| ADD r7,r8,r7 @pu1_src[0 * src_strd + 15 + (wd - col)] |
| |
| AU1_SRC_LEFT_LOOP: |
| LDRH r8,[r7] @load the value and increment by src_strd |
| SUBS r4,r4,#1 @decrement the loop count |
| |
| STRH r8,[r5],#2 @store it in the stack pointer |
| ADD r7,r7,r1 |
| BNE AU1_SRC_LEFT_LOOP |
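| @ AU1_SRC_LEFT_LOOP saves, for every row, the rightmost chroma pair of the
| @ stripe being processed into au1_src_left_tmp before it is filtered in
| @ place; SRC_LEFT_LOOP later copies it into pu1_src_left so the block to the
| @ right sees unfiltered left-neighbour samples.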
| |
| |
| MOV r7,r12 @row count, move ht_tmp to r7 |
| VMOV.I8 Q9,#0 @I |
| ADD r11,r0,r1 @I *pu1_src + src_strd |
| |
| SUB r5,r12,r7 @I ht_tmp - row |
| VLD1.8 D16,[r11]! @I pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| VLD1.8 D17,[r11] @I pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| SUB r11,#8 |
| ADD r8,r14,r5,LSL #1 @I pu1_src_left_cpy[(ht_tmp - row) * 2] |
| |
| LDRH r5,[r8,#2] @I |
| VMOV.16 D19[3],r5 @I vsetq_lane_u8 |
| LDR r11,[sp,#0x108] @I Loads pu1_avail |
| |
| LDRB r11,[r11,#2] @I pu1_avail[2] |
| VEXT.8 Q9,Q9,Q8,#14 @I pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14) |
| CMP r11,#0 @I |
| BNE SIGN_UP_CHANGE_DONE @I |
| |
| LDRB r8,[r0,#14] @I pu1_src_cpy[14] |
| SUB r5,r0,r1 @I |
| |
| LDRB r11,[r5,#16] @I load the value pu1_src_cpy[16 - src_strd] |
| |
| LDRB r9,[r0,#15] @I pu1_src_cpy[15] |
| SUB r8,r8,r11 @I pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd] |
| |
| LDRB r10,[r5,#17] @I load the value pu1_src_cpy[17 - src_strd] |
| CMP r8,#0 @I |
| |
| MVNLT r8,#0 @I |
| SUB r9,r9,r10 @I pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd] |
| |
| MOVGT r8,#1 @I SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]) |
| CMP r9,#0 @I |
| |
| MVNLT r9,#0 @I |
| VMOV.8 D15[6],r8 @I sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]), sign_up, 0)
| MOVGT r9,#1 @I SIGN(pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd])
| |
| VMOV.8 D15[7],r9 @I sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] -pu1_src_cpy[17 - src_strd]), sign_up, 1) |
| |
| SIGN_UP_CHANGE_DONE: |
| VLD1.8 D28,[r2] @edge_idx_tbl = vld1_s8(gi1_table_edge_idx) |
| VCGT.U8 Q10,Q6,Q9 @I vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) |
| |
| VCLT.U8 Q11,Q6,Q9 @I vcltq_u8(pu1_cur_row, pu1_next_row_tmp) |
| VSUB.U8 Q11,Q11,Q10 @I sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) |
| |
| VADD.I8 Q9,Q0,Q7 @I edge_idx = vaddq_s8(const_2, sign_up) |
| VADD.I8 Q9,Q9,Q11 @I edge_idx = vaddq_s8(edge_idx, sign_down) |
| VTBL.8 D18,{D28},D18 @I vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx)) |
| VNEG.S8 Q7,Q11 @I sign_up = vnegq_s8(sign_down) |
| |
| VTBL.8 D19,{D28},D19 @I vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx)) |
| VEXT.8 Q7,Q7,Q7,#2 @I sign_up = vextq_s8(sign_up, sign_up, 2) |
| |
| VMOVL.U8 Q10,D12 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) |
| VAND Q9,Q9,Q4 @I edge_idx = vandq_s8(edge_idx, au1_mask) |
| |
| VUZP.8 D18,D19 @I |
| VTBL.8 D22,{D6},D18 @I |
| VTBL.8 D23,{D7},D19 @I |
| VZIP.8 D22,D23 @I |
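| @ The VUZP/VTBL/VZIP triple above handles the interleaved chroma: edge
| @ indices are de-interleaved so D18 holds U indices and D19 holds V indices,
| @ each is looked up in its own offset table (D6 = offset_tbl_u,
| @ D7 = offset_tbl_v), and the offsets are re-interleaved back to UVUV order
| @ before being widened and added. Roughly, in intrinsics form (illustrative
| @ sketch only):
| @
| @     uint8x8x2_t t   = vuzp_u8(edge_lo, edge_hi);
| @     int8x8_t    o_u = vtbl1_s8(offset_tbl_u, vreinterpret_s8_u8(t.val[0]));
| @     int8x8_t    o_v = vtbl1_s8(offset_tbl_v, vreinterpret_s8_u8(t.val[1]));
| @     int8x8x2_t  off = vzip_s8(o_u, o_v);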
| |
| VMOVL.U8 Q9,D13 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) |
| VADDW.S8 Q10,Q10,D22 @I pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset) |
| |
| VMAX.S16 Q10,Q10,Q1 @I pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip) |
| VMIN.U16 Q10,Q10,Q2 @I pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip)) |
| |
| VMOV Q6,Q8 @I pu1_cur_row = pu1_next_row |
| VADDW.S8 Q9,Q9,D23 @I pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset) |
| |
| SUB r7,r7,#1 @I Decrement the ht_tmp loop count by 1 |
| VMAX.S16 Q9,Q9,Q1 @I pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip) |
| |
| VMIN.U16 Q9,Q9,Q2 @I pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip)) |
| |
| |
| PU1_SRC_LOOP: |
| ADD r11,r0,r1,LSL #1 @II *pu1_src + src_strd |
| VMOVN.I16 D20,Q10 @I vmovn_s16(pi2_tmp_cur_row.val[0]) |
| SUB r5,r12,r7 @II ht_tmp - row |
| |
| ADD r4,r0,r1 @III *pu1_src + src_strd |
| VMOVN.I16 D21,Q9 @I vmovn_s16(pi2_tmp_cur_row.val[1]) |
| ADD r8,r14,r5,LSL #1 @II pu1_src_left_cpy[(ht_tmp - row) * 2] |
| |
| LDRH r9,[r8,#2] |
| VLD1.8 D16,[r11]! @II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| VLD1.8 D17,[r11] @II pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| SUB r11,#8 |
| LDRB r10,[r4,#14] @II pu1_src_cpy[14] |
| |
| LDRB r8,[r4,#15] @II pu1_src_cpy[15] |
| VMOV.16 D29[3],r9 @II vsetq_lane_u8 |
| ADD r4,r11,r1 @III *pu1_src + src_strd |
| |
| LDRB r5,[r0,#17] @II load the value pu1_src_cpy[17 - src_strd] |
| VLD1.8 D30,[r4]! @III pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| VLD1.8 D31,[r4] @III pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| SUB r4,#8 |
| LDRB r11,[r0,#16] @II load the value pu1_src_cpy[16 - src_strd] |
| |
| SUB r7,r7,#1 @II Decrement the ht_tmp loop count by 1 |
| VST1.8 {Q10},[r0],r1 @I vst1q_u8(pu1_src_cpy, pu1_cur_row) |
| SUB r10,r10,r11 @II pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd] |
| |
| CMP r10,#0 @II |
| VEXT.8 Q14,Q14,Q8,#14 @II pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14) |
| SUB r8,r8,r5 @II pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd] |
| |
| MVNLT r10,#0 @II |
| VLD1.8 D21,[r2] @edge_idx_tbl = vld1_s8(gi1_table_edge_idx) |
| MOVGT r10,#1 @II SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]) |
| |
| CMP r8,#0 @II |
| VMOV.8 D15[6],r10 @II sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]), sign_up, 0)
| MVNLT r8,#0 @II |
| |
| MOVGT r8,#1 @II SIGN(pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd])
| SUB r10,r12,r7 @III ht_tmp - row |
| VMOV.8 D15[7],r8 @II sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] -pu1_src_cpy[17 - src_strd]), sign_up, 1) |
| ADD r11,r14,r10,LSL #1 @III pu1_src_left_cpy[(ht_tmp - row) * 2] |
| |
| CMP r7,#1 @III |
| VCGT.U8 Q11,Q6,Q14 @II vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) |
| BNE NEXT_ROW_POINTER_ASSIGNED_2 @III |
| |
| LDR r5,[sp,#0x108] @III Loads pu1_avail |
| LDRB r5,[r5,#3] @III pu1_avail[3] |
| CMP r5,#0 @III |
| SUBNE r11,r4,#4 @III pu1_src[src_strd - 2] |
| |
| NEXT_ROW_POINTER_ASSIGNED_2: |
| LDRH r5,[r11,#2] @III |
| VCLT.U8 Q12,Q6,Q14 @II vcltq_u8(pu1_cur_row, pu1_next_row_tmp) |
| ADD r11,r0,r1 @III |
| |
| LDRB r9,[r11,#14] @III pu1_src_cpy[14] |
| VMOV.16 D19[3],r5 @III vsetq_lane_u8 |
| LDRB r8,[r11,#15] @III pu1_src_cpy[15] |
| |
| LDRB r11,[r0,#16] @III load the value pu1_src_cpy[16 - src_strd] |
| VSUB.U8 Q12,Q12,Q11 @II sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) |
| LDRB r10,[r0,#17] @III load the value pu1_src_cpy[17 - src_strd] |
| |
| SUB r9,r9,r11 @III pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd] |
| VEXT.8 Q9,Q9,Q15,#14 @III pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14) |
| SUB r10,r8,r10 @III pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd] |
| |
| CMP r9,#0 @III |
| VADD.I8 Q13,Q0,Q7 @II edge_idx = vaddq_s8(const_2, sign_up) |
| MVNLT r9,#0 @III |
| |
| MOVGT r9,#1 @III SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]) |
| VADD.I8 Q13,Q13,Q12 @II edge_idx = vaddq_s8(edge_idx, sign_down) |
| CMP r10,#0 @III |
| |
| VNEG.S8 Q7,Q12 @II sign_up = vnegq_s8(sign_down) |
| VTBL.8 D26,{D21},D26 @II vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx)) |
| MVNLT r10,#0 @III |
| MOVGT r10,#1 @III SIGN(pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd])
| |
| VEXT.8 Q7,Q7,Q7,#2 @II sign_up = vextq_s8(sign_up, sign_up, 2) |
| VTBL.8 D27,{D21},D27 @II vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx)) |
| VCGT.U8 Q11,Q8,Q9 @III vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) |
| |
| VMOV.8 D15[6],r9 @III sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]), sign_up, 0)
| VAND Q13,Q13,Q4 @II edge_idx = vandq_s8(edge_idx, au1_mask) |
| |
| VMOV.8 D15[7],r10 @III sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] -pu1_src_cpy[17 - src_strd]), sign_up, 1) |
| VUZP.8 D26,D27 @II |
| |
| VCLT.U8 Q10,Q8,Q9 @III vcltq_u8(pu1_cur_row, pu1_next_row_tmp) |
| VTBL.8 D24,{D6},D26 @II |
| VSUB.U8 Q11,Q10,Q11 @III sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) |
| |
| VADD.I8 Q9,Q0,Q7 @III edge_idx = vaddq_s8(const_2, sign_up) |
| VTBL.8 D25,{D7},D27 @II |
| VADD.I8 Q9,Q9,Q11 @III edge_idx = vaddq_s8(edge_idx, sign_down) |
| |
| VLD1.8 D20,[r2] @edge_idx_tbl = vld1_s8(gi1_table_edge_idx) |
| VZIP.8 D24,D25 @II |
| |
| VMOVL.U8 Q14,D12 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) |
| VTBL.8 D18,{D20},D18 @III vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx)) |
| VNEG.S8 Q7,Q11 @III sign_up = vnegq_s8(sign_down) |
| |
| VADDW.S8 Q14,Q14,D24 @II pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset) |
| VTBL.8 D19,{D20},D19 @III vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx)) |
| VEXT.8 Q7,Q7,Q7,#2 @III sign_up = vextq_s8(sign_up, sign_up, 2) |
| |
| VMOVL.U8 Q13,D13 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) |
| VAND Q9,Q9,Q4 @III edge_idx = vandq_s8(edge_idx, au1_mask) |
| |
| VMOVL.U8 Q10,D16 @III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) |
| VUZP.8 D18,D19 @III |
| |
| VMAX.S16 Q14,Q14,Q1 @II pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip) |
| VTBL.8 D22,{D6},D18 @III |
| VMIN.U16 Q14,Q14,Q2 @II pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip)) |
| |
| VADDW.S8 Q13,Q13,D25 @II pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset) |
| VTBL.8 D23,{D7},D19 @III |
| VMAX.S16 Q13,Q13,Q1 @II pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip) |
| |
| VMOVL.U8 Q9,D17 @III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) |
| VZIP.8 D22,D23 @III |
| |
| VMOVN.I16 D28,Q14 @II vmovn_s16(pi2_tmp_cur_row.val[0]) |
| VADDW.S8 Q10,Q10,D22 @III pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset) |
| |
| VMOV Q6,Q15 @III pu1_cur_row = pu1_next_row |
| VMIN.U16 Q13,Q13,Q2 @II pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip)) |
| |
| SUB r7,r7,#1 @III Decrement the ht_tmp loop count by 1 |
| VMAX.S16 Q10,Q10,Q1 @III pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip) |
| CMP r7,#1 @III |
| |
| VMOVN.I16 D29,Q13 @II vmovn_s16(pi2_tmp_cur_row.val[1]) |
| VMIN.U16 Q10,Q10,Q2 @III pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip)) |
| |
| VADDW.S8 Q9,Q9,D23 @III pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset) |
| |
| VMAX.S16 Q9,Q9,Q1 @III pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip) |
| |
| VST1.8 {Q14},[r0],r1 @II vst1q_u8(pu1_src_cpy, pu1_cur_row) |
| VMIN.U16 Q9,Q9,Q2 @III pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip)) |
| |
| BGT PU1_SRC_LOOP @If not equal jump to PU1_SRC_LOOP |
| BLT INNER_LOOP_DONE |
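| @ PU1_SRC_LOOP above is software pipelined and advances two rows (stages II
| @ and III) per iteration: BGT loops back while more than one row remains,
| @ BLT skips to INNER_LOOP_DONE when none remain, and the fall-through code
| @ below processes the single remaining row.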
| |
| |
| ADD r11,r0,r1,LSL #1 @*pu1_src + src_strd |
| VMOVN.I16 D20,Q10 @III vmovn_s16(pi2_tmp_cur_row.val[0]) |
| SUB r5,r12,r7 @ht_tmp - row |
| |
| ADD r8,r14,r5,LSL #1 @pu1_src_left_cpy[(ht_tmp - row) * 2] |
| VMOVN.I16 D21,Q9 @III vmovn_s16(pi2_tmp_cur_row.val[1]) |
| CMP r7,#1 |
| |
| LDRB r4,[r0,#16] @load the value pu1_src_cpy[16 - src_strd] |
| VLD1.8 D16,[r11]! @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| VLD1.8 D17,[r11] @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| SUB r11,#8 |
| LDRB r9,[r0,#17] @load the value pu1_src_cpy[17 - src_strd] |
| |
| BNE NEXT_ROW_POINTER_ASSIGNED_3 |
| LDR r5,[sp,#0x108] @Loads pu1_avail |
| LDRB r5,[r5,#3] @pu1_avail[3] |
| CMP r5,#0 |
| SUBNE r8,r11,#4 @pu1_src[src_strd - 2] |
| |
| NEXT_ROW_POINTER_ASSIGNED_3: |
| LDRH r5,[r8,#2] |
| VST1.8 {Q10},[r0],r1 @III vst1q_u8(pu1_src_cpy, pu1_cur_row) |
| LDRB r8,[r0,#14] @pu1_src_cpy[14] |
| |
| SUB r8,r8,r4 @pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd] |
| VMOV.16 D19[3],r5 @vsetq_lane_u8 |
| LDRB r10,[r0,#15] @pu1_src_cpy[15] |
| |
| CMP r8,#0 |
| VEXT.8 Q9,Q9,Q8,#14 @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14) |
| SUB r10,r10,r9 @pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd] |
| |
| MVNLT r8,#0 |
| VLD1.8 D28,[r2] @edge_idx_tbl = vld1_s8(gi1_table_edge_idx) |
| MOVGT r8,#1 @SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]) |
| |
| CMP r10,#0 |
| VMOV.8 D15[6],r8 @sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]), sign_up, 0)
| MVNLT r10,#0 |
| |
| MOVGT r10,#1 @SIGN(pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd])
| VMOV.8 D15[7],r10 @sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] -pu1_src_cpy[17 - src_strd]), sign_up, 1) |
| VCGT.U8 Q10,Q6,Q9 @vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) |
| |
| VCLT.U8 Q11,Q6,Q9 @vcltq_u8(pu1_cur_row, pu1_next_row_tmp) |
| VSUB.U8 Q11,Q11,Q10 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) |
| |
| VADD.I8 Q9,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) |
| VADD.I8 Q9,Q9,Q11 @edge_idx = vaddq_s8(edge_idx, sign_down) |
| VTBL.8 D18,{D28},D18 @vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx)) |
| VTBL.8 D19,{D28},D19 @vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx)) |
| |
| VAND Q9,Q9,Q4 @edge_idx = vandq_s8(edge_idx, au1_mask) |
| |
| VMOVL.U8 Q10,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) |
| VUZP.8 D18,D19 |
| |
| VTBL.8 D22,{D6},D18 |
| VTBL.8 D23,{D7},D19 |
| |
| VMOVL.U8 Q9,D13 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) |
| VZIP.8 D22,D23 |
| |
| VADDW.S8 Q10,Q10,D22 @pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset) |
| VMAX.S16 Q10,Q10,Q1 @pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip) |
| VMIN.U16 Q10,Q10,Q2 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip)) |
| |
| VADDW.S8 Q9,Q9,D23 @pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset) |
| VMAX.S16 Q9,Q9,Q1 @pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip) |
| VMIN.U16 Q9,Q9,Q2 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip)) |
| |
| |
| INNER_LOOP_DONE: |
| |
| LDR r8,[sp,#0x118] @Loads ht |
| VMOVN.I16 D20,Q10 @III vmovn_s16(pi2_tmp_cur_row.val[0]) |
| ADD r5,sp,#0x4B @*au1_src_left_tmp |
| |
| LSL r8,r8,#1 |
| VMOVN.I16 D21,Q9 @III vmovn_s16(pi2_tmp_cur_row.val[1]) |
| LDR r11,[sp,#0x104] @Loads *pu1_src_left |
| |
| SRC_LEFT_LOOP: |
| LDR r7,[r5],#4 @au1_src_left_tmp[row] |
| SUBS r8,r8,#4 |
| STR r7,[r11],#4 @pu1_src_left[row] = au1_src_left_tmp[row] |
| BNE SRC_LEFT_LOOP |
| |
| SUBS r6,r6,#16 @Decrement the wd loop count by 16 |
| VST1.8 {Q10},[r0],r1 @III vst1q_u8(pu1_src_cpy, pu1_cur_row) |
| CMP r6,#8 @Check whether residue remains |
| |
| BLT RE_ASSINING_LOOP @Jump to re-assigning loop |
| LDR r7,[sp,#0x114] @Loads wd |
| LDR r0,[sp,#0x02] @Loads *pu1_src |
| SUB r7,r7,r6 |
| ADD r0,r0,r7 |
| BGT WIDTH_LOOP_16 @If not equal jump to width_loop |
| BEQ WIDTH_RESIDUE @If residue remains jump to residue loop |
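| @ WD_16_HT_4_LOOP below applies the same per-pixel operation to 16-pixel
| @ stripes of short blocks (ht <= 4), one row per iteration, without the
| @ pipelined II/III stages of WIDTH_LOOP_16.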
| |
| WD_16_HT_4_LOOP: |
| LDR r7,[sp,#0x114] @Loads wd |
| |
| LDR r5,[sp,#0x108] @Loads pu1_avail |
| CMP r6,r7 @col == wd |
| |
| LDRBEQ r8,[r5] @pu1_avail[0] |
| MOVNE r8,#-1 |
| VMOV.8 D8[0],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0) |
| |
| CMP r6,#16 @if(col == 16) |
| VMOV.8 D8[1],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 1)
| |
| BNE SKIP_AU1_MASK_VAL_WD_16_HT_4 |
| LDRB r8,[r5,#1] @pu1_avail[1] |
| VMOV.8 D9[6],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 14)
| VMOV.8 D9[7],r8 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 15) |
| |
| SKIP_AU1_MASK_VAL_WD_16_HT_4: |
| LDRB r11,[r5,#2] @pu1_avail[2] |
| CMP r11,#0 |
| SUBEQ r8,r0,r1 @pu1_src - src_strd |
| |
| MOVNE r8,r3 |
| VLD1.8 D12,[r0]! @pu1_cur_row = vld1q_u8(pu1_src) |
| VLD1.8 D13,[r0] @pu1_cur_row = vld1q_u8(pu1_src) |
| SUB r0,#8 |
| ADD r8,r8,#2 @pu1_src - src_strd + 2 |
| |
| ADD r3,r3,#16 |
| VLD1.8 D10,[r8]! @pu1_top_row = vld1q_u8(pu1_src - src_strd + 2) |
| VLD1.8 D11,[r8] @pu1_top_row = vld1q_u8(pu1_src - src_strd + 2) |
| SUB r8,#8 |
| ADD r5,sp,#0x4B @*au1_src_left_tmp |
| |
| LDR r4,[sp,#0x118] @Loads ht |
| VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row) |
| LDR r7,[sp,#0x114] @Loads wd |
| |
| SUB r7,r7,r6 @(wd - col) |
| VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row) |
| ADD r7,r7,#14 @15 + (wd - col) |
| |
| LDR r8,[sp,#0x100] @Loads *pu1_src |
| VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) |
| ADD r7,r8,r7 @pu1_src[0 * src_strd + 15 + (wd - col)] |
| |
| AU1_SRC_LEFT_LOOP_WD_16_HT_4: |
| LDRH r8,[r7] @load the value and increment by src_strd |
| SUBS r4,r4,#1 @decrement the loop count |
| |
| STRH r8,[r5],#2 @store it in the stack pointer |
| ADD r7,r7,r1 |
| BNE AU1_SRC_LEFT_LOOP_WD_16_HT_4 |
| |
| VMOV.I8 Q9,#0 |
| MOV r7,r12 @row count, move ht_tmp to r7 |
| |
| PU1_SRC_LOOP_WD_16_HT_4: |
| ADD r9,r0,r1 @*pu1_src + src_strd |
| |
| LDR r5,[sp,#0x108] @Loads pu1_avail |
| VLD1.8 D16,[r9]! @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| VLD1.8 D17,[r9] @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| SUB r9,#8 |
| LDRB r5,[r5,#3] @pu1_avail[3] |
| |
| SUB r11,r12,r7 @ht_tmp - row |
| ADD r8,r14,r11,LSL #1 @pu1_src_left_cpy[(ht_tmp - row) * 2] |
| ADD r8,r8,#2 @pu1_src_left_cpy[(ht_tmp - row + 1) * 2] |
| |
| CMP r5,#0 |
| BEQ NEXT_ROW_POINTER_ASSIGNED_WD_16_HT_4 |
| CMP r7,#1 |
| SUBEQ r8,r9,#2 @pu1_src[src_strd - 2] |
| |
| NEXT_ROW_POINTER_ASSIGNED_WD_16_HT_4: |
| LDRH r5,[r8] |
| VMOV.16 D19[3],r5 @vsetq_lane_u8 |
| VEXT.8 Q9,Q9,Q8,#14 @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14) |
| |
| CMP r7,r12 |
| BLT SIGN_UP_CHANGE_WD_16_HT_4 |
| LDR r5,[sp,#0x108] @Loads pu1_avail |
| LDRB r5,[r5,#2] @pu1_avail[2] |
| CMP r5,#0 |
| BNE SIGN_UP_CHANGE_DONE_WD_16_HT_4 |
| |
| SIGN_UP_CHANGE_WD_16_HT_4: |
| LDRB r8,[r0,#14] @pu1_src_cpy[14] |
| SUB r9,r0,r1 |
| |
| LDRB r5,[r9,#16] @load the value pu1_src_cpy[16 - src_strd] |
| |
| LDRB r10,[r0,#15] @pu1_src_cpy[15] |
| SUB r8,r8,r5 @pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd] |
| |
| LDRB r11,[r9,#17] @load the value pu1_src_cpy[17 - src_strd] |
| CMP r8,#0 |
| |
| MVNLT r8,#0 |
| SUB r10,r10,r11 @pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd] |
| |
| MOVGT r8,#1 @SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]) |
| |
| CMP r10,#0 |
| VMOV.8 D15[6],r8 @sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]), sign_up, 0)
| MVNLT r10,#0 |
| |
| MOVGT r10,#1 @SIGN(pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd])
| VMOV.8 D15[7],r10 @sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] -pu1_src_cpy[17 - src_strd]), sign_up, 1) |
| |
| SIGN_UP_CHANGE_DONE_WD_16_HT_4: |
| VLD1.8 D20,[r2] @edge_idx_tbl = vld1_s8(gi1_table_edge_idx) |
| VCGT.U8 Q11,Q6,Q9 @vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) |
| |
| VCLT.U8 Q12,Q6,Q9 @vcltq_u8(pu1_cur_row, pu1_next_row_tmp) |
| VSUB.U8 Q12,Q12,Q11 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) |
| |
| VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) |
| VADD.I8 Q13,Q13,Q12 @edge_idx = vaddq_s8(edge_idx, sign_down) |
| |
| VNEG.S8 Q7,Q12 @sign_up = vnegq_s8(sign_down) |
| VTBL.8 D26,{D20},D26 @vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx)) |
| |
| VTBL.8 D27,{D20},D27 @vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx)) |
| VEXT.8 Q7,Q7,Q7,#2 @sign_up = vextq_s8(sign_up, sign_up, 2) |
| |
| VMOVL.U8 Q14,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) |
| VAND Q13,Q13,Q4 @edge_idx = vandq_s8(edge_idx, au1_mask) |
| |
| |
| VUZP.8 D26,D27 |
| VTBL.8 D24,{D6},D26 |
| VTBL.8 D25,{D7},D27 |
| VZIP.8 D24,D25 |
| |
| VMOVL.U8 Q15,D13 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(pu1_cur_row))) |
| VADDW.S8 Q14,Q14,D24 @pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset) |
| |
| VMAX.S16 Q14,Q14,Q1 @pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip) |
| VMIN.U16 Q14,Q14,Q2 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip)) |
| |
| VMOV Q6,Q8 @pu1_cur_row = pu1_next_row |
| VADDW.S8 Q15,Q15,D25 @pi2_tmp_cur_row.val[1] = vaddw_s8(pi2_tmp_cur_row.val[1], offset) |
| |
| VMAX.S16 Q15,Q15,Q1 @pi2_tmp_cur_row.val[1] = vmaxq_s16(pi2_tmp_cur_row.val[1], const_min_clip) |
| VMIN.U16 Q15,Q15,Q2 @pi2_tmp_cur_row.val[1] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[1]), const_max_clip)) |
| |
| VMOVN.I16 D28,Q14 @vmovn_s16(pi2_tmp_cur_row.val[0]) |
| VMOVN.I16 D29,Q15 @vmovn_s16(pi2_tmp_cur_row.val[1]) |
| |
| SUBS r7,r7,#1 @Decrement the ht_tmp loop count by 1 |
| VST1.8 {Q14},[r0],r1 @vst1q_u8(pu1_src_cpy, pu1_cur_row) |
| BNE PU1_SRC_LOOP_WD_16_HT_4 @If not equal jump to PU1_SRC_LOOP_WD_16_HT_4 |
| |
| LDR r8,[sp,#0x118] @Loads ht |
| ADD r5,sp,#0x4B @*au1_src_left_tmp |
| LDR r11,[sp,#0x104] @Loads *pu1_src_left |
| |
| SRC_LEFT_LOOP_WD_16_HT_4: |
| LDR r7,[r5],#4 @au1_src_left_tmp[row] |
| SUBS r8,r8,#2 |
| STR r7,[r11],#4 @pu1_src_left[row] = au1_src_left_tmp[row] |
| BNE SRC_LEFT_LOOP_WD_16_HT_4 |
| |
| SUBS r6,r6,#16 @Decrement the wd loop count by 16 |
| BLE RE_ASSINING_LOOP @Jump to re-assigning loop |
| BGT WD_16_HT_4_LOOP @If not equal jump to width_loop |
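| @ WIDTH_RESIDUE below handles the final 8-pixel stripe (4 chroma pairs):
| @ only the first and last pairs of the stripe (lanes 0-1 and 6-7) are masked
| @ by pu1_avail, and only the low 8 bytes of each processed row are stored
| @ back (VST1.8 {D30}).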
| |
| WIDTH_RESIDUE: |
| LDR r7,[sp,#0x114] @Loads wd |
| |
| LDR r5,[sp,#0x108] @Loads pu1_avail |
| CMP r6,r7 @wd_residue == wd |
| |
| LDRBEQ r8,[r5] @pu1_avail[0] |
| |
| MOVNE r8,#-1 |
| LDRB r11,[r5,#1] @pu1_avail[1] |
| |
| LDRB r9,[r5,#2] @pu1_avail[2] |
| VMOV.8 d8[0],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0) |
| CMP r9,#0 |
| |
| SUBEQ r10,r0,r1 @pu1_src - src_strd |
| VMOV.8 d8[1],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 1)
| MOVNE r10,r3 |
| |
| ADD r10,r10,#2 @pu1_src - src_strd + 2 |
| VMOV.8 d8[6],r11 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 6)
| ADD r5,sp,#0x4B @*au1_src_left_tmp |
| |
| LDR r4,[sp,#0x118] @Loads ht |
| VMOV.8 d8[7],r11 @au1_mask = vsetq_lane_s8(pu1_avail[1], au1_mask, 7)
| LDR r7,[sp,#0x114] @Loads wd |
| |
| LDR r8,[sp,#0x100] @Loads *pu1_src |
| VLD1.8 D10,[r10]! @pu1_top_row = vld1q_u8(pu1_src - src_strd + 2) |
| VLD1.8 D11,[r10] @pu1_top_row = vld1q_u8(pu1_src - src_strd + 2) |
| SUB r10,#8 |
| SUB r7,r7,#2 @(wd - 2) |
| |
| ADD r7,r8,r7 @pu1_src[0 * src_strd + (wd - 2)] |
| |
| AU1_SRC_LEFT_LOOP_RESIDUE: |
| LDRH r8,[r7] @load the value and increment by src_strd |
| ADD r7,r7,r1 |
| STRH r8,[r5],#2 @store it in the stack pointer |
| SUBS r4,r4,#1 @decrement the loop count |
| BNE AU1_SRC_LEFT_LOOP_RESIDUE |
| |
| VLD1.8 D12,[r0]! @pu1_cur_row = vld1q_u8(pu1_src) |
| VLD1.8 D13,[r0] @pu1_cur_row = vld1q_u8(pu1_src) |
| SUB r0,#8 |
| |
| VMOV.I8 Q9,#0 |
| VCGT.U8 Q7,Q6,Q5 @vcgtq_u8(pu1_cur_row, pu1_top_row) |
| |
| VCLT.U8 Q8,Q6,Q5 @vcltq_u8(pu1_cur_row, pu1_top_row) |
| VSUB.U8 Q7,Q8,Q7 @sign_up = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) |
| MOV r7,r12 @row count, move ht_tmp to r7 |
| |
| PU1_SRC_LOOP_RESIDUE: |
| ADD r9,r0,r1 @*pu1_src + src_strd |
| |
| SUB r11,r12,r7 @ht_tmp - row |
| VLD1.8 D16,[r9]! @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| VLD1.8 D17,[r9] @pu1_next_row = vld1q_u8(pu1_src_cpy + src_strd) |
| SUB r9,#8 |
| LDR r5,[sp,#0x108] @Loads pu1_avail |
| |
| LDRB r5,[r5,#3] @pu1_avail[3] |
| ADD r8,r14,r11,LSL #1 @pu1_src_left_cpy[(ht_tmp - row) * 2] |
| |
| CMP r5,#0 |
| ADD r8,r8,#2 @pu1_src_left_cpy[(ht_tmp - row + 1) * 2] |
| |
| BEQ NEXT_ROW_POINTER_ASSIGNED_RESIDUE |
| CMP r7,#1 |
| SUBEQ r8,r9,#2 @pu1_src[src_strd - 2] |
| |
| NEXT_ROW_POINTER_ASSIGNED_RESIDUE: |
| LDRB r5,[r8] |
| |
| LDRB r8,[r8,#1] |
| VMOV.8 D19[6],r5 @vsetq_lane_u8 |
| CMP r7,r12 |
| |
| VMOV.8 D19[7],r8 @vsetq_lane_u8 |
| VEXT.8 Q9,Q9,Q8,#14 @pu1_next_row_tmp = vextq_u8(pu1_next_row_tmp, pu1_next_row, 14) |
| |
| BLT SIGN_UP_CHANGE_RESIDUE |
| LDR r5,[sp,#0x108] @Loads pu1_avail |
| LDRB r5,[r5,#2] @pu1_avail[2] |
| CMP r5,#0 |
| BNE SIGN_UP_CHANGE_DONE_RESIDUE |
| |
| SIGN_UP_CHANGE_RESIDUE: |
| LDRB r8,[r0,#14] @pu1_src_cpy[14] |
| SUB r9,r0,r1 |
| |
| LDRB r5,[r9,#16] @load the value pu1_src_cpy[16 - src_strd] |
| |
| LDRB r10,[r0,#15] @pu1_src_cpy[15] |
| SUB r8,r8,r5 @pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd] |
| |
| LDRB r11,[r9,#17] @load the value pu1_src_cpy[17 - src_strd] |
| CMP r8,#0 |
| |
| MVNLT r8,#0 |
| SUB r10,r10,r11 @pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd] |
| |
| MOVGT r8,#1 @SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]) |
| |
| CMP r10,#0 |
| VMOV.8 D15[6],r8 @sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[14] - pu1_src_cpy[16 - src_strd]), sign_up, 0)
| MVNLT r10,#0 |
| |
| MOVGT r10,#1 @SIGN(pu1_src_cpy[15] - pu1_src_cpy[17 - src_strd])
| VMOV.8 D15[7],r10 @sign_up = vsetq_lane_s8(SIGN(pu1_src_cpy[15] -pu1_src_cpy[17 - src_strd]), sign_up, 1) |
| |
| SIGN_UP_CHANGE_DONE_RESIDUE: |
| VLD1.8 D20,[r2] @edge_idx_tbl = vld1_s8(gi1_table_edge_idx) |
| VCGT.U8 Q11,Q6,Q9 @vcgtq_u8(pu1_cur_row, pu1_next_row_tmp) |
| |
| VCLT.U8 Q12,Q6,Q9 @vcltq_u8(pu1_cur_row, pu1_next_row_tmp) |
| VSUB.U8 Q12,Q12,Q11 @sign_down = vreinterpretq_s8_u8(vsubq_u8(cmp_lt, cmp_gt)) |
| |
| VADD.I8 Q13,Q0,Q7 @edge_idx = vaddq_s8(const_2, sign_up) |
| VADD.I8 Q13,Q13,Q12 @edge_idx = vaddq_s8(edge_idx, sign_down) |
| |
| VNEG.S8 Q7,Q12 @sign_up = vnegq_s8(sign_down) |
| VTBL.8 D26,{D20},D26 @vtbl1_s8(edge_idx_tbl, vget_low_s8(edge_idx)) |
| |
| VTBL.8 D27,{D20},D27 @vtbl1_s8(edge_idx_tbl, vget_high_s8(edge_idx)) |
| VEXT.8 Q7,Q7,Q7,#2 @sign_up = vextq_s8(sign_up, sign_up, 2)
| |
| VMOVL.U8 Q14,D12 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(pu1_cur_row))) |
| VAND Q13,Q13,Q4 @edge_idx = vandq_s8(edge_idx, au1_mask) |
| |
| |
| VUZP.8 D26,D27 |
| VTBL.8 D24,{D6},D26 |
| VTBL.8 D25,{D7},D27 |
| VZIP.8 D24,D25 |
| |
| VMOV Q6,Q8 @pu1_cur_row = pu1_next_row |
| VADDW.S8 Q14,Q14,D24 @pi2_tmp_cur_row.val[0] = vaddw_s8(pi2_tmp_cur_row.val[0], offset) |
| |
| VMAX.S16 Q14,Q14,Q1 @pi2_tmp_cur_row.val[0] = vmaxq_s16(pi2_tmp_cur_row.val[0], const_min_clip) |
| VMIN.U16 Q14,Q14,Q2 @pi2_tmp_cur_row.val[0] = vreinterpretq_s16_u16(vminq_u16(vreinterpretq_u16_s16(pi2_tmp_cur_row.val[0]), const_max_clip)) |
| |
| SUBS r7,r7,#1 @Decrement the ht_tmp loop count by 1 |
| VMOVN.I16 D30,Q14 @vmovn_s16(pi2_tmp_cur_row.val[0]) |
| |
| VST1.8 {D30},[r0],r1 @vst1q_u8(pu1_src_cpy, pu1_cur_row) |
| |
| BNE PU1_SRC_LOOP_RESIDUE @If not equal jump to PU1_SRC_LOOP |
| |
| LDR r8,[sp,#0x118] @Loads ht |
| ADD r5,sp,#0x4B @*au1_src_left_tmp |
| |
| LDR r11,[sp,#0x104] @Loads *pu1_src_left |
| |
| SRC_LEFT_LOOP_RESIDUE: |
| LDR r7,[r5],#4 @au1_src_left_tmp[row] |
| SUBS r8,r8,#2 |
| STR r7,[r11],#4 @pu1_src_left[row] = au1_src_left_tmp[row] |
| BNE SRC_LEFT_LOOP_RESIDUE |
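| @ RE_ASSINING_LOOP below writes back everything that was parked on the
| @ stack: the scalar-filtered corner pairs (sp + 6..9) go to pu1_src[wd - 2]
| @ and pu1_src[(ht - 1) * src_strd], *pu1_src_top_left is restored from
| @ u1_src_top_left_tmp, and SRC_TOP_LOOP copies au1_src_top_tmp back into
| @ pu1_src_top.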
| |
| |
| RE_ASSINING_LOOP: |
| LDR r7,[sp,#0x114] @Loads wd |
| LDR r8,[sp,#0x118] @Loads ht |
| |
| LDR r0,[sp,#0x100] @Loads *pu1_src |
| SUB r10,r7,#2 @wd - 2 |
| |
| LDRH r9,[sp,#6] |
| SUB r8,r8,#1 @ht - 1 |
| |
| STRH r9,[r0,r10] @pu1_src_org[wd - 2] = u1_pos_0_0_tmp_u, pu1_src_org[wd - 1] = u1_pos_0_0_tmp_v
| MLA r6,r8,r1,r0 @pu1_src[(ht - 1) * src_strd] |
| |
| LDR r4,[sp,#0xFC] @Loads pu1_src_top_left |
| |
| LDRH r9,[sp,#8] |
| ADD r12,sp,#10 |
| |
| STRH r9,[r6] @pu1_src_org[(ht - 1) * src_strd] = u1_pos_wd_ht_tmp_u, pu1_src_org[(ht - 1) * src_strd + 1] = u1_pos_wd_ht_tmp_v
| |
| LDRH r10,[sp] @load u1_src_top_left_tmp from stack pointer |
| STRH r10,[r4] @*pu1_src_top_left = u1_src_top_left_tmp |
| LDR r3,[sp,#0x10C] @Loads pu1_src_top |
| |
| SRC_TOP_LOOP: |
| VLD1.8 D0,[r12]! @pu1_src_top[col] = au1_src_top_tmp[col] |
| SUBS r7,r7,#8 @Decrement the width |
| VST1.8 D0,[r3]! @pu1_src_top[col] = au1_src_top_tmp[col] |
| BNE SRC_TOP_LOOP |
| |
| END_LOOPS: |
| ADD sp,sp,#0xD4 |
| LDMFD sp!,{r4-r12,r15} @Reload the registers from SP |
| |
| |
| |