| R"( |
| |
| |
| |
| |
| #ifndef ARM_COMPUTE_HELPER_H |
| #define ARM_COMPUTE_HELPER_H |
| |
| |
| |
| |
| #define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define CONVERT_STORE_ROW_10(N0, DATA, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| |
| #define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
| #define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
| #define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ |
| if(!(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| |
| #if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| |
| |
| #if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) |
| |
| #elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) |
| |
| #else |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) |
| |
| #endif |
| |
| #endif |
| |
| |
| #if defined(PARTIAL_STORE_M0) |
| |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) |
| #else |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(y * M0)) |
| #endif |
| |
| |
| |
| #define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ |
| STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) |
| |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #pragma OPENCL EXTENSION cl_khr_fp16 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| #pragma OPENCL EXTENSION cl_arm_printf : enable |
| #endif |
| |
| #define GPU_ARCH_MIDGARD 0x100 |
| #define GPU_ARCH_BIFROST 0x200 |
| #define GPU_ARCH_VALHALL 0x300 |
| |
| |
| #define CONCAT(a, b) a##b |
| |
| |
| #define EXPAND(x) x |
| |
| |
| #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) |
| |
| |
| #define REV1(x) ((x)) |
| #define REV2(x) ((x).s10) |
| #define REV3(x) ((x).s210) |
| #define REV4(x) ((x).s3210) |
| #define REV8(x) ((x).s76543210) |
| #define REV16(x) ((x).sFEDCBA9876543210) |
| |
| |
| |
| #define REVERSE_STR(x, s) REV##s((x)) |
| #define REVERSE(x, s) REVERSE_STR(x, s) |
| |
| |
| |
| #define ROT1_0(x) ((x)) |
| #define ROT1_1(x) ((x)) |
| |
| #define ROT2_0(x) ((x)) |
| #define ROT2_1(x) ((x).s10) |
| #define ROT2_2(x) ((x)) |
| |
| #define ROT3_0(x) ((x)) |
| #define ROT3_1(x) ((x).s201) |
| #define ROT3_2(x) ((x).s120) |
| #define ROT3_3(x) ((x)) |
| |
| #define ROT4_0(x) ((x)) |
| #define ROT4_1(x) ((x).s3012) |
| #define ROT4_2(x) ((x).s2301) |
| #define ROT4_3(x) ((x).s1230) |
| #define ROT4_4(x) ((x)) |
| |
| #define ROT8_0(x) ((x)) |
| #define ROT8_1(x) ((x).s70123456) |
| #define ROT8_2(x) ((x).s67012345) |
| #define ROT8_3(x) ((x).s56701234) |
| #define ROT8_4(x) ((x).s45670123) |
| #define ROT8_5(x) ((x).s34567012) |
| #define ROT8_6(x) ((x).s23456701) |
| #define ROT8_7(x) ((x).s12345670) |
| #define ROT8_8(x) ((x)) |
| |
| #define ROT16_0(x) ((x)) |
| #define ROT16_1(x) ((x).sF0123456789ABCDE) |
| #define ROT16_2(x) ((x).sEF0123456789ABCD) |
| #define ROT16_3(x) ((x).sDEF0123456789ABC) |
| #define ROT16_4(x) ((x).sCDEF0123456789AB) |
| #define ROT16_5(x) ((x).sBCDEF0123456789A) |
| #define ROT16_6(x) ((x).sABCDEF0123456789) |
| #define ROT16_7(x) ((x).s9ABCDEF012345678) |
| #define ROT16_8(x) ((x).s89ABCDEF01234567) |
| #define ROT16_9(x) ((x).s789ABCDEF0123456) |
| #define ROT16_10(x) ((x).s6789ABCDEF012345) |
| #define ROT16_11(x) ((x).s56789ABCDEF01234) |
| #define ROT16_12(x) ((x).s456789ABCDEF0123) |
| #define ROT16_13(x) ((x).s3456789ABCDEF012) |
| #define ROT16_14(x) ((x).s23456789ABCDEF01) |
| #define ROT16_15(x) ((x).s123456789ABCDEF0) |
| #define ROT16_16(x) ((x)) |
| |
| |
| |
| #define ROTATE_STR(x, s, n) ROT##s##_##n(x) |
| #define ROTATE(x, s, n) ROTATE_STR(x, s, n) |
| |
| |
| |
| #define V_OFFS1(dt) (dt##1)(0) |
| #define V_OFFS2(dt) (dt##2)(0, 1) |
| #define V_OFFS3(dt) (dt##3)(0, 1, 2) |
| #define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) |
| #define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) |
| #define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) |
| |
| |
| |
| #define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) |
| #define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) |
| |
| |
| #define VLOAD_STR(size) vload##size |
| #define VLOAD(size) VLOAD_STR(size) |
| |
| |
| #define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size |
| #define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size) |
| |
| #define NO_LOAD(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vload_partial_1_0 NO_LOAD |
| #define vload_partial_1_1 vload1 |
| #define vload_partial_1_2 NO_LOAD |
| #define vload_partial_1_3 NO_LOAD |
| #define vload_partial_1_4 NO_LOAD |
| #define vload_partial_1_5 NO_LOAD |
| #define vload_partial_1_6 NO_LOAD |
| #define vload_partial_1_7 NO_LOAD |
| #define vload_partial_1_8 NO_LOAD |
| #define vload_partial_1_9 NO_LOAD |
| #define vload_partial_1_10 NO_LOAD |
| #define vload_partial_1_11 NO_LOAD |
| #define vload_partial_1_12 NO_LOAD |
| #define vload_partial_1_13 NO_LOAD |
| #define vload_partial_1_14 NO_LOAD |
| #define vload_partial_1_15 NO_LOAD |
| #define vload_partial_1_16 NO_LOAD |
| |
| #define vload_partial_2_0 NO_LOAD |
| #define vload_partial_2_1 vload_partial_1 |
| #define vload_partial_2_2 vload_partial_2 |
| #define vload_partial_2_3 NO_LOAD |
| #define vload_partial_2_4 NO_LOAD |
| #define vload_partial_2_5 NO_LOAD |
| #define vload_partial_2_6 NO_LOAD |
| #define vload_partial_2_7 NO_LOAD |
| #define vload_partial_2_8 NO_LOAD |
| #define vload_partial_2_9 NO_LOAD |
| #define vload_partial_2_10 NO_LOAD |
| #define vload_partial_2_11 NO_LOAD |
| #define vload_partial_2_12 NO_LOAD |
| #define vload_partial_2_13 NO_LOAD |
| #define vload_partial_2_14 NO_LOAD |
| #define vload_partial_2_15 NO_LOAD |
| #define vload_partial_2_16 NO_LOAD |
| |
| #define vload_partial_3_0 NO_LOAD |
| #define vload_partial_3_1 vload_partial_1 |
| #define vload_partial_3_2 vload_partial_2 |
| #define vload_partial_3_3 vload_partial_3 |
| #define vload_partial_3_4 NO_LOAD |
| #define vload_partial_3_5 NO_LOAD |
| #define vload_partial_3_6 NO_LOAD |
| #define vload_partial_3_7 NO_LOAD |
| #define vload_partial_3_8 NO_LOAD |
| #define vload_partial_3_9 NO_LOAD |
| #define vload_partial_3_10 NO_LOAD |
| #define vload_partial_3_11 NO_LOAD |
| #define vload_partial_3_12 NO_LOAD |
| #define vload_partial_3_13 NO_LOAD |
| #define vload_partial_3_14 NO_LOAD |
| #define vload_partial_3_15 NO_LOAD |
| #define vload_partial_3_16 NO_LOAD |
| |
| #define vload_partial_4_0 NO_LOAD |
| #define vload_partial_4_1 vload_partial_1 |
| #define vload_partial_4_2 vload_partial_2 |
| #define vload_partial_4_3 vload_partial_3 |
| #define vload_partial_4_4 vload_partial_4 |
| #define vload_partial_4_5 NO_LOAD |
| #define vload_partial_4_6 NO_LOAD |
| #define vload_partial_4_7 NO_LOAD |
| #define vload_partial_4_8 NO_LOAD |
| #define vload_partial_4_9 NO_LOAD |
| #define vload_partial_4_10 NO_LOAD |
| #define vload_partial_4_11 NO_LOAD |
| #define vload_partial_4_12 NO_LOAD |
| #define vload_partial_4_13 NO_LOAD |
| #define vload_partial_4_14 NO_LOAD |
| #define vload_partial_4_15 NO_LOAD |
| #define vload_partial_4_16 NO_LOAD |
| |
| #define vload_partial_8_0 NO_LOAD |
| #define vload_partial_8_1 vload_partial_1 |
| #define vload_partial_8_2 vload_partial_2 |
| #define vload_partial_8_3 vload_partial_3 |
| #define vload_partial_8_4 vload_partial_4 |
| #define vload_partial_8_5 vload_partial_5 |
| #define vload_partial_8_6 vload_partial_6 |
| #define vload_partial_8_7 vload_partial_7 |
| #define vload_partial_8_8 vload_partial_8 |
| #define vload_partial_8_9 NO_LOAD |
| #define vload_partial_8_10 NO_LOAD |
| #define vload_partial_8_11 NO_LOAD |
| #define vload_partial_8_12 NO_LOAD |
| #define vload_partial_8_13 NO_LOAD |
| #define vload_partial_8_14 NO_LOAD |
| #define vload_partial_8_15 NO_LOAD |
| #define vload_partial_8_16 NO_LOAD |
| |
| #define vload_partial_16_0 NO_LOAD |
| #define vload_partial_16_1 vload_partial_1 |
| #define vload_partial_16_2 vload_partial_2 |
| #define vload_partial_16_3 vload_partial_3 |
| #define vload_partial_16_4 vload_partial_4 |
| #define vload_partial_16_5 vload_partial_5 |
| #define vload_partial_16_6 vload_partial_6 |
| #define vload_partial_16_7 vload_partial_7 |
| #define vload_partial_16_8 vload_partial_8 |
| #define vload_partial_16_9 vload_partial_9 |
| #define vload_partial_16_10 vload_partial_10 |
| #define vload_partial_16_11 vload_partial_11 |
| #define vload_partial_16_12 vload_partial_12 |
| #define vload_partial_16_13 vload_partial_13 |
| #define vload_partial_16_14 vload_partial_14 |
| #define vload_partial_16_15 vload_partial_15 |
| #define vload_partial_16_16 vload_partial_16 |
| |
| |
| #define vload_partial_1(DATA, OFFSET, PTR) \ |
| DATA.s0 = vload1(OFFSET, PTR); |
| |
| #define vload_partial_2(DATA, OFFSET, PTR) \ |
| DATA.s01 = vload2(OFFSET, PTR); |
| |
| #define vload_partial_3(DATA, OFFSET, PTR) \ |
| DATA.s012 = vload3(OFFSET, PTR); |
| |
| #define vload_partial_4(DATA, OFFSET, PTR) \ |
| DATA.s0123 = vload4(OFFSET, PTR); |
| |
| #define vload_partial_5(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| DATA.s4 = vload1(OFFSET, PTR + 4); |
| |
| #define vload_partial_6(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vload_partial_7(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vload_partial_8(DATA, OFFSET, PTR) \ |
| DATA.s01234567 = vload8(OFFSET, PTR); |
| |
| #define vload_partial_9(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| DATA.s8 = vload1(OFFSET, PTR + 8); |
| |
| #define vload_partial_10(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vload_partial_11(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_3(DATA.s89A, OFFSET, PTR + 8); |
| |
| #define vload_partial_12(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_4(DATA.s89AB, OFFSET, PTR + 8); |
| |
| #define vload_partial_13(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_14(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_15(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_16(DATA, OFFSET, PTR) \ |
| DATA = vload16(OFFSET, PTR); |
| |
| |
| |
| #define PIXEL_UNIT4 1 |
| #define PIXEL_UNIT8 2 |
| #define PIXEL_UNIT16 4 |
| |
| |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) |
| |
| |
| #define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); |
| #endif |
| |
| #define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| #endif |
| |
| |
| #define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) |
| #define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) |
| |
| |
| #define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values) |
| #define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) |
| |
| #define VSTORE_STR(size) vstore##size |
| #define VSTORE(size) VSTORE_STR(size) |
| |
| #define float1 float |
| #define half1 half |
| #define char1 char |
| #define uchar1 uchar |
| #define short1 short |
| #define ushort1 ushort |
| #define int1 int |
| #define uint1 uint |
| #define long1 long |
| #define ulong1 ulong |
| #define double1 double |
| |
| #define vload1(OFFSET, PTR) *(OFFSET + PTR) |
| #define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA |
| |
| |
| #define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size |
| #define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) |
| |
| #define NO_STORE(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vstore_partial_1_0 NO_STORE |
| #define vstore_partial_1_1 vstore1 |
| #define vstore_partial_1_2 NO_STORE |
| #define vstore_partial_1_3 NO_STORE |
| #define vstore_partial_1_4 NO_STORE |
| #define vstore_partial_1_5 NO_STORE |
| #define vstore_partial_1_6 NO_STORE |
| #define vstore_partial_1_7 NO_STORE |
| #define vstore_partial_1_8 NO_STORE |
| #define vstore_partial_1_9 NO_STORE |
| #define vstore_partial_1_10 NO_STORE |
| #define vstore_partial_1_11 NO_STORE |
| #define vstore_partial_1_12 NO_STORE |
| #define vstore_partial_1_13 NO_STORE |
| #define vstore_partial_1_14 NO_STORE |
| #define vstore_partial_1_15 NO_STORE |
| #define vstore_partial_1_16 NO_STORE |
| |
| #define vstore_partial_2_0 NO_STORE |
| #define vstore_partial_2_1 vstore_partial_1 |
| #define vstore_partial_2_2 vstore_partial_2 |
| #define vstore_partial_2_3 NO_STORE |
| #define vstore_partial_2_4 NO_STORE |
| #define vstore_partial_2_5 NO_STORE |
| #define vstore_partial_2_6 NO_STORE |
| #define vstore_partial_2_7 NO_STORE |
| #define vstore_partial_2_8 NO_STORE |
| #define vstore_partial_2_9 NO_STORE |
| #define vstore_partial_2_10 NO_STORE |
| #define vstore_partial_2_11 NO_STORE |
| #define vstore_partial_2_12 NO_STORE |
| #define vstore_partial_2_13 NO_STORE |
| #define vstore_partial_2_14 NO_STORE |
| #define vstore_partial_2_15 NO_STORE |
| #define vstore_partial_2_16 NO_STORE |
| |
| #define vstore_partial_3_0 NO_STORE |
| #define vstore_partial_3_1 vstore_partial_1 |
| #define vstore_partial_3_2 vstore_partial_2 |
| #define vstore_partial_3_3 vstore_partial_3 |
| #define vstore_partial_3_4 NO_STORE |
| #define vstore_partial_3_5 NO_STORE |
| #define vstore_partial_3_6 NO_STORE |
| #define vstore_partial_3_7 NO_STORE |
| #define vstore_partial_3_8 NO_STORE |
| #define vstore_partial_3_9 NO_STORE |
| #define vstore_partial_3_10 NO_STORE |
| #define vstore_partial_3_11 NO_STORE |
| #define vstore_partial_3_12 NO_STORE |
| #define vstore_partial_3_13 NO_STORE |
| #define vstore_partial_3_14 NO_STORE |
| #define vstore_partial_3_15 NO_STORE |
| #define vstore_partial_3_16 NO_STORE |
| |
| #define vstore_partial_4_0 NO_STORE |
| #define vstore_partial_4_1 vstore_partial_1 |
| #define vstore_partial_4_2 vstore_partial_2 |
| #define vstore_partial_4_3 vstore_partial_3 |
| #define vstore_partial_4_4 vstore_partial_4 |
| #define vstore_partial_4_5 NO_STORE |
| #define vstore_partial_4_6 NO_STORE |
| #define vstore_partial_4_7 NO_STORE |
| #define vstore_partial_4_8 NO_STORE |
| #define vstore_partial_4_9 NO_STORE |
| #define vstore_partial_4_10 NO_STORE |
| #define vstore_partial_4_11 NO_STORE |
| #define vstore_partial_4_12 NO_STORE |
| #define vstore_partial_4_13 NO_STORE |
| #define vstore_partial_4_14 NO_STORE |
| #define vstore_partial_4_15 NO_STORE |
| #define vstore_partial_4_16 NO_STORE |
| |
| #define vstore_partial_8_0 NO_STORE |
| #define vstore_partial_8_1 vstore_partial_1 |
| #define vstore_partial_8_2 vstore_partial_2 |
| #define vstore_partial_8_3 vstore_partial_3 |
| #define vstore_partial_8_4 vstore_partial_4 |
| #define vstore_partial_8_5 vstore_partial_5 |
| #define vstore_partial_8_6 vstore_partial_6 |
| #define vstore_partial_8_7 vstore_partial_7 |
| #define vstore_partial_8_8 vstore_partial_8 |
| #define vstore_partial_8_9 NO_STORE |
| #define vstore_partial_8_10 NO_STORE |
| #define vstore_partial_8_11 NO_STORE |
| #define vstore_partial_8_12 NO_STORE |
| #define vstore_partial_8_13 NO_STORE |
| #define vstore_partial_8_14 NO_STORE |
| #define vstore_partial_8_15 NO_STORE |
| #define vstore_partial_8_16 NO_STORE |
| |
| #define vstore_partial_16_0 NO_STORE |
| #define vstore_partial_16_1 vstore_partial_1 |
| #define vstore_partial_16_2 vstore_partial_2 |
| #define vstore_partial_16_3 vstore_partial_3 |
| #define vstore_partial_16_4 vstore_partial_4 |
| #define vstore_partial_16_5 vstore_partial_5 |
| #define vstore_partial_16_6 vstore_partial_6 |
| #define vstore_partial_16_7 vstore_partial_7 |
| #define vstore_partial_16_8 vstore_partial_8 |
| #define vstore_partial_16_9 vstore_partial_9 |
| #define vstore_partial_16_10 vstore_partial_10 |
| #define vstore_partial_16_11 vstore_partial_11 |
| #define vstore_partial_16_12 vstore_partial_12 |
| #define vstore_partial_16_13 vstore_partial_13 |
| #define vstore_partial_16_14 vstore_partial_14 |
| #define vstore_partial_16_15 vstore_partial_15 |
| #define vstore_partial_16_16 vstore_partial_16 |
| |
| |
| #define vstore_partial_1(DATA, OFFSET, PTR) \ |
| vstore1(DATA.s0, OFFSET, PTR); |
| |
| #define vstore_partial_2(DATA, OFFSET, PTR) \ |
| vstore2(DATA.s01, OFFSET, PTR); |
| |
| #define vstore_partial_3(DATA, OFFSET, PTR) \ |
| vstore3(DATA.s012, OFFSET, PTR); |
| |
| #define vstore_partial_4(DATA, OFFSET, PTR) \ |
| vstore4(DATA.s0123, OFFSET, PTR); |
| |
| #define vstore_partial_5(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore1(DATA.s4, OFFSET, PTR + 4); |
| |
| #define vstore_partial_6(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vstore_partial_7(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vstore_partial_8(DATA, OFFSET, PTR) \ |
| vstore8(DATA.s01234567, OFFSET, PTR); |
| |
| #define vstore_partial_9(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore1(DATA.s8, OFFSET, PTR + 8); |
| |
| #define vstore_partial_10(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vstore_partial_11(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s89a, OFFSET, PTR + 8); |
| |
| #define vstore_partial_12(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8); |
| |
| #define vstore_partial_13(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_14(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_15(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_16(DATA, OFFSET, PTR) \ |
| vstore16(DATA, OFFSET, PTR); |
| |
| |
| |
| |
| |
| #define convert_float_sat convert_float |
| #define convert_float1_sat convert_float |
| #define convert_float2_sat convert_float2 |
| #define convert_float3_sat convert_float3 |
| #define convert_float4_sat convert_float4 |
| #define convert_float8_sat convert_float8 |
| #define convert_float16_sat convert_float16 |
| #define convert_half_sat convert_float |
| #define convert_half1_sat convert_half |
| #define convert_half2_sat convert_half2 |
| #define convert_half3_sat convert_half3 |
| #define convert_half4_sat convert_half4 |
| #define convert_half8_sat convert_half8 |
| #define convert_half16_sat convert_half16 |
| |
| #define convert_float1 convert_float |
| #define convert_half1 convert_half |
| #define convert_char1 convert_char |
| #define convert_uchar1 convert_uchar |
| #define convert_short1 convert_short |
| #define convert_ushort1 convert_ushort |
| #define convert_int1 convert_int |
| #define convert_uint1 convert_uint |
| #define convert_long1 convert_long |
| #define convert_ulong1 convert_ulong |
| #define convert_double1 convert_double |
| |
| #define convert_char1_sat convert_char_sat |
| #define convert_uchar1_sat convert_uchar_sat |
| #define convert_uchar2_sat convert_uchar2_sat |
| #define convert_uchar3_sat convert_uchar3_sat |
| #define convert_uchar4_sat convert_uchar4_sat |
| #define convert_uchar8_sat convert_uchar8_sat |
| #define convert_uchar16_sat convert_uchar16_sat |
| #define convert_short1_sat convert_short_sat |
| #define convert_ushort1_sat convert_ushort_sat |
| #define convert_int1_sat convert_int_sat |
| #define convert_uint1_sat convert_uint_sat |
| #define convert_long1_sat convert_long_sat |
| #define convert_ulong1_sat convert_ulong_sat |
| #define convert_double1_sat convert_double_sat |
| |
| #define VEC_DATA_TYPE_STR(type, size) type##size |
| #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) |
| |
| #define CONVERT_STR(x, type) (convert_##type((x))) |
| #define CONVERT(x, type) CONVERT_STR(x, type) |
| |
| #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) |
| #define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) |
| |
| #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) |
| #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) |
| |
| #define select_vec_dt_uchar(size) uchar##size |
| #define select_vec_dt_char(size) char##size |
| #define select_vec_dt_ushort(size) ushort##size |
| #define select_vec_dt_short(size) short##size |
| #define select_vec_dt_half(size) short##size |
| #define select_vec_dt_uint(size) uint##size |
| #define select_vec_dt_int(size) int##size |
| #define select_vec_dt_float(size) int##size |
| #define select_vec_dt_ulong(size) ulong##size |
| #define select_vec_dt_long(size) long##size |
| |
| #define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) |
| #define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) |
| #define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define signed_int_vec_dt_uchar(size) char##size |
| #define signed_int_vec_dt_char(size) char##size |
| #define signed_int_vec_dt_ushort(size) short##size |
| #define signed_int_vec_dt_short(size) short##size |
| #define signed_int_vec_dt_half(size) short##size |
| #define signed_int_vec_dt_uint(size) int##size |
| #define signed_int_vec_dt_int(size) int##size |
| #define signed_int_vec_dt_float(size) int##size |
| #define signed_int_vec_dt_ulong(size) long##size |
| #define signed_int_vec_dt_long(size) long##size |
| |
| #define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size) |
| #define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size) |
| #define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define sum_reduce_1(x) (x) |
| #define sum_reduce_2(x) ((x).s0) + ((x).s1) |
| #define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) |
| #define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) |
| #define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) |
| #define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) |
| |
| #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) |
| #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) |
| |
| #define prod_reduce_1(x) (x) |
| #define prod_reduce_2(x) ((x).s0) * ((x).s1) |
| #define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2) |
| #define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23) |
| #define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567) |
| #define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF) |
| |
| #define PROD_REDUCE_STR(x, size) prod_reduce_##size(x) |
| #define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size) |
| |
| #define max_reduce_1(x) (x) |
| #define max_reduce_2(x) max(((x).s0), ((x).s1)) |
| #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) |
| #define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) |
| #define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) |
| #define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) |
| |
| #define MAX_REDUCE_STR(x, size) max_reduce_##size(x) |
| #define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) |
| |
| #define VECTOR_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define IMAGE_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR5D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_stride_v, \ |
| uint name##_step_v, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define CONVERT_TO_VECTOR_STRUCT(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x) |
| |
| #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0) |
| |
| #define CONVERT_TO_IMAGE_STRUCT(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y) |
| |
| #define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \ |
| tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| |
| typedef struct Vector |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| } Vector; |
| |
| |
| typedef struct Image |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| } Image; |
| |
| |
| typedef struct Tensor3D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| } Tensor3D; |
| |
| |
| typedef struct Tensor4D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| int stride_w; |
| } Tensor4D; |
| |
| |
| inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x) |
| { |
| Vector vector = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| }; |
| vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x; |
| return vector; |
| } |
| |
| |
| inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y; |
| return img; |
| } |
| |
| |
| inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return img; |
| } |
| |
| |
| inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return tensor; |
| } |
| |
| |
| inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| return tensor; |
| } |
| |
| inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w, |
| uint step_w, |
| uint mod_size) |
| { |
| Tensor4D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z, |
| .stride_w = stride_w |
| }; |
| |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w; |
| return tensor; |
| } |
| |
| |
| inline __global const uchar *vector_offset(const Vector *vec, int x) |
| { |
| return vec->ptr + x * vec->stride_x; |
| } |
| |
| |
| inline __global uchar *offset(const Image *img, int x, int y) |
| { |
| return img->ptr + x * img->stride_x + y * img->stride_y; |
| } |
| |
| |
| inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z; |
| } |
| |
| |
| inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w; |
| } |
| |
| |
| inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index) |
| { |
| uint num_elements = width * height; |
| |
| const uint z = index / num_elements; |
| |
| index %= num_elements; |
| |
| const uint y = index / width; |
| |
| index %= width; |
| |
| const uint x = index; |
| |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes; |
| } |
| |
| #endif |
| |
| #if GPU_ARCH == GPU_ARCH_BIFROST |
| #define MLA(a, b, c) (fma(c, b, a)) |
| #else |
| #define MLA(a, b, c) ((b) * (c) + (a)) |
| #endif |
| |
| |
| #define hard_swish_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667)) |
| |
| |
| #define logistic_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x))) |
| |
| |
| #define tanh_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x)) |
| |
| |
| #define relu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (max((DATA_TYPE)0.0, x)) |
| |
| |
| #define brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x))) |
| |
| |
| #define lu_brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL)) |
| |
| |
| #define lrelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0)) |
| |
| |
| #define srelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (log((DATA_TYPE)1.0 + exp(x))) |
| |
| |
| #define elu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))isgreaterequal(x, (DATA_TYPE)0.0))) |
| |
| |
| #define abs_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (fabs(x)) |
| |
| |
| #define square_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * x) |
| |
| |
| #define sqrt_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (sqrt(x)) |
| |
| |
| #define linear_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x)) |
| |
| |
| #define gelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * (DATA_TYPE)0.5 * ((DATA_TYPE)1.0 + erf(x / (DATA_TYPE)1.41421356237))) |
| |
| |
| #define identity_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x) |
| |
| #define ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) op##_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) |
| |
| #define ACTIVATION(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) |
| |
| #ifndef ARM_COMPUTE_HELPER_H |
| #define ARM_COMPUTE_HELPER_H |
| |
| |
| |
| |
| #define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define CONVERT_STORE_ROW_10(N0, DATA, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| |
| #define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
| #define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
| #define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ |
| if(!(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| |
| #if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| |
| |
| #if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) |
| |
| #elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) |
| |
| #else |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) |
| |
| #endif |
| |
| #endif |
| |
| |
| #if defined(PARTIAL_STORE_M0) |
| |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) |
| #else |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(y * M0)) |
| #endif |
| |
| |
| |
| #define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ |
| STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) |
| |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #pragma OPENCL EXTENSION cl_khr_fp16 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| #pragma OPENCL EXTENSION cl_arm_printf : enable |
| #endif |
| |
| #define GPU_ARCH_MIDGARD 0x100 |
| #define GPU_ARCH_BIFROST 0x200 |
| #define GPU_ARCH_VALHALL 0x300 |
| |
| |
| #define CONCAT(a, b) a##b |
| |
| |
| #define EXPAND(x) x |
| |
| |
| #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) |
| |
| |
| #define REV1(x) ((x)) |
| #define REV2(x) ((x).s10) |
| #define REV3(x) ((x).s210) |
| #define REV4(x) ((x).s3210) |
| #define REV8(x) ((x).s76543210) |
| #define REV16(x) ((x).sFEDCBA9876543210) |
| |
| |
| |
| #define REVERSE_STR(x, s) REV##s((x)) |
| #define REVERSE(x, s) REVERSE_STR(x, s) |
| |
| |
| |
| #define ROT1_0(x) ((x)) |
| #define ROT1_1(x) ((x)) |
| |
| #define ROT2_0(x) ((x)) |
| #define ROT2_1(x) ((x).s10) |
| #define ROT2_2(x) ((x)) |
| |
| #define ROT3_0(x) ((x)) |
| #define ROT3_1(x) ((x).s201) |
| #define ROT3_2(x) ((x).s120) |
| #define ROT3_3(x) ((x)) |
| |
| #define ROT4_0(x) ((x)) |
| #define ROT4_1(x) ((x).s3012) |
| #define ROT4_2(x) ((x).s2301) |
| #define ROT4_3(x) ((x).s1230) |
| #define ROT4_4(x) ((x)) |
| |
| #define ROT8_0(x) ((x)) |
| #define ROT8_1(x) ((x).s70123456) |
| #define ROT8_2(x) ((x).s67012345) |
| #define ROT8_3(x) ((x).s56701234) |
| #define ROT8_4(x) ((x).s45670123) |
| #define ROT8_5(x) ((x).s34567012) |
| #define ROT8_6(x) ((x).s23456701) |
| #define ROT8_7(x) ((x).s12345670) |
| #define ROT8_8(x) ((x)) |
| |
| #define ROT16_0(x) ((x)) |
| #define ROT16_1(x) ((x).sF0123456789ABCDE) |
| #define ROT16_2(x) ((x).sEF0123456789ABCD) |
| #define ROT16_3(x) ((x).sDEF0123456789ABC) |
| #define ROT16_4(x) ((x).sCDEF0123456789AB) |
| #define ROT16_5(x) ((x).sBCDEF0123456789A) |
| #define ROT16_6(x) ((x).sABCDEF0123456789) |
| #define ROT16_7(x) ((x).s9ABCDEF012345678) |
| #define ROT16_8(x) ((x).s89ABCDEF01234567) |
| #define ROT16_9(x) ((x).s789ABCDEF0123456) |
| #define ROT16_10(x) ((x).s6789ABCDEF012345) |
| #define ROT16_11(x) ((x).s56789ABCDEF01234) |
| #define ROT16_12(x) ((x).s456789ABCDEF0123) |
| #define ROT16_13(x) ((x).s3456789ABCDEF012) |
| #define ROT16_14(x) ((x).s23456789ABCDEF01) |
| #define ROT16_15(x) ((x).s123456789ABCDEF0) |
| #define ROT16_16(x) ((x)) |
| |
| |
| |
| #define ROTATE_STR(x, s, n) ROT##s##_##n(x) |
| #define ROTATE(x, s, n) ROTATE_STR(x, s, n) |
| |
| |
| |
| #define V_OFFS1(dt) (dt##1)(0) |
| #define V_OFFS2(dt) (dt##2)(0, 1) |
| #define V_OFFS3(dt) (dt##3)(0, 1, 2) |
| #define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) |
| #define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) |
| #define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) |
| |
| |
| |
| #define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) |
| #define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) |
| |
| |
| #define VLOAD_STR(size) vload##size |
| #define VLOAD(size) VLOAD_STR(size) |
| |
| |
| #define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size |
| #define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size) |
| |
| #define NO_LOAD(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vload_partial_1_0 NO_LOAD |
| #define vload_partial_1_1 vload1 |
| #define vload_partial_1_2 NO_LOAD |
| #define vload_partial_1_3 NO_LOAD |
| #define vload_partial_1_4 NO_LOAD |
| #define vload_partial_1_5 NO_LOAD |
| #define vload_partial_1_6 NO_LOAD |
| #define vload_partial_1_7 NO_LOAD |
| #define vload_partial_1_8 NO_LOAD |
| #define vload_partial_1_9 NO_LOAD |
| #define vload_partial_1_10 NO_LOAD |
| #define vload_partial_1_11 NO_LOAD |
| #define vload_partial_1_12 NO_LOAD |
| #define vload_partial_1_13 NO_LOAD |
| #define vload_partial_1_14 NO_LOAD |
| #define vload_partial_1_15 NO_LOAD |
| #define vload_partial_1_16 NO_LOAD |
| |
| #define vload_partial_2_0 NO_LOAD |
| #define vload_partial_2_1 vload_partial_1 |
| #define vload_partial_2_2 vload_partial_2 |
| #define vload_partial_2_3 NO_LOAD |
| #define vload_partial_2_4 NO_LOAD |
| #define vload_partial_2_5 NO_LOAD |
| #define vload_partial_2_6 NO_LOAD |
| #define vload_partial_2_7 NO_LOAD |
| #define vload_partial_2_8 NO_LOAD |
| #define vload_partial_2_9 NO_LOAD |
| #define vload_partial_2_10 NO_LOAD |
| #define vload_partial_2_11 NO_LOAD |
| #define vload_partial_2_12 NO_LOAD |
| #define vload_partial_2_13 NO_LOAD |
| #define vload_partial_2_14 NO_LOAD |
| #define vload_partial_2_15 NO_LOAD |
| #define vload_partial_2_16 NO_LOAD |
| |
| #define vload_partial_3_0 NO_LOAD |
| #define vload_partial_3_1 vload_partial_1 |
| #define vload_partial_3_2 vload_partial_2 |
| #define vload_partial_3_3 vload_partial_3 |
| #define vload_partial_3_4 NO_LOAD |
| #define vload_partial_3_5 NO_LOAD |
| #define vload_partial_3_6 NO_LOAD |
| #define vload_partial_3_7 NO_LOAD |
| #define vload_partial_3_8 NO_LOAD |
| #define vload_partial_3_9 NO_LOAD |
| #define vload_partial_3_10 NO_LOAD |
| #define vload_partial_3_11 NO_LOAD |
| #define vload_partial_3_12 NO_LOAD |
| #define vload_partial_3_13 NO_LOAD |
| #define vload_partial_3_14 NO_LOAD |
| #define vload_partial_3_15 NO_LOAD |
| #define vload_partial_3_16 NO_LOAD |
| |
| #define vload_partial_4_0 NO_LOAD |
| #define vload_partial_4_1 vload_partial_1 |
| #define vload_partial_4_2 vload_partial_2 |
| #define vload_partial_4_3 vload_partial_3 |
| #define vload_partial_4_4 vload_partial_4 |
| #define vload_partial_4_5 NO_LOAD |
| #define vload_partial_4_6 NO_LOAD |
| #define vload_partial_4_7 NO_LOAD |
| #define vload_partial_4_8 NO_LOAD |
| #define vload_partial_4_9 NO_LOAD |
| #define vload_partial_4_10 NO_LOAD |
| #define vload_partial_4_11 NO_LOAD |
| #define vload_partial_4_12 NO_LOAD |
| #define vload_partial_4_13 NO_LOAD |
| #define vload_partial_4_14 NO_LOAD |
| #define vload_partial_4_15 NO_LOAD |
| #define vload_partial_4_16 NO_LOAD |
| |
| #define vload_partial_8_0 NO_LOAD |
| #define vload_partial_8_1 vload_partial_1 |
| #define vload_partial_8_2 vload_partial_2 |
| #define vload_partial_8_3 vload_partial_3 |
| #define vload_partial_8_4 vload_partial_4 |
| #define vload_partial_8_5 vload_partial_5 |
| #define vload_partial_8_6 vload_partial_6 |
| #define vload_partial_8_7 vload_partial_7 |
| #define vload_partial_8_8 vload_partial_8 |
| #define vload_partial_8_9 NO_LOAD |
| #define vload_partial_8_10 NO_LOAD |
| #define vload_partial_8_11 NO_LOAD |
| #define vload_partial_8_12 NO_LOAD |
| #define vload_partial_8_13 NO_LOAD |
| #define vload_partial_8_14 NO_LOAD |
| #define vload_partial_8_15 NO_LOAD |
| #define vload_partial_8_16 NO_LOAD |
| |
| #define vload_partial_16_0 NO_LOAD |
| #define vload_partial_16_1 vload_partial_1 |
| #define vload_partial_16_2 vload_partial_2 |
| #define vload_partial_16_3 vload_partial_3 |
| #define vload_partial_16_4 vload_partial_4 |
| #define vload_partial_16_5 vload_partial_5 |
| #define vload_partial_16_6 vload_partial_6 |
| #define vload_partial_16_7 vload_partial_7 |
| #define vload_partial_16_8 vload_partial_8 |
| #define vload_partial_16_9 vload_partial_9 |
| #define vload_partial_16_10 vload_partial_10 |
| #define vload_partial_16_11 vload_partial_11 |
| #define vload_partial_16_12 vload_partial_12 |
| #define vload_partial_16_13 vload_partial_13 |
| #define vload_partial_16_14 vload_partial_14 |
| #define vload_partial_16_15 vload_partial_15 |
| #define vload_partial_16_16 vload_partial_16 |
| |
| |
| #define vload_partial_1(DATA, OFFSET, PTR) \ |
| DATA.s0 = vload1(OFFSET, PTR); |
| |
| #define vload_partial_2(DATA, OFFSET, PTR) \ |
| DATA.s01 = vload2(OFFSET, PTR); |
| |
| #define vload_partial_3(DATA, OFFSET, PTR) \ |
| DATA.s012 = vload3(OFFSET, PTR); |
| |
| #define vload_partial_4(DATA, OFFSET, PTR) \ |
| DATA.s0123 = vload4(OFFSET, PTR); |
| |
| #define vload_partial_5(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| DATA.s4 = vload1(OFFSET, PTR + 4); |
| |
| #define vload_partial_6(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vload_partial_7(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vload_partial_8(DATA, OFFSET, PTR) \ |
| DATA.s01234567 = vload8(OFFSET, PTR); |
| |
| #define vload_partial_9(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| DATA.s8 = vload1(OFFSET, PTR + 8); |
| |
| #define vload_partial_10(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vload_partial_11(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_3(DATA.s89A, OFFSET, PTR + 8); |
| |
| #define vload_partial_12(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_4(DATA.s89AB, OFFSET, PTR + 8); |
| |
| #define vload_partial_13(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_14(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_15(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_16(DATA, OFFSET, PTR) \ |
| DATA = vload16(OFFSET, PTR); |
| |
| |
| |
| #define PIXEL_UNIT4 1 |
| #define PIXEL_UNIT8 2 |
| #define PIXEL_UNIT16 4 |
| |
| |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) |
| |
| |
| #define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); |
| #endif |
| |
| #define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| #endif |
| |
| |
| #define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) |
| #define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) |
| |
| |
| #define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values) |
| #define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) |
| |
| #define VSTORE_STR(size) vstore##size |
| #define VSTORE(size) VSTORE_STR(size) |
| |
| #define float1 float |
| #define half1 half |
| #define char1 char |
| #define uchar1 uchar |
| #define short1 short |
| #define ushort1 ushort |
| #define int1 int |
| #define uint1 uint |
| #define long1 long |
| #define ulong1 ulong |
| #define double1 double |
| |
| #define vload1(OFFSET, PTR) *(OFFSET + PTR) |
| #define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA |
| |
| |
| #define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size |
| #define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) |
| |
| #define NO_STORE(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vstore_partial_1_0 NO_STORE |
| #define vstore_partial_1_1 vstore1 |
| #define vstore_partial_1_2 NO_STORE |
| #define vstore_partial_1_3 NO_STORE |
| #define vstore_partial_1_4 NO_STORE |
| #define vstore_partial_1_5 NO_STORE |
| #define vstore_partial_1_6 NO_STORE |
| #define vstore_partial_1_7 NO_STORE |
| #define vstore_partial_1_8 NO_STORE |
| #define vstore_partial_1_9 NO_STORE |
| #define vstore_partial_1_10 NO_STORE |
| #define vstore_partial_1_11 NO_STORE |
| #define vstore_partial_1_12 NO_STORE |
| #define vstore_partial_1_13 NO_STORE |
| #define vstore_partial_1_14 NO_STORE |
| #define vstore_partial_1_15 NO_STORE |
| #define vstore_partial_1_16 NO_STORE |
| |
| #define vstore_partial_2_0 NO_STORE |
| #define vstore_partial_2_1 vstore_partial_1 |
| #define vstore_partial_2_2 vstore_partial_2 |
| #define vstore_partial_2_3 NO_STORE |
| #define vstore_partial_2_4 NO_STORE |
| #define vstore_partial_2_5 NO_STORE |
| #define vstore_partial_2_6 NO_STORE |
| #define vstore_partial_2_7 NO_STORE |
| #define vstore_partial_2_8 NO_STORE |
| #define vstore_partial_2_9 NO_STORE |
| #define vstore_partial_2_10 NO_STORE |
| #define vstore_partial_2_11 NO_STORE |
| #define vstore_partial_2_12 NO_STORE |
| #define vstore_partial_2_13 NO_STORE |
| #define vstore_partial_2_14 NO_STORE |
| #define vstore_partial_2_15 NO_STORE |
| #define vstore_partial_2_16 NO_STORE |
| |
| #define vstore_partial_3_0 NO_STORE |
| #define vstore_partial_3_1 vstore_partial_1 |
| #define vstore_partial_3_2 vstore_partial_2 |
| #define vstore_partial_3_3 vstore_partial_3 |
| #define vstore_partial_3_4 NO_STORE |
| #define vstore_partial_3_5 NO_STORE |
| #define vstore_partial_3_6 NO_STORE |
| #define vstore_partial_3_7 NO_STORE |
| #define vstore_partial_3_8 NO_STORE |
| #define vstore_partial_3_9 NO_STORE |
| #define vstore_partial_3_10 NO_STORE |
| #define vstore_partial_3_11 NO_STORE |
| #define vstore_partial_3_12 NO_STORE |
| #define vstore_partial_3_13 NO_STORE |
| #define vstore_partial_3_14 NO_STORE |
| #define vstore_partial_3_15 NO_STORE |
| #define vstore_partial_3_16 NO_STORE |
| |
| #define vstore_partial_4_0 NO_STORE |
| #define vstore_partial_4_1 vstore_partial_1 |
| #define vstore_partial_4_2 vstore_partial_2 |
| #define vstore_partial_4_3 vstore_partial_3 |
| #define vstore_partial_4_4 vstore_partial_4 |
| #define vstore_partial_4_5 NO_STORE |
| #define vstore_partial_4_6 NO_STORE |
| #define vstore_partial_4_7 NO_STORE |
| #define vstore_partial_4_8 NO_STORE |
| #define vstore_partial_4_9 NO_STORE |
| #define vstore_partial_4_10 NO_STORE |
| #define vstore_partial_4_11 NO_STORE |
| #define vstore_partial_4_12 NO_STORE |
| #define vstore_partial_4_13 NO_STORE |
| #define vstore_partial_4_14 NO_STORE |
| #define vstore_partial_4_15 NO_STORE |
| #define vstore_partial_4_16 NO_STORE |
| |
| #define vstore_partial_8_0 NO_STORE |
| #define vstore_partial_8_1 vstore_partial_1 |
| #define vstore_partial_8_2 vstore_partial_2 |
| #define vstore_partial_8_3 vstore_partial_3 |
| #define vstore_partial_8_4 vstore_partial_4 |
| #define vstore_partial_8_5 vstore_partial_5 |
| #define vstore_partial_8_6 vstore_partial_6 |
| #define vstore_partial_8_7 vstore_partial_7 |
| #define vstore_partial_8_8 vstore_partial_8 |
| #define vstore_partial_8_9 NO_STORE |
| #define vstore_partial_8_10 NO_STORE |
| #define vstore_partial_8_11 NO_STORE |
| #define vstore_partial_8_12 NO_STORE |
| #define vstore_partial_8_13 NO_STORE |
| #define vstore_partial_8_14 NO_STORE |
| #define vstore_partial_8_15 NO_STORE |
| #define vstore_partial_8_16 NO_STORE |
| |
| #define vstore_partial_16_0 NO_STORE |
| #define vstore_partial_16_1 vstore_partial_1 |
| #define vstore_partial_16_2 vstore_partial_2 |
| #define vstore_partial_16_3 vstore_partial_3 |
| #define vstore_partial_16_4 vstore_partial_4 |
| #define vstore_partial_16_5 vstore_partial_5 |
| #define vstore_partial_16_6 vstore_partial_6 |
| #define vstore_partial_16_7 vstore_partial_7 |
| #define vstore_partial_16_8 vstore_partial_8 |
| #define vstore_partial_16_9 vstore_partial_9 |
| #define vstore_partial_16_10 vstore_partial_10 |
| #define vstore_partial_16_11 vstore_partial_11 |
| #define vstore_partial_16_12 vstore_partial_12 |
| #define vstore_partial_16_13 vstore_partial_13 |
| #define vstore_partial_16_14 vstore_partial_14 |
| #define vstore_partial_16_15 vstore_partial_15 |
| #define vstore_partial_16_16 vstore_partial_16 |
| |
| |
| #define vstore_partial_1(DATA, OFFSET, PTR) \ |
| vstore1(DATA.s0, OFFSET, PTR); |
| |
| #define vstore_partial_2(DATA, OFFSET, PTR) \ |
| vstore2(DATA.s01, OFFSET, PTR); |
| |
| #define vstore_partial_3(DATA, OFFSET, PTR) \ |
| vstore3(DATA.s012, OFFSET, PTR); |
| |
| #define vstore_partial_4(DATA, OFFSET, PTR) \ |
| vstore4(DATA.s0123, OFFSET, PTR); |
| |
| #define vstore_partial_5(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore1(DATA.s4, OFFSET, PTR + 4); |
| |
| #define vstore_partial_6(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vstore_partial_7(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vstore_partial_8(DATA, OFFSET, PTR) \ |
| vstore8(DATA.s01234567, OFFSET, PTR); |
| |
| #define vstore_partial_9(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore1(DATA.s8, OFFSET, PTR + 8); |
| |
| #define vstore_partial_10(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vstore_partial_11(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s89a, OFFSET, PTR + 8); |
| |
| #define vstore_partial_12(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8); |
| |
| #define vstore_partial_13(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_14(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_15(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_16(DATA, OFFSET, PTR) \ |
| vstore16(DATA, OFFSET, PTR); |
| |
| |
| |
| |
| |
| #define convert_float_sat convert_float |
| #define convert_float1_sat convert_float |
| #define convert_float2_sat convert_float2 |
| #define convert_float3_sat convert_float3 |
| #define convert_float4_sat convert_float4 |
| #define convert_float8_sat convert_float8 |
| #define convert_float16_sat convert_float16 |
| #define convert_half_sat convert_float |
| #define convert_half1_sat convert_half |
| #define convert_half2_sat convert_half2 |
| #define convert_half3_sat convert_half3 |
| #define convert_half4_sat convert_half4 |
| #define convert_half8_sat convert_half8 |
| #define convert_half16_sat convert_half16 |
| |
| #define convert_float1 convert_float |
| #define convert_half1 convert_half |
| #define convert_char1 convert_char |
| #define convert_uchar1 convert_uchar |
| #define convert_short1 convert_short |
| #define convert_ushort1 convert_ushort |
| #define convert_int1 convert_int |
| #define convert_uint1 convert_uint |
| #define convert_long1 convert_long |
| #define convert_ulong1 convert_ulong |
| #define convert_double1 convert_double |
| |
| #define convert_char1_sat convert_char_sat |
| #define convert_uchar1_sat convert_uchar_sat |
| #define convert_uchar2_sat convert_uchar2_sat |
| #define convert_uchar3_sat convert_uchar3_sat |
| #define convert_uchar4_sat convert_uchar4_sat |
| #define convert_uchar8_sat convert_uchar8_sat |
| #define convert_uchar16_sat convert_uchar16_sat |
| #define convert_short1_sat convert_short_sat |
| #define convert_ushort1_sat convert_ushort_sat |
| #define convert_int1_sat convert_int_sat |
| #define convert_uint1_sat convert_uint_sat |
| #define convert_long1_sat convert_long_sat |
| #define convert_ulong1_sat convert_ulong_sat |
| #define convert_double1_sat convert_double_sat |
| |
| #define VEC_DATA_TYPE_STR(type, size) type##size |
| #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) |
| |
| #define CONVERT_STR(x, type) (convert_##type((x))) |
| #define CONVERT(x, type) CONVERT_STR(x, type) |
| |
| #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) |
| #define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) |
| |
| #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) |
| #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) |
| |
| #define select_vec_dt_uchar(size) uchar##size |
| #define select_vec_dt_char(size) char##size |
| #define select_vec_dt_ushort(size) ushort##size |
| #define select_vec_dt_short(size) short##size |
| #define select_vec_dt_half(size) short##size |
| #define select_vec_dt_uint(size) uint##size |
| #define select_vec_dt_int(size) int##size |
| #define select_vec_dt_float(size) int##size |
| #define select_vec_dt_ulong(size) ulong##size |
| #define select_vec_dt_long(size) long##size |
| |
| #define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) |
| #define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) |
| #define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define signed_int_vec_dt_uchar(size) char##size |
| #define signed_int_vec_dt_char(size) char##size |
| #define signed_int_vec_dt_ushort(size) short##size |
| #define signed_int_vec_dt_short(size) short##size |
| #define signed_int_vec_dt_half(size) short##size |
| #define signed_int_vec_dt_uint(size) int##size |
| #define signed_int_vec_dt_int(size) int##size |
| #define signed_int_vec_dt_float(size) int##size |
| #define signed_int_vec_dt_ulong(size) long##size |
| #define signed_int_vec_dt_long(size) long##size |
| |
| #define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size) |
| #define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size) |
| #define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define sum_reduce_1(x) (x) |
| #define sum_reduce_2(x) ((x).s0) + ((x).s1) |
| #define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) |
| #define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) |
| #define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) |
| #define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) |
| |
| #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) |
| #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) |
| |
| #define prod_reduce_1(x) (x) |
| #define prod_reduce_2(x) ((x).s0) * ((x).s1) |
| #define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2) |
| #define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23) |
| #define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567) |
| #define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF) |
| |
| #define PROD_REDUCE_STR(x, size) prod_reduce_##size(x) |
| #define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size) |
| |
| #define max_reduce_1(x) (x) |
| #define max_reduce_2(x) max(((x).s0), ((x).s1)) |
| #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) |
| #define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) |
| #define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) |
| #define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) |
| |
| #define MAX_REDUCE_STR(x, size) max_reduce_##size(x) |
| #define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) |
| |
| #define VECTOR_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define IMAGE_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR5D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_stride_v, \ |
| uint name##_step_v, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define CONVERT_TO_VECTOR_STRUCT(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x) |
| |
| #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0) |
| |
| #define CONVERT_TO_IMAGE_STRUCT(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y) |
| |
| #define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \ |
| tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| |
| typedef struct Vector |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| } Vector; |
| |
| |
| typedef struct Image |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| } Image; |
| |
| |
| typedef struct Tensor3D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| } Tensor3D; |
| |
| |
| typedef struct Tensor4D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| int stride_w; |
| } Tensor4D; |
| |
| |
| inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x) |
| { |
| Vector vector = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| }; |
| vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x; |
| return vector; |
| } |
| |
| |
| inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y; |
| return img; |
| } |
| |
| |
| inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return img; |
| } |
| |
| |
| inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return tensor; |
| } |
| |
| |
| inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| return tensor; |
| } |
| |
| inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w, |
| uint step_w, |
| uint mod_size) |
| { |
| Tensor4D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z, |
| .stride_w = stride_w |
| }; |
| |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w; |
| return tensor; |
| } |
| |
| |
| inline __global const uchar *vector_offset(const Vector *vec, int x) |
| { |
| return vec->ptr + x * vec->stride_x; |
| } |
| |
| |
| inline __global uchar *offset(const Image *img, int x, int y) |
| { |
| return img->ptr + x * img->stride_x + y * img->stride_y; |
| } |
| |
| |
| inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z; |
| } |
| |
| |
| inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w; |
| } |
| |
| |
| inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index) |
| { |
| uint num_elements = width * height; |
| |
| const uint z = index / num_elements; |
| |
| index %= num_elements; |
| |
| const uint y = index / width; |
| |
| index %= width; |
| |
| const uint x = index; |
| |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes; |
| } |
| |
| #endif |
| |
| |
| #define SCALAR_ACCESS_STR(offset, n0, x) scalar_access_##offset##_##n0(x) |
| #define SCALAR_ACCESS(offset, n0, x) SCALAR_ACCESS_STR(offset, n0, x) |
| |
| |
| #define scalar_access_0_1(x) ((x).s0) |
| #define scalar_access_0_2(x) ((x).s01) |
| #define scalar_access_0_3(x) ((x).s012) |
| #define scalar_access_0_4(x) ((x).s0123) |
| #define scalar_access_0_8(x) ((x).s01234567) |
| #define scalar_access_0_16(x) ((x).s0123456789ABCDEF) |
| |
| |
| #define scalar_access_1_1(x) ((x).s1) |
| #define scalar_access_1_2(x) ((x).s12) |
| #define scalar_access_1_3(x) ((x).s123) |
| #define scalar_access_1_4(x) ((x).s1234) |
| #define scalar_access_1_8(x) ((x).s12345678) |
| |
| |
| #define scalar_access_2_1(x) ((x).s2) |
| #define scalar_access_2_2(x) ((x).s23) |
| #define scalar_access_2_3(x) ((x).s234) |
| #define scalar_access_2_4(x) ((x).s2345) |
| #define scalar_access_2_8(x) ((x).s23456789) |
| |
| |
| #define scalar_access_3_1(x) ((x).s3) |
| #define scalar_access_3_2(x) ((x).s34) |
| #define scalar_access_3_3(x) ((x).s345) |
| #define scalar_access_3_4(x) ((x).s3456) |
| #define scalar_access_3_8(x) ((x).s3456789A) |
| |
| |
| #define scalar_access_4_1(x) ((x).s4) |
| #define scalar_access_4_2(x) ((x).s45) |
| #define scalar_access_4_3(x) ((x).s456) |
| #define scalar_access_4_4(x) ((x).s4567) |
| #define scalar_access_4_8(x) ((x).s456789AB) |
| |
| |
| #define scalar_access_8_1(x) ((x).s8) |
| #define scalar_access_8_2(x) ((x).s89) |
| #define scalar_access_8_3(x) ((x).s89A) |
| #define scalar_access_8_4(x) ((x).s89AB) |
| #define scalar_access_8_8(x) ((x).s89ABCDEF) |
| |
| |
| #define scalar_access_12_1(x) ((x).sC) |
| #define scalar_access_12_2(x) ((x).sCD) |
| #define scalar_access_12_3(x) ((x).sCDE) |
| #define scalar_access_12_4(x) ((x).sCDEF) |
| |
| |
| #define scalar_access_16_1(x) ((x).sF) |
| |
| |
| #define LOAD_TENSOR_ROW_0(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| ({}) |
| |
| #define LOAD_TENSOR_ROW_1(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##0) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define LOAD_TENSOR_ROW_2(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_1(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##1) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define LOAD_TENSOR_ROW_3(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_2(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##2) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define LOAD_TENSOR_ROW_4(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_3(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##3) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define LOAD_TENSOR_ROW_5(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_4(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##4) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define LOAD_TENSOR_ROW_6(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_5(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##5) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define LOAD_TENSOR_ROW_7(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_6(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##6) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define LOAD_TENSOR_ROW_8(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_7(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##7) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define LOAD_TENSOR_ROW_9(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_8(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##8) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define LOAD_TENSOR_ROW_10(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_9(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##9) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define LOAD_TENSOR_ROW_11(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_10(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##A) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define LOAD_TENSOR_ROW_12(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_11(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##B) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define LOAD_TENSOR_ROW_13(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_12(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##C) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define LOAD_TENSOR_ROW_14(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_13(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##D) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define LOAD_TENSOR_ROW_15(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_14(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##E) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define LOAD_TENSOR_ROW_16(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| LOAD_TENSOR_ROW_15(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \ |
| SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##F) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define LOAD_TENSOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) LOAD_TENSOR_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) |
| #define LOAD_TENSOR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) LOAD_TENSOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) |
| |
| |
| |
| #define LOAD_TENSOR_M0X0(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| ({}) |
| |
| #define LOAD_TENSOR_M0X1(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X2(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X3(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X4(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X5(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 1, DATA_TYPE, a, input_ptr + 4 * sizeof(DATA_TYPE), 4, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X6(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 2, DATA_TYPE, a, input_ptr + 4 * sizeof(DATA_TYPE), 4, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X7(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 3, DATA_TYPE, a, input_ptr + 4 * sizeof(DATA_TYPE), 4, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X8(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X9(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 1, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X10(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 2, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X11(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 3, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X12(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X13(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 1, DATA_TYPE, a, input_ptr + 12 * sizeof(DATA_TYPE), 12, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X14(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 2, DATA_TYPE, a, input_ptr + 12 * sizeof(DATA_TYPE), 12, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X15(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); \ |
| LOAD_TENSOR(M0, 3, DATA_TYPE, a, input_ptr + 12 * sizeof(DATA_TYPE), 12, src_stride_y, zin); |
| |
| #define LOAD_TENSOR_M0X16(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \ |
| LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); |
| |
| |
| |
| #define LOAD_TENSOR_M0XN0_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) LOAD_TENSOR_M0X##N0(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define LOAD_TENSOR_M0XN0(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) LOAD_TENSOR_M0XN0_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| #define LOAD_ROW_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##0 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 0 * STRIDE_Y + Z##0)); |
| |
| #define LOAD_ROW_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##1 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 1 * STRIDE_Y + Z##1)); |
| |
| #define LOAD_ROW_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##2 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 2 * STRIDE_Y + Z##2)); |
| |
| #define LOAD_ROW_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##3 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 3 * STRIDE_Y + Z##3)); |
| |
| #define LOAD_ROW_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##4 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 4 * STRIDE_Y + Z##4)); |
| |
| #define LOAD_ROW_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##5 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 5 * STRIDE_Y + Z##5)); |
| |
| #define LOAD_ROW_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##6 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 6 * STRIDE_Y + Z##6)); |
| |
| #define LOAD_ROW_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##7 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 7 * STRIDE_Y + Z##7)); |
| |
| #define LOAD_ROW_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##8 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 8 * STRIDE_Y + Z##8)); |
| |
| #define LOAD_ROW_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##9 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 9 * STRIDE_Y + Z##9)); |
| |
| #define LOAD_ROW_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##A = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 10 * STRIDE_Y + Z##A)); |
| |
| #define LOAD_ROW_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##B = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 11 * STRIDE_Y + Z##B)); |
| |
| #define LOAD_ROW_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##C = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 12 * STRIDE_Y + Z##C)); |
| |
| #define LOAD_ROW_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##D = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 13 * STRIDE_Y + Z##D)); |
| |
| #define LOAD_ROW_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##E = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 14 * STRIDE_Y + Z##E)); |
| |
| #define LOAD_ROW_16(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##F = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| |
| #define LOAD_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) LOAD_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) |
| #define LOAD_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) LOAD_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) |
| |
| |
| |
| #define LOAD_ROW_PARTIAL_1(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + OFFSET + 0 * STRIDE_Y + Z##0)); |
| |
| #define LOAD_ROW_PARTIAL_2(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_1(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + OFFSET + 1 * STRIDE_Y + Z##1)); |
| |
| #define LOAD_ROW_PARTIAL_3(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_2(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + OFFSET + 2 * STRIDE_Y + Z##2)); |
| |
| #define LOAD_ROW_PARTIAL_4(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_3(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + OFFSET + 3 * STRIDE_Y + Z##3)); |
| |
| #define LOAD_ROW_PARTIAL_5(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_4(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + OFFSET + 4 * STRIDE_Y + Z##4)); |
| |
| #define LOAD_ROW_PARTIAL_6(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_5(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + OFFSET + 5 * STRIDE_Y + Z##5)); |
| |
| #define LOAD_ROW_PARTIAL_7(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_6(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + OFFSET + 6 * STRIDE_Y + Z##6)); |
| |
| #define LOAD_ROW_PARTIAL_8(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_7(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + OFFSET + 7 * STRIDE_Y + Z##7)); |
| |
| #define LOAD_ROW_PARTIAL_9(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_8(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + OFFSET + 8 * STRIDE_Y + Z##8)); |
| |
| #define LOAD_ROW_PARTIAL_10(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_9(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + OFFSET + 9 * STRIDE_Y + Z##9)); |
| |
| #define LOAD_ROW_PARTIAL_11(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_10(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + OFFSET + 10 * STRIDE_Y + Z##A)); |
| |
| #define LOAD_ROW_PARTIAL_12(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_11(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + OFFSET + 11 * STRIDE_Y + Z##B)); |
| |
| #define LOAD_ROW_PARTIAL_13(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_12(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + OFFSET + 12 * STRIDE_Y + Z##C)); |
| |
| #define LOAD_ROW_PARTIAL_14(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_13(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + OFFSET + 13 * STRIDE_Y + Z##D)); |
| |
| #define LOAD_ROW_PARTIAL_15(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_14(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + OFFSET + 14 * STRIDE_Y + Z##E)); |
| |
| #define LOAD_ROW_PARTIAL_16(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| LOAD_ROW_PARTIAL_15(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \ |
| VLOAD_PARTIAL(N0, LOAD_N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + OFFSET + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define LOAD_BLOCK_PARTIAL_STR(LOAD_M0, LOAD_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) LOAD_ROW_PARTIAL_##LOAD_M0(N0, LOAD_N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) |
| #define LOAD_BLOCK_PARTIAL(LOAD_M0, LOAD_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) LOAD_BLOCK_PARTIAL_STR(LOAD_M0, LOAD_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) |
| |
| #define LOAD_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ |
| { \ |
| LOAD_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \ |
| } \ |
| else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ |
| { \ |
| LOAD_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \ |
| } \ |
| else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ |
| { \ |
| LOAD_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| LOAD_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \ |
| } |
| |
| #define LOAD_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X)) \ |
| { \ |
| LOAD_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| LOAD_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \ |
| } |
| |
| #define LOAD_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ |
| if(!(PARTIAL_COND_Y)) \ |
| { \ |
| LOAD_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| LOAD_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z); \ |
| } |
| |
| |
| #if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| LOAD_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) |
| |
| #elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), BASENAME, 0); \ |
| LOAD_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) |
| |
| #elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 |
| |
| #define LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), BASENAME, 0); \ |
| LOAD_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) |
| |
| #else |
| |
| #define LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), BASENAME, 0); \ |
| LOAD_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) |
| |
| #endif |
| |
| |
| #define LOAD_TEXTURE2D_ROW_1(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##0 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 0 * X_STEP_ROW), (Y_COORD + 0 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_2(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_1(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##1 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 1 * X_STEP_ROW), (Y_COORD + 1 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_3(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_2(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##2 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 2 * X_STEP_ROW), (Y_COORD + 2 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_4(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_3(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##3 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 3 * X_STEP_ROW), (Y_COORD + 3 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_5(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_4(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##4 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 4 * X_STEP_ROW), (Y_COORD + 4 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_6(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_5(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##5 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 5 * X_STEP_ROW), (Y_COORD + 5 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_7(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_6(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##6 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 6 * X_STEP_ROW), (Y_COORD + 6 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_8(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_7(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##7 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 7 * X_STEP_ROW), (Y_COORD + 7 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_9(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_8(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##8 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 8 * X_STEP_ROW), (Y_COORD + 8 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_10(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_9(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##9 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 9 * X_STEP_ROW), (Y_COORD + 9 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_11(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_10(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##A = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 10 * X_STEP_ROW), (Y_COORD + 10 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_12(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_11(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##B = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 11 * X_STEP_ROW), (Y_COORD + 11 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_13(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_12(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##C = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 12 * X_STEP_ROW), (Y_COORD + 12 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_14(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_13(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##D = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 13 * X_STEP_ROW), (Y_COORD + 13 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_15(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_14(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##E = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 14 * X_STEP_ROW), (Y_COORD + 14 * Y_STEP_ROW)) |
| |
| #define LOAD_TEXTURE2D_ROW_16(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| LOAD_TEXTURE2D_ROW_15(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \ |
| BASENAME##F = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 15 * X_STEP_ROW), (Y_COORD + 15 * Y_STEP_ROW)) |
| |
| |
| |
| #define LOAD_TEXTURE2D_STR(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) LOAD_TEXTURE2D_ROW_##M0(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) |
| #define LOAD_TEXTURE2D(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) LOAD_TEXTURE2D_STR(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) |
| |
| |
| |
| #define LOAD_ROW_INDIRECT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##0; \ |
| if(Y_MASK##0 != 0) \ |
| BASENAME##0 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##0 * STRIDE_Y)); \ |
| else \ |
| BASENAME##0 = 0; |
| |
| #define LOAD_ROW_INDIRECT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##1; \ |
| if(Y_MASK##1 != 0) \ |
| BASENAME##1 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##1 * STRIDE_Y)); \ |
| else \ |
| BASENAME##1 = 0; |
| |
| #define LOAD_ROW_INDIRECT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##2; \ |
| if(Y_MASK##2 != 0) \ |
| BASENAME##2 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##2 * STRIDE_Y)); \ |
| else \ |
| BASENAME##2 = 0; |
| |
| #define LOAD_ROW_INDIRECT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##3; \ |
| if(Y_MASK##3 != 0) \ |
| BASENAME##3 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##3 * STRIDE_Y)); \ |
| else \ |
| BASENAME##3 = 0; |
| |
| #define LOAD_ROW_INDIRECT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##4; \ |
| if(Y_MASK##4 != 0) \ |
| BASENAME##4 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##4 * STRIDE_Y)); \ |
| else \ |
| BASENAME##4 = 0; |
| |
| #define LOAD_ROW_INDIRECT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##5; \ |
| if(Y_MASK##5 != 0) \ |
| BASENAME##5 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##5 * STRIDE_Y)); \ |
| else \ |
| BASENAME##5 = 0; |
| |
| #define LOAD_ROW_INDIRECT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##6; \ |
| if(Y_MASK##6 != 0) \ |
| BASENAME##6 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##6 * STRIDE_Y)); \ |
| else \ |
| BASENAME##6 = 0; |
| |
| #define LOAD_ROW_INDIRECT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##7; \ |
| if(Y_MASK##7 != 0) \ |
| BASENAME##7 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##7 * STRIDE_Y)); \ |
| else \ |
| BASENAME##7 = 0; |
| |
| #define LOAD_ROW_INDIRECT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##8; \ |
| if(Y_MASK##8 != 0) \ |
| BASENAME##8 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##8 * STRIDE_Y)); \ |
| else \ |
| BASENAME##8 = 0; |
| |
| #define LOAD_ROW_INDIRECT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##9; \ |
| if(Y_MASK##9 != 0) \ |
| BASENAME##9 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##9 * STRIDE_Y)); \ |
| else \ |
| BASENAME##9 = 0; |
| |
| #define LOAD_ROW_INDIRECT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##A; \ |
| if(Y_MASK##A != 0) \ |
| BASENAME##A = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##A * STRIDE_Y)); \ |
| else \ |
| BASENAME##A = 0; |
| |
| #define LOAD_ROW_INDIRECT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##B; \ |
| if(Y_MASK##B != 0) \ |
| BASENAME##B = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##B * STRIDE_Y)); \ |
| else \ |
| BASENAME##B = 0; |
| |
| #define LOAD_ROW_INDIRECT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##C; \ |
| if(Y_MASK##C != 0) \ |
| BASENAME##C = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##C * STRIDE_Y)); \ |
| else \ |
| BASENAME##C = 0; |
| |
| #define LOAD_ROW_INDIRECT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##D; \ |
| if(Y_MASK##D != 0) \ |
| BASENAME##D = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##D * STRIDE_Y)); \ |
| else \ |
| BASENAME##D = 0; |
| |
| #define LOAD_ROW_INDIRECT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##E; \ |
| if(Y_MASK##E != 0) \ |
| BASENAME##E = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##E * STRIDE_Y)); \ |
| else \ |
| BASENAME##E = 0; |
| |
| #define LOAD_ROW_INDIRECT_16(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| LOAD_ROW_INDIRECT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##F; \ |
| if(Y_MASK##F != 0) \ |
| BASENAME##F = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + Y##F * STRIDE_Y)); \ |
| else \ |
| BASENAME##F = 0; |
| |
| |
| #define LOAD_BLOCK_INDIRECT_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) LOAD_ROW_INDIRECT_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) |
| #define LOAD_BLOCK_INDIRECT(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) LOAD_BLOCK_INDIRECT_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Y, Y_MASK) |
| |
| |
| #define LOAD_ELEMENT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##0 = *((__global DATA_TYPE *)(PTR + OFFSET + 0 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##1 = *((__global DATA_TYPE *)(PTR + OFFSET + 1 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##2 = *((__global DATA_TYPE *)(PTR + OFFSET + 2 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##3 = *((__global DATA_TYPE *)(PTR + OFFSET + 3 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##4 = *((__global DATA_TYPE *)(PTR + OFFSET + 4 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##5 = *((__global DATA_TYPE *)(PTR + OFFSET + 5 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##6 = *((__global DATA_TYPE *)(PTR + OFFSET + 6 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##7 = *((__global DATA_TYPE *)(PTR + OFFSET + 7 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##8 = *((__global DATA_TYPE *)(PTR + OFFSET + 8 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##9 = *((__global DATA_TYPE *)(PTR + OFFSET + 9 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##A = *((__global DATA_TYPE *)(PTR + OFFSET + 10 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##B = *((__global DATA_TYPE *)(PTR + OFFSET + 11 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##C = *((__global DATA_TYPE *)(PTR + OFFSET + 12 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##D = *((__global DATA_TYPE *)(PTR + OFFSET + 13 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##E = *((__global DATA_TYPE *)(PTR + OFFSET + 14 * STRIDE_Y)); |
| |
| #define LOAD_ELEMENT_16(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| LOAD_ELEMENT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \ |
| VEC_DATA_TYPE(DATA_TYPE, N0) \ |
| BASENAME##F = *((__global DATA_TYPE *)(PTR + OFFSET + 15 * STRIDE_Y)); |
| |
| |
| |
| |
| #define LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) LOAD_ELEMENT_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) |
| #define LOAD_SCALAR_AS_VECTOR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) |
| |
| |
| |
| #define CALCULATE_Z_OFFSET_1(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| Z##0 = (0 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \ |
| Z##0 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##0); \ |
| Z##0 *= (CROSS_PLANE_PAD * STRIDE_Y); |
| |
| #define CALCULATE_Z_OFFSET_2(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| CALCULATE_Z_OFFSET_1(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| Z##1 = (1 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \ |
| Z##1 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##1); \ |
| Z##1 *= (CROSS_PLANE_PAD * STRIDE_Y); |
| |
| #define CALCULATE_Z_OFFSET_3(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| CALCULATE_Z_OFFSET_2(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| Z##2 = (2 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \ |
| Z##2 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##2); \ |
| Z##2 *= (CROSS_PLANE_PAD * STRIDE_Y); |
| |
| #define CALCULATE_Z_OFFSET_4(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| CALCULATE_Z_OFFSET_3(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| Z##3 = (3 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \ |
| Z##3 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##3); \ |
| Z##3 *= (CROSS_PLANE_PAD * STRIDE_Y); |
| |
| #define CALCULATE_Z_OFFSET_5(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| CALCULATE_Z_OFFSET_4(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| Z##4 = (4 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \ |
| Z##4 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##4); \ |
| Z##4 *= (CROSS_PLANE_PAD * STRIDE_Y); |
| |
| #define CALCULATE_Z_OFFSET_6(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| CALCULATE_Z_OFFSET_5(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| Z##5 = (5 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \ |
| Z##5 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##5); \ |
| Z##5 *= (CROSS_PLANE_PAD * STRIDE_Y); |
| |
| #define CALCULATE_Z_OFFSET_7(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| CALCULATE_Z_OFFSET_6(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| Z##6 = (6 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \ |
| Z##6 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##6); \ |
| Z##6 *= (CROSS_PLANE_PAD * STRIDE_Y); |
| |
| #define CALCULATE_Z_OFFSET_8(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| CALCULATE_Z_OFFSET_7(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \ |
| Z##7 = (7 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \ |
| Z##7 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##7); \ |
| Z##7 *= (CROSS_PLANE_PAD * STRIDE_Y); |
| |
| |
| |
| |
| #define CALCULATE_Z_OFFSET_STR(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) CALCULATE_Z_OFFSET_##M0(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) |
| #define CALCULATE_Z_OFFSET(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) CALCULATE_Z_OFFSET_STR(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) |
| |
| |
| |
| #define SCALE_ROW_1(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##0 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_2(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_1(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##1 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_3(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_2(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##2 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_4(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_3(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##3 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_5(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_4(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##4 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_6(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_5(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##5 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_7(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_6(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##6 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_8(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_7(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##7 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_9(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_8(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##8 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_10(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_9(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##9 *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_11(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_10(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##A *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_12(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_11(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##B *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_13(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_12(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##C *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_14(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_13(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##D *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_15(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_14(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##E *= (DATA_TYPE)SCALE; |
| |
| #define SCALE_ROW_16(DATA_TYPE, BASENAME, SCALE) \ |
| SCALE_ROW_15(DATA_TYPE, BASENAME, SCALE) \ |
| BASENAME##F *= (DATA_TYPE)SCALE; |
| |
| |
| |
| #define SCALE_BLOCK_STR(N, DATA_TYPE, BASENAME, SCALE) SCALE_ROW_##N(DATA_TYPE, BASENAME, SCALE) |
| #define SCALE_BLOCK(N, DATA_TYPE, BASENAME, SCALE) SCALE_BLOCK_STR(N, DATA_TYPE, BASENAME, SCALE) |
| |
| |
| |
| #define COLUMN_VECTOR1(IDX_COL, BASENAME, X, TYPE) \ |
| TYPE BASENAME##IDX_COL = (TYPE)((X##0).s##IDX_COL); |
| #define COLUMN_VECTOR2(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 2) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 2))((X##0).s##IDX_COL, (X##1).s##IDX_COL); |
| #define COLUMN_VECTOR3(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 3) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 3))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL); |
| #define COLUMN_VECTOR4(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 4) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 4))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL); |
| #define COLUMN_VECTOR8(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 8) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 8))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL, (X##4).s##IDX_COL, (X##5).s##IDX_COL, (X##6).s##IDX_COL, (X##7).s##IDX_COL); |
| #define COLUMN_VECTOR16(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 16) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 16))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL, (X##4).s##IDX_COL, (X##5).s##IDX_COL, (X##6).s##IDX_COL, (X##7).s##IDX_COL, (X##8).s##IDX_COL, (X##9).s##IDX_COL, (X##A).s##IDX_COL, (X##B).s##IDX_COL, (X##C).s##IDX_COL, (X##D).s##IDX_COL, (X##E).s##IDX_COL, (X##F).s##IDX_COL); |
| |
| |
| |
| #define COLUMN_VECTOR_SCALAR1(IDX_COL, BASENAME, X, TYPE) \ |
| TYPE BASENAME##IDX_COL = (TYPE)((X##0)); |
| #define COLUMN_VECTOR_SCALAR2(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 2) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 2))((X##0), (X##1)); |
| #define COLUMN_VECTOR_SCALAR3(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 3) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 3))((X##0), (X##1), (X##2)); |
| #define COLUMN_VECTOR_SCALAR4(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 4) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 4))((X##0), (X##1), (X##2), (X##3)); |
| #define COLUMN_VECTOR_SCALAR8(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 8) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 8))((X##0), (X##1), (X##2), (X##3), (X##4), (X##5), (X##6), (X##7)); |
| #define COLUMN_VECTOR_SCALAR16(IDX_COL, BASENAME, X, TYPE) \ |
| VEC_DATA_TYPE(TYPE, 16) \ |
| BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 16))((X##0), (X##1), (X##2), (X##3), (X##4), (X##5), (X##6), (X##7), (X##8), (X##9), (X##A), (X##B), (X##C), (X##D), (X##E), (X##F)); |
| |
| |
| |
| #define TRANSPOSE_K0X1(K0, BASENAME, BS, TYPE) \ |
| COLUMN_VECTOR_SCALAR(K0, 0, BASENAME, BS, TYPE); |
| #define TRANSPOSE_K0X2(K0, BASENAME, BS, TYPE) \ |
| COLUMN_VECTOR(K0, 0, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, 1, BASENAME, BS, TYPE); |
| #define TRANSPOSE_K0X3(K0, BASENAME, BS, TYPE) \ |
| TRANSPOSE_K0X2(K0, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, 2, BASENAME, BS, TYPE); |
| #define TRANSPOSE_K0X4(K0, BASENAME, BS, TYPE) \ |
| TRANSPOSE_K0X3(K0, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, 3, BASENAME, BS, TYPE); |
| #define TRANSPOSE_K0X8(K0, BASENAME, BS, TYPE) \ |
| TRANSPOSE_K0X4(K0, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, 4, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, 5, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, 6, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, 7, BASENAME, BS, TYPE); |
| #define TRANSPOSE_K0X16(K0, BASENAME, BS, TYPE) \ |
| TRANSPOSE_K0X8(K0, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, 8, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, 9, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, A, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, B, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, C, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, D, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, E, BASENAME, BS, TYPE); \ |
| COLUMN_VECTOR(K0, F, BASENAME, BS, TYPE); |
| |
| |
| |
| |
| #define COLUMN_VECTOR(K0, IDX_COL, BASENAME, BS, TYPE) \ |
| CONCAT(COLUMN_VECTOR, K0) \ |
| (IDX_COL, BASENAME, BS, TYPE); |
| |
| |
| #define COLUMN_VECTOR_SCALAR(K0, IDX_COL, BASENAME, BS, TYPE) \ |
| CONCAT(COLUMN_VECTOR_SCALAR, K0) \ |
| (IDX_COL, BASENAME, BS, TYPE); |
| |
| |
| #define TRANSPOSE_K0XN0(K0, N0, BASENAME, BS, TYPE) \ |
| CONCAT(TRANSPOSE_K0X, N0) \ |
| (K0, BASENAME, BS, TYPE); |
| |
| |
| #define ADD_ROW_1(BASENAME, BIAS) \ |
| BASENAME##0 += BIAS##0; |
| |
| #define ADD_ROW_2(BASENAME, BIAS) \ |
| ADD_ROW_1(BASENAME, BIAS) \ |
| BASENAME##1 += BIAS##1; |
| |
| #define ADD_ROW_3(BASENAME, BIAS) \ |
| ADD_ROW_2(BASENAME, BIAS) \ |
| BASENAME##2 += BIAS##2; |
| |
| #define ADD_ROW_4(BASENAME, BIAS) \ |
| ADD_ROW_3(BASENAME, BIAS) \ |
| BASENAME##3 += BIAS##3; |
| |
| #define ADD_ROW_5(BASENAME, BIAS) \ |
| ADD_ROW_4(BASENAME, BIAS) \ |
| BASENAME##4 += BIAS##4; |
| |
| #define ADD_ROW_6(BASENAME, BIAS) \ |
| ADD_ROW_5(BASENAME, BIAS) \ |
| BASENAME##5 += BIAS##5; |
| |
| #define ADD_ROW_7(BASENAME, BIAS) \ |
| ADD_ROW_6(BASENAME, BIAS) \ |
| BASENAME##6 += BIAS##6; |
| |
| #define ADD_ROW_8(BASENAME, BIAS) \ |
| ADD_ROW_7(BASENAME, BIAS) \ |
| BASENAME##7 += BIAS##7; |
| |
| #define ADD_ROW_9(BASENAME, BIAS) \ |
| ADD_ROW_8(BASENAME, BIAS) \ |
| BASENAME##8 += BIAS##8; |
| |
| #define ADD_ROW_10(BASENAME, BIAS) \ |
| ADD_ROW_9(BASENAME, BIAS) \ |
| BASENAME##9 += BIAS##9; |
| |
| #define ADD_ROW_11(BASENAME, BIAS) \ |
| ADD_ROW_10(BASENAME, BIAS) \ |
| BASENAME##A += BIAS##A; |
| |
| #define ADD_ROW_12(BASENAME, BIAS) \ |
| ADD_ROW_11(BASENAME, BIAS) \ |
| BASENAME##B += BIAS##B; |
| |
| #define ADD_ROW_13(BASENAME, BIAS) \ |
| ADD_ROW_12(BASENAME, BIAS) \ |
| BASENAME##C += BIAS##C; |
| |
| #define ADD_ROW_14(BASENAME, BIAS) \ |
| ADD_ROW_13(BASENAME, BIAS) \ |
| BASENAME##D += BIAS##D; |
| |
| #define ADD_ROW_15(BASENAME, BIAS) \ |
| ADD_ROW_14(BASENAME, BIAS) \ |
| BASENAME##E += BIAS##E; |
| |
| #define ADD_ROW_16(BASENAME, BIAS) \ |
| ADD_ROW_15(BASENAME, BIAS) \ |
| BASENAME##F += BIAS##F; |
| |
| |
| |
| |
| #define ADD_BLOCK_STR(N, BASENAME, BIAS) ADD_ROW_##N(BASENAME, BIAS) |
| #define ADD_BLOCK(N, BASENAME, BIAS) ADD_BLOCK_STR(N, BASENAME, BIAS) |
| |
| |
| |
| #define ADD_ROW_BROADCAST_1(BASENAME, BIAS) \ |
| BASENAME##0 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_2(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_1(BASENAME, BIAS) \ |
| BASENAME##1 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_3(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_2(BASENAME, BIAS) \ |
| BASENAME##2 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_4(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_3(BASENAME, BIAS) \ |
| BASENAME##3 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_5(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_4(BASENAME, BIAS) \ |
| BASENAME##4 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_6(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_5(BASENAME, BIAS) \ |
| BASENAME##5 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_7(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_6(BASENAME, BIAS) \ |
| BASENAME##6 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_8(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_7(BASENAME, BIAS) \ |
| BASENAME##7 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_9(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_8(BASENAME, BIAS) \ |
| BASENAME##8 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_10(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_9(BASENAME, BIAS) \ |
| BASENAME##9 += BIAS; |
| |
| #define ADD_ROW_BROADCAST_11(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_10(BASENAME, BIAS) \ |
| BASENAME##A += BIAS; |
| |
| #define ADD_ROW_BROADCAST_12(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_11(BASENAME, BIAS) \ |
| BASENAME##B += BIAS; |
| |
| #define ADD_ROW_BROADCAST_13(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_12(BASENAME, BIAS) \ |
| BASENAME##C += BIAS; |
| |
| #define ADD_ROW_BROADCAST_14(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_13(BASENAME, BIAS) \ |
| BASENAME##D += BIAS; |
| |
| #define ADD_ROW_BROADCAST_15(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_14(BASENAME, BIAS) \ |
| BASENAME##E += BIAS; |
| |
| #define ADD_ROW_BROADCAST_16(BASENAME, BIAS) \ |
| ADD_ROW_BROADCAST_15(BASENAME, BIAS) \ |
| BASENAME##F += BIAS; |
| |
| |
| #define ADD_BLOCK_BROADCAST_STR(N, BASENAME, BIAS) ADD_ROW_BROADCAST_##N(BASENAME, BIAS) |
| #define ADD_BLOCK_BROADCAST(N, BASENAME, BIAS) ADD_BLOCK_BROADCAST_STR(N, BASENAME, BIAS) |
| |
| |
| |
| #define ACTIVATION_ROW_1(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##0, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_2(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_1(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##1 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##1, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_3(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_2(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##2 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##2, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_4(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_3(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##3 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##3, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_5(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_4(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##4 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##4, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_6(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_5(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##5 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##5, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_7(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_6(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##6 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##6, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_8(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_7(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##7 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##7, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_9(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_8(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##8 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##8, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_10(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_9(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##9 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##9, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_11(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_10(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##A = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##A, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_12(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_11(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##B = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##B, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_13(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_12(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##C = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##C, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_14(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_13(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##D = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##D, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_15(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_14(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##E = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##E, A_VAL, B_VAL); |
| |
| #define ACTIVATION_ROW_16(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| ACTIVATION_ROW_15(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \ |
| BASENAME##F = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##F, A_VAL, B_VAL); |
| |
| |
| |
| #define ACTIVATION_BLOCK_STR(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) ACTIVATION_ROW_##N(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) |
| #define ACTIVATION_BLOCK(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) ACTIVATION_BLOCK_STR(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) |
| |
| |
| |
| #define CONVERT_ROW_1(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##0 = CONVERT(BASENAME_SRC##0, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_2(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_1(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##1 = CONVERT(BASENAME_SRC##1, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_3(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_2(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##2 = CONVERT(BASENAME_SRC##2, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_4(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_3(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##3 = CONVERT(BASENAME_SRC##3, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_5(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_4(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##4 = CONVERT(BASENAME_SRC##4, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_6(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_5(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##5 = CONVERT(BASENAME_SRC##5, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_7(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_6(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##6 = CONVERT(BASENAME_SRC##6, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_8(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_7(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##7 = CONVERT(BASENAME_SRC##7, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_9(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_8(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##8 = CONVERT(BASENAME_SRC##8, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_10(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_9(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##9 = CONVERT(BASENAME_SRC##9, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_11(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_10(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##A = CONVERT(BASENAME_SRC##A, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_12(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_11(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##B = CONVERT(BASENAME_SRC##B, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_13(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_12(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##C = CONVERT(BASENAME_SRC##C, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_14(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_13(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##D = CONVERT(BASENAME_SRC##D, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_15(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_14(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##E = CONVERT(BASENAME_SRC##E, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| #define CONVERT_ROW_16(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| CONVERT_ROW_15(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \ |
| VEC_DATA_TYPE(DATA_TYPE, N) \ |
| BASENAME_DST##F = CONVERT(BASENAME_SRC##F, VEC_DATA_TYPE(DATA_TYPE, N)); |
| |
| |
| |
| #define CONVERT_BLOCK_STR(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) CONVERT_ROW_##M(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) |
| #define CONVERT_BLOCK(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) CONVERT_BLOCK_STR(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) |
| |
| |
| #ifndef ARM_COMPUTE_HELPERS_ASYMM_H |
| #define ARM_COMPUTE_HELPERS_ASYMM_H |
| |
| |
| #ifndef ARM_COMPUTE_HELPER_H |
| #define ARM_COMPUTE_HELPER_H |
| |
| |
| |
| |
| #define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define CONVERT_STORE_ROW_10(N0, DATA, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| |
| #define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
| #define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
| #define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ |
| if(!(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| |
| #if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| |
| |
| #if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) |
| |
| #elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) |
| |
| #else |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) |
| |
| #endif |
| |
| #endif |
| |
| |
| #if defined(PARTIAL_STORE_M0) |
| |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) |
| #else |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(y * M0)) |
| #endif |
| |
| |
| |
| #define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ |
| STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) |
| |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #pragma OPENCL EXTENSION cl_khr_fp16 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| #pragma OPENCL EXTENSION cl_arm_printf : enable |
| #endif |
| |
| #define GPU_ARCH_MIDGARD 0x100 |
| #define GPU_ARCH_BIFROST 0x200 |
| #define GPU_ARCH_VALHALL 0x300 |
| |
| |
| #define CONCAT(a, b) a##b |
| |
| |
| #define EXPAND(x) x |
| |
| |
| #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) |
| |
| |
| #define REV1(x) ((x)) |
| #define REV2(x) ((x).s10) |
| #define REV3(x) ((x).s210) |
| #define REV4(x) ((x).s3210) |
| #define REV8(x) ((x).s76543210) |
| #define REV16(x) ((x).sFEDCBA9876543210) |
| |
| |
| |
| #define REVERSE_STR(x, s) REV##s((x)) |
| #define REVERSE(x, s) REVERSE_STR(x, s) |
| |
| |
| |
| #define ROT1_0(x) ((x)) |
| #define ROT1_1(x) ((x)) |
| |
| #define ROT2_0(x) ((x)) |
| #define ROT2_1(x) ((x).s10) |
| #define ROT2_2(x) ((x)) |
| |
| #define ROT3_0(x) ((x)) |
| #define ROT3_1(x) ((x).s201) |
| #define ROT3_2(x) ((x).s120) |
| #define ROT3_3(x) ((x)) |
| |
| #define ROT4_0(x) ((x)) |
| #define ROT4_1(x) ((x).s3012) |
| #define ROT4_2(x) ((x).s2301) |
| #define ROT4_3(x) ((x).s1230) |
| #define ROT4_4(x) ((x)) |
| |
| #define ROT8_0(x) ((x)) |
| #define ROT8_1(x) ((x).s70123456) |
| #define ROT8_2(x) ((x).s67012345) |
| #define ROT8_3(x) ((x).s56701234) |
| #define ROT8_4(x) ((x).s45670123) |
| #define ROT8_5(x) ((x).s34567012) |
| #define ROT8_6(x) ((x).s23456701) |
| #define ROT8_7(x) ((x).s12345670) |
| #define ROT8_8(x) ((x)) |
| |
| #define ROT16_0(x) ((x)) |
| #define ROT16_1(x) ((x).sF0123456789ABCDE) |
| #define ROT16_2(x) ((x).sEF0123456789ABCD) |
| #define ROT16_3(x) ((x).sDEF0123456789ABC) |
| #define ROT16_4(x) ((x).sCDEF0123456789AB) |
| #define ROT16_5(x) ((x).sBCDEF0123456789A) |
| #define ROT16_6(x) ((x).sABCDEF0123456789) |
| #define ROT16_7(x) ((x).s9ABCDEF012345678) |
| #define ROT16_8(x) ((x).s89ABCDEF01234567) |
| #define ROT16_9(x) ((x).s789ABCDEF0123456) |
| #define ROT16_10(x) ((x).s6789ABCDEF012345) |
| #define ROT16_11(x) ((x).s56789ABCDEF01234) |
| #define ROT16_12(x) ((x).s456789ABCDEF0123) |
| #define ROT16_13(x) ((x).s3456789ABCDEF012) |
| #define ROT16_14(x) ((x).s23456789ABCDEF01) |
| #define ROT16_15(x) ((x).s123456789ABCDEF0) |
| #define ROT16_16(x) ((x)) |
| |
| |
| |
| #define ROTATE_STR(x, s, n) ROT##s##_##n(x) |
| #define ROTATE(x, s, n) ROTATE_STR(x, s, n) |
| |
| |
| |
| #define V_OFFS1(dt) (dt##1)(0) |
| #define V_OFFS2(dt) (dt##2)(0, 1) |
| #define V_OFFS3(dt) (dt##3)(0, 1, 2) |
| #define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) |
| #define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) |
| #define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) |
| |
| |
| |
| #define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) |
| #define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) |
| |
| |
| #define VLOAD_STR(size) vload##size |
| #define VLOAD(size) VLOAD_STR(size) |
| |
| |
| #define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size |
| #define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size) |
| |
| #define NO_LOAD(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vload_partial_1_0 NO_LOAD |
| #define vload_partial_1_1 vload1 |
| #define vload_partial_1_2 NO_LOAD |
| #define vload_partial_1_3 NO_LOAD |
| #define vload_partial_1_4 NO_LOAD |
| #define vload_partial_1_5 NO_LOAD |
| #define vload_partial_1_6 NO_LOAD |
| #define vload_partial_1_7 NO_LOAD |
| #define vload_partial_1_8 NO_LOAD |
| #define vload_partial_1_9 NO_LOAD |
| #define vload_partial_1_10 NO_LOAD |
| #define vload_partial_1_11 NO_LOAD |
| #define vload_partial_1_12 NO_LOAD |
| #define vload_partial_1_13 NO_LOAD |
| #define vload_partial_1_14 NO_LOAD |
| #define vload_partial_1_15 NO_LOAD |
| #define vload_partial_1_16 NO_LOAD |
| |
| #define vload_partial_2_0 NO_LOAD |
| #define vload_partial_2_1 vload_partial_1 |
| #define vload_partial_2_2 vload_partial_2 |
| #define vload_partial_2_3 NO_LOAD |
| #define vload_partial_2_4 NO_LOAD |
| #define vload_partial_2_5 NO_LOAD |
| #define vload_partial_2_6 NO_LOAD |
| #define vload_partial_2_7 NO_LOAD |
| #define vload_partial_2_8 NO_LOAD |
| #define vload_partial_2_9 NO_LOAD |
| #define vload_partial_2_10 NO_LOAD |
| #define vload_partial_2_11 NO_LOAD |
| #define vload_partial_2_12 NO_LOAD |
| #define vload_partial_2_13 NO_LOAD |
| #define vload_partial_2_14 NO_LOAD |
| #define vload_partial_2_15 NO_LOAD |
| #define vload_partial_2_16 NO_LOAD |
| |
| #define vload_partial_3_0 NO_LOAD |
| #define vload_partial_3_1 vload_partial_1 |
| #define vload_partial_3_2 vload_partial_2 |
| #define vload_partial_3_3 vload_partial_3 |
| #define vload_partial_3_4 NO_LOAD |
| #define vload_partial_3_5 NO_LOAD |
| #define vload_partial_3_6 NO_LOAD |
| #define vload_partial_3_7 NO_LOAD |
| #define vload_partial_3_8 NO_LOAD |
| #define vload_partial_3_9 NO_LOAD |
| #define vload_partial_3_10 NO_LOAD |
| #define vload_partial_3_11 NO_LOAD |
| #define vload_partial_3_12 NO_LOAD |
| #define vload_partial_3_13 NO_LOAD |
| #define vload_partial_3_14 NO_LOAD |
| #define vload_partial_3_15 NO_LOAD |
| #define vload_partial_3_16 NO_LOAD |
| |
| #define vload_partial_4_0 NO_LOAD |
| #define vload_partial_4_1 vload_partial_1 |
| #define vload_partial_4_2 vload_partial_2 |
| #define vload_partial_4_3 vload_partial_3 |
| #define vload_partial_4_4 vload_partial_4 |
| #define vload_partial_4_5 NO_LOAD |
| #define vload_partial_4_6 NO_LOAD |
| #define vload_partial_4_7 NO_LOAD |
| #define vload_partial_4_8 NO_LOAD |
| #define vload_partial_4_9 NO_LOAD |
| #define vload_partial_4_10 NO_LOAD |
| #define vload_partial_4_11 NO_LOAD |
| #define vload_partial_4_12 NO_LOAD |
| #define vload_partial_4_13 NO_LOAD |
| #define vload_partial_4_14 NO_LOAD |
| #define vload_partial_4_15 NO_LOAD |
| #define vload_partial_4_16 NO_LOAD |
| |
| #define vload_partial_8_0 NO_LOAD |
| #define vload_partial_8_1 vload_partial_1 |
| #define vload_partial_8_2 vload_partial_2 |
| #define vload_partial_8_3 vload_partial_3 |
| #define vload_partial_8_4 vload_partial_4 |
| #define vload_partial_8_5 vload_partial_5 |
| #define vload_partial_8_6 vload_partial_6 |
| #define vload_partial_8_7 vload_partial_7 |
| #define vload_partial_8_8 vload_partial_8 |
| #define vload_partial_8_9 NO_LOAD |
| #define vload_partial_8_10 NO_LOAD |
| #define vload_partial_8_11 NO_LOAD |
| #define vload_partial_8_12 NO_LOAD |
| #define vload_partial_8_13 NO_LOAD |
| #define vload_partial_8_14 NO_LOAD |
| #define vload_partial_8_15 NO_LOAD |
| #define vload_partial_8_16 NO_LOAD |
| |
| #define vload_partial_16_0 NO_LOAD |
| #define vload_partial_16_1 vload_partial_1 |
| #define vload_partial_16_2 vload_partial_2 |
| #define vload_partial_16_3 vload_partial_3 |
| #define vload_partial_16_4 vload_partial_4 |
| #define vload_partial_16_5 vload_partial_5 |
| #define vload_partial_16_6 vload_partial_6 |
| #define vload_partial_16_7 vload_partial_7 |
| #define vload_partial_16_8 vload_partial_8 |
| #define vload_partial_16_9 vload_partial_9 |
| #define vload_partial_16_10 vload_partial_10 |
| #define vload_partial_16_11 vload_partial_11 |
| #define vload_partial_16_12 vload_partial_12 |
| #define vload_partial_16_13 vload_partial_13 |
| #define vload_partial_16_14 vload_partial_14 |
| #define vload_partial_16_15 vload_partial_15 |
| #define vload_partial_16_16 vload_partial_16 |
| |
| |
| #define vload_partial_1(DATA, OFFSET, PTR) \ |
| DATA.s0 = vload1(OFFSET, PTR); |
| |
| #define vload_partial_2(DATA, OFFSET, PTR) \ |
| DATA.s01 = vload2(OFFSET, PTR); |
| |
| #define vload_partial_3(DATA, OFFSET, PTR) \ |
| DATA.s012 = vload3(OFFSET, PTR); |
| |
| #define vload_partial_4(DATA, OFFSET, PTR) \ |
| DATA.s0123 = vload4(OFFSET, PTR); |
| |
| #define vload_partial_5(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| DATA.s4 = vload1(OFFSET, PTR + 4); |
| |
| #define vload_partial_6(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vload_partial_7(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vload_partial_8(DATA, OFFSET, PTR) \ |
| DATA.s01234567 = vload8(OFFSET, PTR); |
| |
| #define vload_partial_9(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| DATA.s8 = vload1(OFFSET, PTR + 8); |
| |
| #define vload_partial_10(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vload_partial_11(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_3(DATA.s89A, OFFSET, PTR + 8); |
| |
| #define vload_partial_12(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_4(DATA.s89AB, OFFSET, PTR + 8); |
| |
| #define vload_partial_13(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_14(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_15(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_16(DATA, OFFSET, PTR) \ |
| DATA = vload16(OFFSET, PTR); |
| |
| |
| |
| #define PIXEL_UNIT4 1 |
| #define PIXEL_UNIT8 2 |
| #define PIXEL_UNIT16 4 |
| |
| |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) |
| |
| |
| #define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); |
| #endif |
| |
| #define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| #endif |
| |
| |
| #define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) |
| #define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) |
| |
| |
| #define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values) |
| #define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) |
| |
| #define VSTORE_STR(size) vstore##size |
| #define VSTORE(size) VSTORE_STR(size) |
| |
| #define float1 float |
| #define half1 half |
| #define char1 char |
| #define uchar1 uchar |
| #define short1 short |
| #define ushort1 ushort |
| #define int1 int |
| #define uint1 uint |
| #define long1 long |
| #define ulong1 ulong |
| #define double1 double |
| |
| #define vload1(OFFSET, PTR) *(OFFSET + PTR) |
| #define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA |
| |
| |
| #define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size |
| #define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) |
| |
| #define NO_STORE(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vstore_partial_1_0 NO_STORE |
| #define vstore_partial_1_1 vstore1 |
| #define vstore_partial_1_2 NO_STORE |
| #define vstore_partial_1_3 NO_STORE |
| #define vstore_partial_1_4 NO_STORE |
| #define vstore_partial_1_5 NO_STORE |
| #define vstore_partial_1_6 NO_STORE |
| #define vstore_partial_1_7 NO_STORE |
| #define vstore_partial_1_8 NO_STORE |
| #define vstore_partial_1_9 NO_STORE |
| #define vstore_partial_1_10 NO_STORE |
| #define vstore_partial_1_11 NO_STORE |
| #define vstore_partial_1_12 NO_STORE |
| #define vstore_partial_1_13 NO_STORE |
| #define vstore_partial_1_14 NO_STORE |
| #define vstore_partial_1_15 NO_STORE |
| #define vstore_partial_1_16 NO_STORE |
| |
| #define vstore_partial_2_0 NO_STORE |
| #define vstore_partial_2_1 vstore_partial_1 |
| #define vstore_partial_2_2 vstore_partial_2 |
| #define vstore_partial_2_3 NO_STORE |
| #define vstore_partial_2_4 NO_STORE |
| #define vstore_partial_2_5 NO_STORE |
| #define vstore_partial_2_6 NO_STORE |
| #define vstore_partial_2_7 NO_STORE |
| #define vstore_partial_2_8 NO_STORE |
| #define vstore_partial_2_9 NO_STORE |
| #define vstore_partial_2_10 NO_STORE |
| #define vstore_partial_2_11 NO_STORE |
| #define vstore_partial_2_12 NO_STORE |
| #define vstore_partial_2_13 NO_STORE |
| #define vstore_partial_2_14 NO_STORE |
| #define vstore_partial_2_15 NO_STORE |
| #define vstore_partial_2_16 NO_STORE |
| |
| #define vstore_partial_3_0 NO_STORE |
| #define vstore_partial_3_1 vstore_partial_1 |
| #define vstore_partial_3_2 vstore_partial_2 |
| #define vstore_partial_3_3 vstore_partial_3 |
| #define vstore_partial_3_4 NO_STORE |
| #define vstore_partial_3_5 NO_STORE |
| #define vstore_partial_3_6 NO_STORE |
| #define vstore_partial_3_7 NO_STORE |
| #define vstore_partial_3_8 NO_STORE |
| #define vstore_partial_3_9 NO_STORE |
| #define vstore_partial_3_10 NO_STORE |
| #define vstore_partial_3_11 NO_STORE |
| #define vstore_partial_3_12 NO_STORE |
| #define vstore_partial_3_13 NO_STORE |
| #define vstore_partial_3_14 NO_STORE |
| #define vstore_partial_3_15 NO_STORE |
| #define vstore_partial_3_16 NO_STORE |
| |
| #define vstore_partial_4_0 NO_STORE |
| #define vstore_partial_4_1 vstore_partial_1 |
| #define vstore_partial_4_2 vstore_partial_2 |
| #define vstore_partial_4_3 vstore_partial_3 |
| #define vstore_partial_4_4 vstore_partial_4 |
| #define vstore_partial_4_5 NO_STORE |
| #define vstore_partial_4_6 NO_STORE |
| #define vstore_partial_4_7 NO_STORE |
| #define vstore_partial_4_8 NO_STORE |
| #define vstore_partial_4_9 NO_STORE |
| #define vstore_partial_4_10 NO_STORE |
| #define vstore_partial_4_11 NO_STORE |
| #define vstore_partial_4_12 NO_STORE |
| #define vstore_partial_4_13 NO_STORE |
| #define vstore_partial_4_14 NO_STORE |
| #define vstore_partial_4_15 NO_STORE |
| #define vstore_partial_4_16 NO_STORE |
| |
| #define vstore_partial_8_0 NO_STORE |
| #define vstore_partial_8_1 vstore_partial_1 |
| #define vstore_partial_8_2 vstore_partial_2 |
| #define vstore_partial_8_3 vstore_partial_3 |
| #define vstore_partial_8_4 vstore_partial_4 |
| #define vstore_partial_8_5 vstore_partial_5 |
| #define vstore_partial_8_6 vstore_partial_6 |
| #define vstore_partial_8_7 vstore_partial_7 |
| #define vstore_partial_8_8 vstore_partial_8 |
| #define vstore_partial_8_9 NO_STORE |
| #define vstore_partial_8_10 NO_STORE |
| #define vstore_partial_8_11 NO_STORE |
| #define vstore_partial_8_12 NO_STORE |
| #define vstore_partial_8_13 NO_STORE |
| #define vstore_partial_8_14 NO_STORE |
| #define vstore_partial_8_15 NO_STORE |
| #define vstore_partial_8_16 NO_STORE |
| |
| #define vstore_partial_16_0 NO_STORE |
| #define vstore_partial_16_1 vstore_partial_1 |
| #define vstore_partial_16_2 vstore_partial_2 |
| #define vstore_partial_16_3 vstore_partial_3 |
| #define vstore_partial_16_4 vstore_partial_4 |
| #define vstore_partial_16_5 vstore_partial_5 |
| #define vstore_partial_16_6 vstore_partial_6 |
| #define vstore_partial_16_7 vstore_partial_7 |
| #define vstore_partial_16_8 vstore_partial_8 |
| #define vstore_partial_16_9 vstore_partial_9 |
| #define vstore_partial_16_10 vstore_partial_10 |
| #define vstore_partial_16_11 vstore_partial_11 |
| #define vstore_partial_16_12 vstore_partial_12 |
| #define vstore_partial_16_13 vstore_partial_13 |
| #define vstore_partial_16_14 vstore_partial_14 |
| #define vstore_partial_16_15 vstore_partial_15 |
| #define vstore_partial_16_16 vstore_partial_16 |
| |
| |
| #define vstore_partial_1(DATA, OFFSET, PTR) \ |
| vstore1(DATA.s0, OFFSET, PTR); |
| |
| #define vstore_partial_2(DATA, OFFSET, PTR) \ |
| vstore2(DATA.s01, OFFSET, PTR); |
| |
| #define vstore_partial_3(DATA, OFFSET, PTR) \ |
| vstore3(DATA.s012, OFFSET, PTR); |
| |
| #define vstore_partial_4(DATA, OFFSET, PTR) \ |
| vstore4(DATA.s0123, OFFSET, PTR); |
| |
| #define vstore_partial_5(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore1(DATA.s4, OFFSET, PTR + 4); |
| |
| #define vstore_partial_6(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vstore_partial_7(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vstore_partial_8(DATA, OFFSET, PTR) \ |
| vstore8(DATA.s01234567, OFFSET, PTR); |
| |
| #define vstore_partial_9(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore1(DATA.s8, OFFSET, PTR + 8); |
| |
| #define vstore_partial_10(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vstore_partial_11(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s89a, OFFSET, PTR + 8); |
| |
| #define vstore_partial_12(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8); |
| |
| #define vstore_partial_13(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_14(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_15(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_16(DATA, OFFSET, PTR) \ |
| vstore16(DATA, OFFSET, PTR); |
| |
| |
| |
| |
| |
| #define convert_float_sat convert_float |
| #define convert_float1_sat convert_float |
| #define convert_float2_sat convert_float2 |
| #define convert_float3_sat convert_float3 |
| #define convert_float4_sat convert_float4 |
| #define convert_float8_sat convert_float8 |
| #define convert_float16_sat convert_float16 |
| #define convert_half_sat convert_float |
| #define convert_half1_sat convert_half |
| #define convert_half2_sat convert_half2 |
| #define convert_half3_sat convert_half3 |
| #define convert_half4_sat convert_half4 |
| #define convert_half8_sat convert_half8 |
| #define convert_half16_sat convert_half16 |
| |
| #define convert_float1 convert_float |
| #define convert_half1 convert_half |
| #define convert_char1 convert_char |
| #define convert_uchar1 convert_uchar |
| #define convert_short1 convert_short |
| #define convert_ushort1 convert_ushort |
| #define convert_int1 convert_int |
| #define convert_uint1 convert_uint |
| #define convert_long1 convert_long |
| #define convert_ulong1 convert_ulong |
| #define convert_double1 convert_double |
| |
| #define convert_char1_sat convert_char_sat |
| #define convert_uchar1_sat convert_uchar_sat |
| #define convert_uchar2_sat convert_uchar2_sat |
| #define convert_uchar3_sat convert_uchar3_sat |
| #define convert_uchar4_sat convert_uchar4_sat |
| #define convert_uchar8_sat convert_uchar8_sat |
| #define convert_uchar16_sat convert_uchar16_sat |
| #define convert_short1_sat convert_short_sat |
| #define convert_ushort1_sat convert_ushort_sat |
| #define convert_int1_sat convert_int_sat |
| #define convert_uint1_sat convert_uint_sat |
| #define convert_long1_sat convert_long_sat |
| #define convert_ulong1_sat convert_ulong_sat |
| #define convert_double1_sat convert_double_sat |
| |
| #define VEC_DATA_TYPE_STR(type, size) type##size |
| #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) |
| |
| #define CONVERT_STR(x, type) (convert_##type((x))) |
| #define CONVERT(x, type) CONVERT_STR(x, type) |
| |
| #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) |
| #define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) |
| |
| #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) |
| #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) |
| |
| #define select_vec_dt_uchar(size) uchar##size |
| #define select_vec_dt_char(size) char##size |
| #define select_vec_dt_ushort(size) ushort##size |
| #define select_vec_dt_short(size) short##size |
| #define select_vec_dt_half(size) short##size |
| #define select_vec_dt_uint(size) uint##size |
| #define select_vec_dt_int(size) int##size |
| #define select_vec_dt_float(size) int##size |
| #define select_vec_dt_ulong(size) ulong##size |
| #define select_vec_dt_long(size) long##size |
| |
| #define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) |
| #define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) |
| #define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define signed_int_vec_dt_uchar(size) char##size |
| #define signed_int_vec_dt_char(size) char##size |
| #define signed_int_vec_dt_ushort(size) short##size |
| #define signed_int_vec_dt_short(size) short##size |
| #define signed_int_vec_dt_half(size) short##size |
| #define signed_int_vec_dt_uint(size) int##size |
| #define signed_int_vec_dt_int(size) int##size |
| #define signed_int_vec_dt_float(size) int##size |
| #define signed_int_vec_dt_ulong(size) long##size |
| #define signed_int_vec_dt_long(size) long##size |
| |
| #define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size) |
| #define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size) |
| #define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define sum_reduce_1(x) (x) |
| #define sum_reduce_2(x) ((x).s0) + ((x).s1) |
| #define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) |
| #define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) |
| #define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) |
| #define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) |
| |
| #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) |
| #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) |
| |
| #define prod_reduce_1(x) (x) |
| #define prod_reduce_2(x) ((x).s0) * ((x).s1) |
| #define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2) |
| #define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23) |
| #define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567) |
| #define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF) |
| |
| #define PROD_REDUCE_STR(x, size) prod_reduce_##size(x) |
| #define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size) |
| |
| #define max_reduce_1(x) (x) |
| #define max_reduce_2(x) max(((x).s0), ((x).s1)) |
| #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) |
| #define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) |
| #define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) |
| #define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) |
| |
| #define MAX_REDUCE_STR(x, size) max_reduce_##size(x) |
| #define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) |
| |
| #define VECTOR_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define IMAGE_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR5D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_stride_v, \ |
| uint name##_step_v, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define CONVERT_TO_VECTOR_STRUCT(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x) |
| |
| #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0) |
| |
| #define CONVERT_TO_IMAGE_STRUCT(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y) |
| |
| #define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \ |
| tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| |
| typedef struct Vector |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| } Vector; |
| |
| |
| typedef struct Image |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| } Image; |
| |
| |
| typedef struct Tensor3D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| } Tensor3D; |
| |
| |
| typedef struct Tensor4D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| int stride_w; |
| } Tensor4D; |
| |
| |
| inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x) |
| { |
| Vector vector = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| }; |
| vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x; |
| return vector; |
| } |
| |
| |
| inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y; |
| return img; |
| } |
| |
| |
| inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return img; |
| } |
| |
| |
| inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return tensor; |
| } |
| |
| |
| inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| return tensor; |
| } |
| |
| inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w, |
| uint step_w, |
| uint mod_size) |
| { |
| Tensor4D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z, |
| .stride_w = stride_w |
| }; |
| |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w; |
| return tensor; |
| } |
| |
| |
| inline __global const uchar *vector_offset(const Vector *vec, int x) |
| { |
| return vec->ptr + x * vec->stride_x; |
| } |
| |
| |
| inline __global uchar *offset(const Image *img, int x, int y) |
| { |
| return img->ptr + x * img->stride_x + y * img->stride_y; |
| } |
| |
| |
| inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z; |
| } |
| |
| |
| inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w; |
| } |
| |
| |
| inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index) |
| { |
| uint num_elements = width * height; |
| |
| const uint z = index / num_elements; |
| |
| index %= num_elements; |
| |
| const uint y = index / width; |
| |
| index %= width; |
| |
| const uint x = index; |
| |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes; |
| } |
| |
| #endif |
| |
| |
| #define CONVERT_DOWN_RTE_STR(x, type) (convert_##type##_rte((x))) |
| #define CONVERT_DOWN_RTE(x, type) CONVERT_DOWN_RTE_STR(x, type) |
| |
| |
| inline uchar quantize_qasymm8(float input, float offset, float scale) |
| { |
| float out_f32 = input / scale + offset; |
| uchar res_u8 = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, int), uchar); |
| return res_u8; |
| } |
| |
| |
| inline float dequantize_qasymm8(uchar input, float offset, float scale) |
| { |
| return ((float)input - offset) * scale; |
| } |
| |
| |
| inline float dequantize_qasymm8_signed(char input, float offset, float scale) |
| { |
| return ((float)input - offset) * scale; |
| } |
| |
| |
| #define QUANTIZE_IMPL(type, size) \ |
| inline VEC_DATA_TYPE(type, size) quantize_##type##size(VEC_DATA_TYPE(float, size) input, float offset, float scale) \ |
| { \ |
| VEC_DATA_TYPE(float, size) \ |
| out_f32 = input / (VEC_DATA_TYPE(float, size))(scale) + (VEC_DATA_TYPE(float, size))(offset); \ |
| VEC_DATA_TYPE(type, size) \ |
| res = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, VEC_DATA_TYPE(int, size)), VEC_DATA_TYPE(type, size)); \ |
| return res; \ |
| } |
| |
| |
| #define DEQUANTIZE_IMPL(type, size) \ |
| inline VEC_DATA_TYPE(float, size) dequantize_##type##size(VEC_DATA_TYPE(type, size) input, float offset, float scale) \ |
| { \ |
| return (CONVERT(input, VEC_DATA_TYPE(float, size)) - offset) * scale; \ |
| } |
| |
| |
| #define ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_rounding_divide_by_POW2_##size(VEC_DATA_TYPE(int, size) x, VEC_DATA_TYPE(int, size) exponent) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) \ |
| zero = (VEC_DATA_TYPE(int, size))0; \ |
| const VEC_DATA_TYPE(int, size) \ |
| one = (VEC_DATA_TYPE(int, size))1; \ |
| VEC_DATA_TYPE(int, size) \ |
| mask = (one << exponent) - one; \ |
| VEC_DATA_TYPE(int, size) \ |
| threshold = (mask >> 1) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))(x < 0)); \ |
| return (x >> exponent) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))((x & mask) > threshold)); \ |
| } |
| |
| |
| #define ASYMM_MULT_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_mult##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \ |
| { \ |
| VEC_DATA_TYPE(int, size) \ |
| overflow = a == b && a == INT_MIN; \ |
| VEC_DATA_TYPE(long, size) \ |
| a_64 = convert_long##size(a); \ |
| VEC_DATA_TYPE(long, size) \ |
| b_64 = convert_long##size(b); \ |
| VEC_DATA_TYPE(long, size) \ |
| ab_64 = a_64 * b_64; \ |
| \ |
| VEC_DATA_TYPE(long, size) \ |
| mask1 = 1 << 30; \ |
| VEC_DATA_TYPE(long, size) \ |
| mask2 = 1 - (1 << 30); \ |
| VEC_DATA_TYPE(long, size) \ |
| is_positive_or_zero = ab_64 >= 0; \ |
| VEC_DATA_TYPE(long, size) \ |
| nudge = select(mask2, mask1, (SELECT_VEC_DATA_TYPE(long, size))(is_positive_or_zero)); \ |
| VEC_DATA_TYPE(long, size) \ |
| mask = 1ll << 31; \ |
| VEC_DATA_TYPE(int, size) \ |
| ab_x2_high32 = convert_int##size((ab_64 + nudge) / mask); \ |
| return select(ab_x2_high32, INT_MAX, (SELECT_VEC_DATA_TYPE(int, size))(overflow)); \ |
| } |
| |
| |
| #define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(VEC_DATA_TYPE(int, size) a) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) constant_term = 1895147668; \ |
| const VEC_DATA_TYPE(int, size) constant_1_over_3 = 715827883; \ |
| const int k_fractional_bits = 31; \ |
| VEC_DATA_TYPE(int, size) \ |
| x = a + (1 << (k_fractional_bits - 3)); \ |
| VEC_DATA_TYPE(int, size) \ |
| x2 = ASYMM_MULT(x, x, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| x3 = ASYMM_MULT(x2, x, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| x4 = ASYMM_MULT(x2, x2, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| x4_over_4 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4, 2, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| x4_over_24_plus_x3_over_6_plus_x2 = ASYMM_MULT((x4_over_4 + x3), constant_1_over_3, size) + x2; \ |
| VEC_DATA_TYPE(int, size) \ |
| x4_over_24_plus_x3_over_6_plus_x2_over_2 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4_over_24_plus_x3_over_6_plus_x2, 1, size); \ |
| return constant_term + ASYMM_MULT(constant_term, x + x4_over_24_plus_x3_over_6_plus_x2_over_2, size); \ |
| } |
| |
| |
| #define ASYMM_SELECT_USING_MASK_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_select_using_mask##size(VEC_DATA_TYPE(int, size) if_mask, VEC_DATA_TYPE(int, size) then_val, VEC_DATA_TYPE(int, size) else_val) \ |
| { \ |
| return (if_mask & then_val) ^ (~if_mask & else_val); \ |
| } |
| |
| |
| #define ASYMM_MASK_IF_ZERO_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_mask_if_zero##size(VEC_DATA_TYPE(int, size) a) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) all_zeros = 0; \ |
| const VEC_DATA_TYPE(int, size) all_ones = ~0; \ |
| return select(all_zeros, all_ones, (SELECT_VEC_DATA_TYPE(int, size))(a == 0)); \ |
| } |
| |
| |
| #define ASYMM_MASK_IF_NON_ZERO_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_mask_if_non_zero##size(VEC_DATA_TYPE(int, size) a) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) all_zeros = 0; \ |
| const VEC_DATA_TYPE(int, size) all_ones = ~0; \ |
| return select(all_zeros, all_ones, (SELECT_VEC_DATA_TYPE(int, size))(a != 0)); \ |
| } |
| |
| #define EXP_BARREL_SHIFTER_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) exp_barrel_shifter##size(VEC_DATA_TYPE(int, size) result, int exponent, int fp_multiplier, int k_integer_bits, int k_fractional_bits, VEC_DATA_TYPE(int, size) remainder) \ |
| { \ |
| if(k_integer_bits > exponent) \ |
| { \ |
| const int k_shift_amount = k_integer_bits > exponent ? k_fractional_bits + exponent : 0; \ |
| return ASYMM_SELECT_USING_MASK( \ |
| ASYMM_MASK_IF_NON_ZERO(remainder & (1 << k_shift_amount), size), \ |
| ASYMM_MULT(result, fp_multiplier, size), result, size); \ |
| } \ |
| \ |
| return result; \ |
| } |
| |
| |
| #define ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_exp_on_negative_values##size(VEC_DATA_TYPE(int, size) a, int k_integer_bits) \ |
| { \ |
| const int k_fractional_bits = 31 - k_integer_bits; \ |
| VEC_DATA_TYPE(int, size) \ |
| k_one_quarter = 1 << (k_fractional_bits - 2); \ |
| VEC_DATA_TYPE(int, size) \ |
| mask = k_one_quarter - 1; \ |
| VEC_DATA_TYPE(int, size) \ |
| a_mod_quarter_minus_one_quarter = (a & mask) - k_one_quarter; \ |
| VEC_DATA_TYPE(int, size) \ |
| a_mod_quarter_minus_one_quarter_scaled = a_mod_quarter_minus_one_quarter << k_integer_bits; \ |
| VEC_DATA_TYPE(int, size) \ |
| result = ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a_mod_quarter_minus_one_quarter_scaled, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| remainder = a_mod_quarter_minus_one_quarter - a; \ |
| \ |
| result = EXP_BARREL_SHIFTER(result, -2, 1672461947, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, -1, 1302514674, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +0, 790015084, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +1, 290630308, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +2, 39332535, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +3, 720401, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +4, 242, k_integer_bits, k_fractional_bits, remainder, size); \ |
| \ |
| if(k_integer_bits > 5) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) clamp = -(1 << (k_fractional_bits + 5)); \ |
| result = ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_NON_ZERO(a < clamp, size), 0, result, size); \ |
| } \ |
| \ |
| const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX; \ |
| return ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_ZERO(a, size), Q0_one, result, size); \ |
| } |
| |
| |
| #define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_saturating_rounding_mult_by_pow2##size(VEC_DATA_TYPE(int, size) x, int exponent) \ |
| { \ |
| if(exponent < 0) \ |
| { \ |
| return ASYMM_ROUNDING_DIVIDE_BY_POW2(x, -exponent, size); \ |
| } \ |
| \ |
| const VEC_DATA_TYPE(int, size) min = INT_MIN; \ |
| const VEC_DATA_TYPE(int, size) max = INT_MAX; \ |
| int threshold = ((1 << (31 - exponent)) - 1); \ |
| VEC_DATA_TYPE(int, size) \ |
| positive_mask = ASYMM_MASK_IF_NON_ZERO(x > threshold, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| negative_mask = ASYMM_MASK_IF_NON_ZERO(x < -threshold, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| result = x << exponent; \ |
| result = ASYMM_SELECT_USING_MASK(positive_mask, max, result, size); \ |
| result = ASYMM_SELECT_USING_MASK(negative_mask, min, result, size); \ |
| return result; \ |
| } |
| |
| |
| #define ASYMM_ROUNDING_HALF_SUM_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_rounding_half_sum##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \ |
| { \ |
| VEC_DATA_TYPE(long, size) \ |
| a64 = convert_long##size(a); \ |
| VEC_DATA_TYPE(long, size) \ |
| b64 = convert_long##size(b); \ |
| VEC_DATA_TYPE(long, size) \ |
| sum = a64 + b64; \ |
| const VEC_DATA_TYPE(long, size) one = 1; \ |
| const VEC_DATA_TYPE(long, size) minus_one = -1; \ |
| VEC_DATA_TYPE(long, size) \ |
| sign = select(minus_one, one, (SELECT_VEC_DATA_TYPE(long, size))(sum >= 0)); \ |
| return convert_int##size((sum + sign) / 2); \ |
| } |
| |
| |
| #define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_one_over_one_plus_x_for_x_in_0_1##size(VEC_DATA_TYPE(int, size) a) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX; \ |
| const VEC_DATA_TYPE(int, size) Q2_one = 1 << (31 - 2); \ |
| VEC_DATA_TYPE(int, size) \ |
| half_denominator = ASYMM_ROUNDING_HALF_SUM(a, Q0_one, size); \ |
| const VEC_DATA_TYPE(int, size) Q2_48_over_17 = 1515870810; \ |
| const VEC_DATA_TYPE(int, size) Q2_neg_32_over_17 = -1010580540; \ |
| VEC_DATA_TYPE(int, size) \ |
| x = Q2_48_over_17 + ASYMM_MULT(half_denominator, Q2_neg_32_over_17, size); \ |
| for(int i = 0; i < 3; i++) \ |
| { \ |
| VEC_DATA_TYPE(int, size) \ |
| half_denominator_times_x = ASYMM_MULT(half_denominator, x, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| one_minus_half_denominator_times_x = Q2_one - half_denominator_times_x; \ |
| VEC_DATA_TYPE(int, size) \ |
| tmp = ASYMM_MULT(x, one_minus_half_denominator_times_x, size); \ |
| x = x + ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(tmp, 2, size); \ |
| } \ |
| return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, 1, size); \ |
| } |
| |
| |
| #define ASYMM_RESCALE_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_rescale##size(VEC_DATA_TYPE(int, size) value, int src_integer_bits, int dst_integer_bits) \ |
| { \ |
| int exponent = src_integer_bits - dst_integer_bits; \ |
| return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(value, exponent, size); \ |
| } |
| |
| #define QUANTIZE_STR(input, offset, scale, type, size) quantize_##type##size(input, offset, scale) |
| #define QUANTIZE(input, offset, scale, type, size) QUANTIZE_STR(input, offset, scale, type, size) |
| #define DEQUANTIZE_STR(input, offset, scale, type, size) dequantize_##type##size(input, offset, scale) |
| #define DEQUANTIZE(input, offset, scale, type, size) DEQUANTIZE_STR(input, offset, scale, type, size) |
| |
| #define ASYMM_ROUNDING_DIVIDE_BY_POW2_STR(x, exponent, size) asymm_rounding_divide_by_POW2_##size(x, exponent) |
| #define ASYMM_ROUNDING_DIVIDE_BY_POW2(x, exponent, size) ASYMM_ROUNDING_DIVIDE_BY_POW2_STR(x, exponent, size) |
| #define ASYMM_MULT_STR(a, b, size) asymm_mult##size(a, b) |
| #define ASYMM_MULT(a, b, size) ASYMM_MULT_STR(a, b, size) |
| #define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(x, quantized_multiplier, left_shift, size) \ |
| ASYMM_MULT(x *((VEC_DATA_TYPE(int, size))(1) << (-left_shift)), quantized_multiplier, size) |
| #define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, quantized_multiplier, right_shift, size) \ |
| ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(x, quantized_multiplier, size), right_shift, size) |
| #define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a, size) asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(a) |
| #define ASYMM_SELECT_USING_MASK(if_mask, then_val, else_val, size) asymm_select_using_mask##size(if_mask, then_val, else_val) |
| #define ASYMM_MASK_IF_ZERO(a, size) asymm_mask_if_zero##size(a) |
| #define ASYMM_MASK_IF_NON_ZERO(a, size) asymm_mask_if_non_zero##size(a) |
| #define EXP_BARREL_SHIFTER(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder, size) exp_barrel_shifter##size(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder) |
| #define ASYMM_EXP_ON_NEGATIVE_VALUES_STR(a, k_integer_bits, size) asymm_exp_on_negative_values##size(a, k_integer_bits) |
| #define ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, size) ASYMM_EXP_ON_NEGATIVE_VALUES_STR(a, k_integer_bits, size) |
| #define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size) asymm_one_over_one_plus_x_for_x_in_0_1##size(a) |
| #define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(a, size) ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size) |
| #define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, exponent, size) asymm_saturating_rounding_mult_by_pow2##size(x, exponent) |
| #define ASYMM_ROUNDING_HALF_SUM(a, b, size) asymm_rounding_half_sum##size(a, b) |
| #define ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size) asymm_rescale##size(value, src_integer_bits, dst_integer_bits) |
| #define ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, size) ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size) |
| |
| #define MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) multiply_by_quantized_multiplier##size(VEC_DATA_TYPE(int, size) input, int qmul, int shift) \ |
| { \ |
| const int left_shift = shift > 0 ? shift : 0; \ |
| const int right_shift = shift > 0 ? 0 : -shift; \ |
| return ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(input * (1 << left_shift), qmul, size), right_shift, size); \ |
| } |
| #define MULTIPLY_BY_QUANTIZED_MULTIPLIER(input, qmul, shift, size) multiply_by_quantized_multiplier##size(input, qmul, shift) |
| |
| QUANTIZE_IMPL(uchar, 1) |
| QUANTIZE_IMPL(char, 1) |
| QUANTIZE_IMPL(uint, 1) |
| QUANTIZE_IMPL(int, 1) |
| QUANTIZE_IMPL(uchar, 2) |
| QUANTIZE_IMPL(char, 2) |
| QUANTIZE_IMPL(uint, 2) |
| QUANTIZE_IMPL(int, 2) |
| QUANTIZE_IMPL(uchar, 3) |
| QUANTIZE_IMPL(char, 3) |
| QUANTIZE_IMPL(uint, 3) |
| QUANTIZE_IMPL(int, 3) |
| QUANTIZE_IMPL(uchar, 4) |
| QUANTIZE_IMPL(ushort, 4) |
| QUANTIZE_IMPL(short, 4) |
| QUANTIZE_IMPL(int, 4) |
| QUANTIZE_IMPL(uchar, 8) |
| QUANTIZE_IMPL(char, 8) |
| QUANTIZE_IMPL(uint, 8) |
| QUANTIZE_IMPL(int, 8) |
| QUANTIZE_IMPL(uchar, 16) |
| QUANTIZE_IMPL(char, 16) |
| QUANTIZE_IMPL(ushort, 16) |
| QUANTIZE_IMPL(short, 16) |
| QUANTIZE_IMPL(uint, 16) |
| QUANTIZE_IMPL(int, 16) |
| |
| DEQUANTIZE_IMPL(uchar, 1) |
| DEQUANTIZE_IMPL(char, 1) |
| DEQUANTIZE_IMPL(uint, 1) |
| DEQUANTIZE_IMPL(int, 1) |
| DEQUANTIZE_IMPL(uchar, 2) |
| DEQUANTIZE_IMPL(char, 2) |
| DEQUANTIZE_IMPL(uint, 2) |
| DEQUANTIZE_IMPL(int, 2) |
| DEQUANTIZE_IMPL(uchar, 3) |
| DEQUANTIZE_IMPL(char, 3) |
| DEQUANTIZE_IMPL(uint, 3) |
| DEQUANTIZE_IMPL(int, 3) |
| DEQUANTIZE_IMPL(uchar, 4) |
| DEQUANTIZE_IMPL(ushort, 4) |
| DEQUANTIZE_IMPL(short, 4) |
| DEQUANTIZE_IMPL(int, 4) |
| DEQUANTIZE_IMPL(uchar, 8) |
| DEQUANTIZE_IMPL(char, 8) |
| DEQUANTIZE_IMPL(uint, 8) |
| DEQUANTIZE_IMPL(int, 8) |
| DEQUANTIZE_IMPL(uchar, 16) |
| DEQUANTIZE_IMPL(char, 16) |
| DEQUANTIZE_IMPL(ushort, 16) |
| DEQUANTIZE_IMPL(short, 16) |
| DEQUANTIZE_IMPL(uint, 16) |
| DEQUANTIZE_IMPL(int, 16) |
| |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(1) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(2) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(3) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(4) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(8) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(16) |
| |
| ASYMM_MULT_IMPL(1) |
| ASYMM_MULT_IMPL(2) |
| ASYMM_MULT_IMPL(3) |
| ASYMM_MULT_IMPL(4) |
| ASYMM_MULT_IMPL(8) |
| ASYMM_MULT_IMPL(16) |
| |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(1) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(2) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(3) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(4) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(8) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(16) |
| |
| ASYMM_SELECT_USING_MASK_IMPL(1) |
| ASYMM_SELECT_USING_MASK_IMPL(2) |
| ASYMM_SELECT_USING_MASK_IMPL(3) |
| ASYMM_SELECT_USING_MASK_IMPL(4) |
| ASYMM_SELECT_USING_MASK_IMPL(8) |
| ASYMM_SELECT_USING_MASK_IMPL(16) |
| |
| ASYMM_MASK_IF_ZERO_IMPL(1) |
| ASYMM_MASK_IF_ZERO_IMPL(2) |
| ASYMM_MASK_IF_ZERO_IMPL(3) |
| ASYMM_MASK_IF_ZERO_IMPL(4) |
| ASYMM_MASK_IF_ZERO_IMPL(8) |
| ASYMM_MASK_IF_ZERO_IMPL(16) |
| |
| ASYMM_MASK_IF_NON_ZERO_IMPL(1) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(2) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(3) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(4) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(8) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(16) |
| |
| EXP_BARREL_SHIFTER_IMPL(1) |
| EXP_BARREL_SHIFTER_IMPL(2) |
| EXP_BARREL_SHIFTER_IMPL(3) |
| EXP_BARREL_SHIFTER_IMPL(4) |
| EXP_BARREL_SHIFTER_IMPL(8) |
| EXP_BARREL_SHIFTER_IMPL(16) |
| |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(1) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(2) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(3) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(4) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(8) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(16) |
| |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(1) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(2) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(3) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(4) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(8) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(16) |
| |
| ASYMM_ROUNDING_HALF_SUM_IMPL(1) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(2) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(3) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(4) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(8) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(16) |
| |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(1) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(2) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(3) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(4) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(8) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(16) |
| |
| ASYMM_RESCALE_IMPL(1) |
| ASYMM_RESCALE_IMPL(2) |
| ASYMM_RESCALE_IMPL(3) |
| ASYMM_RESCALE_IMPL(4) |
| ASYMM_RESCALE_IMPL(8) |
| ASYMM_RESCALE_IMPL(16) |
| |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(1) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(2) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(3) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(4) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(8) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(16) |
| |
| #endif |
| |
| #ifndef ARM_COMPUTE_REPEAT_H |
| #define ARM_COMPUTE_REPEAT_H |
| |
| |
| #ifndef ARM_COMPUTE_HELPER_H |
| #define ARM_COMPUTE_HELPER_H |
| |
| |
| |
| |
| #define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define CONVERT_STORE_ROW_10(N0, DATA, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| |
| #define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
| #define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
| #define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| #define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ |
| if(!(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| |
| #if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| |
| |
| #if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) |
| |
| #elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) |
| |
| #else |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) |
| |
| #endif |
| |
| #endif |
| |
| |
| #if defined(PARTIAL_STORE_M0) |
| |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) |
| #else |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(y * M0)) |
| #endif |
| |
| |
| |
| #define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ |
| STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) |
| |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #pragma OPENCL EXTENSION cl_khr_fp16 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| #pragma OPENCL EXTENSION cl_arm_printf : enable |
| #endif |
| |
| #define GPU_ARCH_MIDGARD 0x100 |
| #define GPU_ARCH_BIFROST 0x200 |
| #define GPU_ARCH_VALHALL 0x300 |
| |
| |
| #define CONCAT(a, b) a##b |
| |
| |
| #define EXPAND(x) x |
| |
| |
| #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) |
| |
| |
| #define REV1(x) ((x)) |
| #define REV2(x) ((x).s10) |
| #define REV3(x) ((x).s210) |
| #define REV4(x) ((x).s3210) |
| #define REV8(x) ((x).s76543210) |
| #define REV16(x) ((x).sFEDCBA9876543210) |
| |
| |
| |
| #define REVERSE_STR(x, s) REV##s((x)) |
| #define REVERSE(x, s) REVERSE_STR(x, s) |
| |
| |
| |
| #define ROT1_0(x) ((x)) |
| #define ROT1_1(x) ((x)) |
| |
| #define ROT2_0(x) ((x)) |
| #define ROT2_1(x) ((x).s10) |
| #define ROT2_2(x) ((x)) |
| |
| #define ROT3_0(x) ((x)) |
| #define ROT3_1(x) ((x).s201) |
| #define ROT3_2(x) ((x).s120) |
| #define ROT3_3(x) ((x)) |
| |
| #define ROT4_0(x) ((x)) |
| #define ROT4_1(x) ((x).s3012) |
| #define ROT4_2(x) ((x).s2301) |
| #define ROT4_3(x) ((x).s1230) |
| #define ROT4_4(x) ((x)) |
| |
| #define ROT8_0(x) ((x)) |
| #define ROT8_1(x) ((x).s70123456) |
| #define ROT8_2(x) ((x).s67012345) |
| #define ROT8_3(x) ((x).s56701234) |
| #define ROT8_4(x) ((x).s45670123) |
| #define ROT8_5(x) ((x).s34567012) |
| #define ROT8_6(x) ((x).s23456701) |
| #define ROT8_7(x) ((x).s12345670) |
| #define ROT8_8(x) ((x)) |
| |
| #define ROT16_0(x) ((x)) |
| #define ROT16_1(x) ((x).sF0123456789ABCDE) |
| #define ROT16_2(x) ((x).sEF0123456789ABCD) |
| #define ROT16_3(x) ((x).sDEF0123456789ABC) |
| #define ROT16_4(x) ((x).sCDEF0123456789AB) |
| #define ROT16_5(x) ((x).sBCDEF0123456789A) |
| #define ROT16_6(x) ((x).sABCDEF0123456789) |
| #define ROT16_7(x) ((x).s9ABCDEF012345678) |
| #define ROT16_8(x) ((x).s89ABCDEF01234567) |
| #define ROT16_9(x) ((x).s789ABCDEF0123456) |
| #define ROT16_10(x) ((x).s6789ABCDEF012345) |
| #define ROT16_11(x) ((x).s56789ABCDEF01234) |
| #define ROT16_12(x) ((x).s456789ABCDEF0123) |
| #define ROT16_13(x) ((x).s3456789ABCDEF012) |
| #define ROT16_14(x) ((x).s23456789ABCDEF01) |
| #define ROT16_15(x) ((x).s123456789ABCDEF0) |
| #define ROT16_16(x) ((x)) |
| |
| |
| |
| #define ROTATE_STR(x, s, n) ROT##s##_##n(x) |
| #define ROTATE(x, s, n) ROTATE_STR(x, s, n) |
| |
| |
| |
| #define V_OFFS1(dt) (dt##1)(0) |
| #define V_OFFS2(dt) (dt##2)(0, 1) |
| #define V_OFFS3(dt) (dt##3)(0, 1, 2) |
| #define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) |
| #define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) |
| #define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) |
| |
| |
| |
| #define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) |
| #define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) |
| |
| |
| #define VLOAD_STR(size) vload##size |
| #define VLOAD(size) VLOAD_STR(size) |
| |
| |
| #define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size |
| #define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size) |
| |
| #define NO_LOAD(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vload_partial_1_0 NO_LOAD |
| #define vload_partial_1_1 vload1 |
| #define vload_partial_1_2 NO_LOAD |
| #define vload_partial_1_3 NO_LOAD |
| #define vload_partial_1_4 NO_LOAD |
| #define vload_partial_1_5 NO_LOAD |
| #define vload_partial_1_6 NO_LOAD |
| #define vload_partial_1_7 NO_LOAD |
| #define vload_partial_1_8 NO_LOAD |
| #define vload_partial_1_9 NO_LOAD |
| #define vload_partial_1_10 NO_LOAD |
| #define vload_partial_1_11 NO_LOAD |
| #define vload_partial_1_12 NO_LOAD |
| #define vload_partial_1_13 NO_LOAD |
| #define vload_partial_1_14 NO_LOAD |
| #define vload_partial_1_15 NO_LOAD |
| #define vload_partial_1_16 NO_LOAD |
| |
| #define vload_partial_2_0 NO_LOAD |
| #define vload_partial_2_1 vload_partial_1 |
| #define vload_partial_2_2 vload_partial_2 |
| #define vload_partial_2_3 NO_LOAD |
| #define vload_partial_2_4 NO_LOAD |
| #define vload_partial_2_5 NO_LOAD |
| #define vload_partial_2_6 NO_LOAD |
| #define vload_partial_2_7 NO_LOAD |
| #define vload_partial_2_8 NO_LOAD |
| #define vload_partial_2_9 NO_LOAD |
| #define vload_partial_2_10 NO_LOAD |
| #define vload_partial_2_11 NO_LOAD |
| #define vload_partial_2_12 NO_LOAD |
| #define vload_partial_2_13 NO_LOAD |
| #define vload_partial_2_14 NO_LOAD |
| #define vload_partial_2_15 NO_LOAD |
| #define vload_partial_2_16 NO_LOAD |
| |
| #define vload_partial_3_0 NO_LOAD |
| #define vload_partial_3_1 vload_partial_1 |
| #define vload_partial_3_2 vload_partial_2 |
| #define vload_partial_3_3 vload_partial_3 |
| #define vload_partial_3_4 NO_LOAD |
| #define vload_partial_3_5 NO_LOAD |
| #define vload_partial_3_6 NO_LOAD |
| #define vload_partial_3_7 NO_LOAD |
| #define vload_partial_3_8 NO_LOAD |
| #define vload_partial_3_9 NO_LOAD |
| #define vload_partial_3_10 NO_LOAD |
| #define vload_partial_3_11 NO_LOAD |
| #define vload_partial_3_12 NO_LOAD |
| #define vload_partial_3_13 NO_LOAD |
| #define vload_partial_3_14 NO_LOAD |
| #define vload_partial_3_15 NO_LOAD |
| #define vload_partial_3_16 NO_LOAD |
| |
| #define vload_partial_4_0 NO_LOAD |
| #define vload_partial_4_1 vload_partial_1 |
| #define vload_partial_4_2 vload_partial_2 |
| #define vload_partial_4_3 vload_partial_3 |
| #define vload_partial_4_4 vload_partial_4 |
| #define vload_partial_4_5 NO_LOAD |
| #define vload_partial_4_6 NO_LOAD |
| #define vload_partial_4_7 NO_LOAD |
| #define vload_partial_4_8 NO_LOAD |
| #define vload_partial_4_9 NO_LOAD |
| #define vload_partial_4_10 NO_LOAD |
| #define vload_partial_4_11 NO_LOAD |
| #define vload_partial_4_12 NO_LOAD |
| #define vload_partial_4_13 NO_LOAD |
| #define vload_partial_4_14 NO_LOAD |
| #define vload_partial_4_15 NO_LOAD |
| #define vload_partial_4_16 NO_LOAD |
| |
| #define vload_partial_8_0 NO_LOAD |
| #define vload_partial_8_1 vload_partial_1 |
| #define vload_partial_8_2 vload_partial_2 |
| #define vload_partial_8_3 vload_partial_3 |
| #define vload_partial_8_4 vload_partial_4 |
| #define vload_partial_8_5 vload_partial_5 |
| #define vload_partial_8_6 vload_partial_6 |
| #define vload_partial_8_7 vload_partial_7 |
| #define vload_partial_8_8 vload_partial_8 |
| #define vload_partial_8_9 NO_LOAD |
| #define vload_partial_8_10 NO_LOAD |
| #define vload_partial_8_11 NO_LOAD |
| #define vload_partial_8_12 NO_LOAD |
| #define vload_partial_8_13 NO_LOAD |
| #define vload_partial_8_14 NO_LOAD |
| #define vload_partial_8_15 NO_LOAD |
| #define vload_partial_8_16 NO_LOAD |
| |
| #define vload_partial_16_0 NO_LOAD |
| #define vload_partial_16_1 vload_partial_1 |
| #define vload_partial_16_2 vload_partial_2 |
| #define vload_partial_16_3 vload_partial_3 |
| #define vload_partial_16_4 vload_partial_4 |
| #define vload_partial_16_5 vload_partial_5 |
| #define vload_partial_16_6 vload_partial_6 |
| #define vload_partial_16_7 vload_partial_7 |
| #define vload_partial_16_8 vload_partial_8 |
| #define vload_partial_16_9 vload_partial_9 |
| #define vload_partial_16_10 vload_partial_10 |
| #define vload_partial_16_11 vload_partial_11 |
| #define vload_partial_16_12 vload_partial_12 |
| #define vload_partial_16_13 vload_partial_13 |
| #define vload_partial_16_14 vload_partial_14 |
| #define vload_partial_16_15 vload_partial_15 |
| #define vload_partial_16_16 vload_partial_16 |
| |
| |
| #define vload_partial_1(DATA, OFFSET, PTR) \ |
| DATA.s0 = vload1(OFFSET, PTR); |
| |
| #define vload_partial_2(DATA, OFFSET, PTR) \ |
| DATA.s01 = vload2(OFFSET, PTR); |
| |
| #define vload_partial_3(DATA, OFFSET, PTR) \ |
| DATA.s012 = vload3(OFFSET, PTR); |
| |
| #define vload_partial_4(DATA, OFFSET, PTR) \ |
| DATA.s0123 = vload4(OFFSET, PTR); |
| |
| #define vload_partial_5(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| DATA.s4 = vload1(OFFSET, PTR + 4); |
| |
| #define vload_partial_6(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vload_partial_7(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vload_partial_8(DATA, OFFSET, PTR) \ |
| DATA.s01234567 = vload8(OFFSET, PTR); |
| |
| #define vload_partial_9(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| DATA.s8 = vload1(OFFSET, PTR + 8); |
| |
| #define vload_partial_10(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vload_partial_11(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_3(DATA.s89A, OFFSET, PTR + 8); |
| |
| #define vload_partial_12(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_4(DATA.s89AB, OFFSET, PTR + 8); |
| |
| #define vload_partial_13(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_14(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_15(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_16(DATA, OFFSET, PTR) \ |
| DATA = vload16(OFFSET, PTR); |
| |
| |
| |
| #define PIXEL_UNIT4 1 |
| #define PIXEL_UNIT8 2 |
| #define PIXEL_UNIT16 4 |
| |
| |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) |
| |
| |
| #define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); |
| #endif |
| |
| #define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| #endif |
| |
| |
| #define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) |
| #define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) |
| |
| |
| #define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values) |
| #define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) |
| |
| #define VSTORE_STR(size) vstore##size |
| #define VSTORE(size) VSTORE_STR(size) |
| |
| #define float1 float |
| #define half1 half |
| #define char1 char |
| #define uchar1 uchar |
| #define short1 short |
| #define ushort1 ushort |
| #define int1 int |
| #define uint1 uint |
| #define long1 long |
| #define ulong1 ulong |
| #define double1 double |
| |
| #define vload1(OFFSET, PTR) *(OFFSET + PTR) |
| #define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA |
| |
| |
| #define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size |
| #define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) |
| |
| #define NO_STORE(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vstore_partial_1_0 NO_STORE |
| #define vstore_partial_1_1 vstore1 |
| #define vstore_partial_1_2 NO_STORE |
| #define vstore_partial_1_3 NO_STORE |
| #define vstore_partial_1_4 NO_STORE |
| #define vstore_partial_1_5 NO_STORE |
| #define vstore_partial_1_6 NO_STORE |
| #define vstore_partial_1_7 NO_STORE |
| #define vstore_partial_1_8 NO_STORE |
| #define vstore_partial_1_9 NO_STORE |
| #define vstore_partial_1_10 NO_STORE |
| #define vstore_partial_1_11 NO_STORE |
| #define vstore_partial_1_12 NO_STORE |
| #define vstore_partial_1_13 NO_STORE |
| #define vstore_partial_1_14 NO_STORE |
| #define vstore_partial_1_15 NO_STORE |
| #define vstore_partial_1_16 NO_STORE |
| |
| #define vstore_partial_2_0 NO_STORE |
| #define vstore_partial_2_1 vstore_partial_1 |
| #define vstore_partial_2_2 vstore_partial_2 |
| #define vstore_partial_2_3 NO_STORE |
| #define vstore_partial_2_4 NO_STORE |
| #define vstore_partial_2_5 NO_STORE |
| #define vstore_partial_2_6 NO_STORE |
| #define vstore_partial_2_7 NO_STORE |
| #define vstore_partial_2_8 NO_STORE |
| #define vstore_partial_2_9 NO_STORE |
| #define vstore_partial_2_10 NO_STORE |
| #define vstore_partial_2_11 NO_STORE |
| #define vstore_partial_2_12 NO_STORE |
| #define vstore_partial_2_13 NO_STORE |
| #define vstore_partial_2_14 NO_STORE |
| #define vstore_partial_2_15 NO_STORE |
| #define vstore_partial_2_16 NO_STORE |
| |
| #define vstore_partial_3_0 NO_STORE |
| #define vstore_partial_3_1 vstore_partial_1 |
| #define vstore_partial_3_2 vstore_partial_2 |
| #define vstore_partial_3_3 vstore_partial_3 |
| #define vstore_partial_3_4 NO_STORE |
| #define vstore_partial_3_5 NO_STORE |
| #define vstore_partial_3_6 NO_STORE |
| #define vstore_partial_3_7 NO_STORE |
| #define vstore_partial_3_8 NO_STORE |
| #define vstore_partial_3_9 NO_STORE |
| #define vstore_partial_3_10 NO_STORE |
| #define vstore_partial_3_11 NO_STORE |
| #define vstore_partial_3_12 NO_STORE |
| #define vstore_partial_3_13 NO_STORE |
| #define vstore_partial_3_14 NO_STORE |
| #define vstore_partial_3_15 NO_STORE |
| #define vstore_partial_3_16 NO_STORE |
| |
| #define vstore_partial_4_0 NO_STORE |
| #define vstore_partial_4_1 vstore_partial_1 |
| #define vstore_partial_4_2 vstore_partial_2 |
| #define vstore_partial_4_3 vstore_partial_3 |
| #define vstore_partial_4_4 vstore_partial_4 |
| #define vstore_partial_4_5 NO_STORE |
| #define vstore_partial_4_6 NO_STORE |
| #define vstore_partial_4_7 NO_STORE |
| #define vstore_partial_4_8 NO_STORE |
| #define vstore_partial_4_9 NO_STORE |
| #define vstore_partial_4_10 NO_STORE |
| #define vstore_partial_4_11 NO_STORE |
| #define vstore_partial_4_12 NO_STORE |
| #define vstore_partial_4_13 NO_STORE |
| #define vstore_partial_4_14 NO_STORE |
| #define vstore_partial_4_15 NO_STORE |
| #define vstore_partial_4_16 NO_STORE |
| |
| #define vstore_partial_8_0 NO_STORE |
| #define vstore_partial_8_1 vstore_partial_1 |
| #define vstore_partial_8_2 vstore_partial_2 |
| #define vstore_partial_8_3 vstore_partial_3 |
| #define vstore_partial_8_4 vstore_partial_4 |
| #define vstore_partial_8_5 vstore_partial_5 |
| #define vstore_partial_8_6 vstore_partial_6 |
| #define vstore_partial_8_7 vstore_partial_7 |
| #define vstore_partial_8_8 vstore_partial_8 |
| #define vstore_partial_8_9 NO_STORE |
| #define vstore_partial_8_10 NO_STORE |
| #define vstore_partial_8_11 NO_STORE |
| #define vstore_partial_8_12 NO_STORE |
| #define vstore_partial_8_13 NO_STORE |
| #define vstore_partial_8_14 NO_STORE |
| #define vstore_partial_8_15 NO_STORE |
| #define vstore_partial_8_16 NO_STORE |
| |
| #define vstore_partial_16_0 NO_STORE |
| #define vstore_partial_16_1 vstore_partial_1 |
| #define vstore_partial_16_2 vstore_partial_2 |
| #define vstore_partial_16_3 vstore_partial_3 |
| #define vstore_partial_16_4 vstore_partial_4 |
| #define vstore_partial_16_5 vstore_partial_5 |
| #define vstore_partial_16_6 vstore_partial_6 |
| #define vstore_partial_16_7 vstore_partial_7 |
| #define vstore_partial_16_8 vstore_partial_8 |
| #define vstore_partial_16_9 vstore_partial_9 |
| #define vstore_partial_16_10 vstore_partial_10 |
| #define vstore_partial_16_11 vstore_partial_11 |
| #define vstore_partial_16_12 vstore_partial_12 |
| #define vstore_partial_16_13 vstore_partial_13 |
| #define vstore_partial_16_14 vstore_partial_14 |
| #define vstore_partial_16_15 vstore_partial_15 |
| #define vstore_partial_16_16 vstore_partial_16 |
| |
| |
| #define vstore_partial_1(DATA, OFFSET, PTR) \ |
| vstore1(DATA.s0, OFFSET, PTR); |
| |
| #define vstore_partial_2(DATA, OFFSET, PTR) \ |
| vstore2(DATA.s01, OFFSET, PTR); |
| |
| #define vstore_partial_3(DATA, OFFSET, PTR) \ |
| vstore3(DATA.s012, OFFSET, PTR); |
| |
| #define vstore_partial_4(DATA, OFFSET, PTR) \ |
| vstore4(DATA.s0123, OFFSET, PTR); |
| |
| #define vstore_partial_5(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore1(DATA.s4, OFFSET, PTR + 4); |
| |
| #define vstore_partial_6(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vstore_partial_7(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vstore_partial_8(DATA, OFFSET, PTR) \ |
| vstore8(DATA.s01234567, OFFSET, PTR); |
| |
| #define vstore_partial_9(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore1(DATA.s8, OFFSET, PTR + 8); |
| |
| #define vstore_partial_10(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vstore_partial_11(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s89a, OFFSET, PTR + 8); |
| |
| #define vstore_partial_12(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8); |
| |
| #define vstore_partial_13(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_14(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_15(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_16(DATA, OFFSET, PTR) \ |
| vstore16(DATA, OFFSET, PTR); |
| |
| |
| |
| |
| |
| #define convert_float_sat convert_float |
| #define convert_float1_sat convert_float |
| #define convert_float2_sat convert_float2 |
| #define convert_float3_sat convert_float3 |
| #define convert_float4_sat convert_float4 |
| #define convert_float8_sat convert_float8 |
| #define convert_float16_sat convert_float16 |
| #define convert_half_sat convert_float |
| #define convert_half1_sat convert_half |
| #define convert_half2_sat convert_half2 |
| #define convert_half3_sat convert_half3 |
| #define convert_half4_sat convert_half4 |
| #define convert_half8_sat convert_half8 |
| #define convert_half16_sat convert_half16 |
| |
| #define convert_float1 convert_float |
| #define convert_half1 convert_half |
| #define convert_char1 convert_char |
| #define convert_uchar1 convert_uchar |
| #define convert_short1 convert_short |
| #define convert_ushort1 convert_ushort |
| #define convert_int1 convert_int |
| #define convert_uint1 convert_uint |
| #define convert_long1 convert_long |
| #define convert_ulong1 convert_ulong |
| #define convert_double1 convert_double |
| |
| #define convert_char1_sat convert_char_sat |
| #define convert_uchar1_sat convert_uchar_sat |
| #define convert_uchar2_sat convert_uchar2_sat |
| #define convert_uchar3_sat convert_uchar3_sat |
| #define convert_uchar4_sat convert_uchar4_sat |
| #define convert_uchar8_sat convert_uchar8_sat |
| #define convert_uchar16_sat convert_uchar16_sat |
| #define convert_short1_sat convert_short_sat |
| #define convert_ushort1_sat convert_ushort_sat |
| #define convert_int1_sat convert_int_sat |
| #define convert_uint1_sat convert_uint_sat |
| #define convert_long1_sat convert_long_sat |
| #define convert_ulong1_sat convert_ulong_sat |
| #define convert_double1_sat convert_double_sat |
| |
| #define VEC_DATA_TYPE_STR(type, size) type##size |
| #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) |
| |
| #define CONVERT_STR(x, type) (convert_##type((x))) |
| #define CONVERT(x, type) CONVERT_STR(x, type) |
| |
| #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) |
| #define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) |
| |
| #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) |
| #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) |
| |
| #define select_vec_dt_uchar(size) uchar##size |
| #define select_vec_dt_char(size) char##size |
| #define select_vec_dt_ushort(size) ushort##size |
| #define select_vec_dt_short(size) short##size |
| #define select_vec_dt_half(size) short##size |
| #define select_vec_dt_uint(size) uint##size |
| #define select_vec_dt_int(size) int##size |
| #define select_vec_dt_float(size) int##size |
| #define select_vec_dt_ulong(size) ulong##size |
| #define select_vec_dt_long(size) long##size |
| |
| #define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) |
| #define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) |
| #define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define signed_int_vec_dt_uchar(size) char##size |
| #define signed_int_vec_dt_char(size) char##size |
| #define signed_int_vec_dt_ushort(size) short##size |
| #define signed_int_vec_dt_short(size) short##size |
| #define signed_int_vec_dt_half(size) short##size |
| #define signed_int_vec_dt_uint(size) int##size |
| #define signed_int_vec_dt_int(size) int##size |
| #define signed_int_vec_dt_float(size) int##size |
| #define signed_int_vec_dt_ulong(size) long##size |
| #define signed_int_vec_dt_long(size) long##size |
| |
| #define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size) |
| #define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size) |
| #define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define sum_reduce_1(x) (x) |
| #define sum_reduce_2(x) ((x).s0) + ((x).s1) |
| #define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) |
| #define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) |
| #define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) |
| #define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) |
| |
| #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) |
| #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) |
| |
| #define prod_reduce_1(x) (x) |
| #define prod_reduce_2(x) ((x).s0) * ((x).s1) |
| #define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2) |
| #define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23) |
| #define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567) |
| #define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF) |
| |
| #define PROD_REDUCE_STR(x, size) prod_reduce_##size(x) |
| #define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size) |
| |
| #define max_reduce_1(x) (x) |
| #define max_reduce_2(x) max(((x).s0), ((x).s1)) |
| #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) |
| #define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) |
| #define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) |
| #define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) |
| |
| #define MAX_REDUCE_STR(x, size) max_reduce_##size(x) |
| #define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) |
| |
| #define VECTOR_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define IMAGE_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR5D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_stride_v, \ |
| uint name##_step_v, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define CONVERT_TO_VECTOR_STRUCT(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x) |
| |
| #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0) |
| |
| #define CONVERT_TO_IMAGE_STRUCT(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y) |
| |
| #define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \ |
| tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| |
| typedef struct Vector |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| } Vector; |
| |
| |
| typedef struct Image |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| } Image; |
| |
| |
| typedef struct Tensor3D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| } Tensor3D; |
| |
| |
| typedef struct Tensor4D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| int stride_w; |
| } Tensor4D; |
| |
| |
| inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x) |
| { |
| Vector vector = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| }; |
| vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x; |
| return vector; |
| } |
| |
| |
| inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y; |
| return img; |
| } |
| |
| |
| inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return img; |
| } |
| |
| |
| inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return tensor; |
| } |
| |
| |
| inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| return tensor; |
| } |
| |
| inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w, |
| uint step_w, |
| uint mod_size) |
| { |
| Tensor4D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z, |
| .stride_w = stride_w |
| }; |
| |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w; |
| return tensor; |
| } |
| |
| |
| inline __global const uchar *vector_offset(const Vector *vec, int x) |
| { |
| return vec->ptr + x * vec->stride_x; |
| } |
| |
| |
| inline __global uchar *offset(const Image *img, int x, int y) |
| { |
| return img->ptr + x * img->stride_x + y * img->stride_y; |
| } |
| |
| |
| inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z; |
| } |
| |
| |
| inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w; |
| } |
| |
| |
| inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index) |
| { |
| uint num_elements = width * height; |
| |
| const uint z = index / num_elements; |
| |
| index %= num_elements; |
| |
| const uint y = index / width; |
| |
| index %= width; |
| |
| const uint x = index; |
| |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes; |
| } |
| |
| #endif |
| |
| |
| |
| #define REPEAT_3_1(P_X, P_A, P_B, P_C) P_X##_DEF(0, P_A, P_B, P_C) |
| #define REPEAT_3_2(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(1, P_A, P_B, P_C); \ |
| REPEAT_3_1(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_3(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(2, P_A, P_B, P_C); \ |
| REPEAT_3_2(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_4(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(3, P_A, P_B, P_C); \ |
| REPEAT_3_3(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_5(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(4, P_A, P_B, P_C); \ |
| REPEAT_3_4(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_6(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(5, P_A, P_B, P_C); \ |
| REPEAT_3_5(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_7(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(6, P_A, P_B, P_C); \ |
| REPEAT_3_6(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_8(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(7, P_A, P_B, P_C); \ |
| REPEAT_3_7(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_9(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(8, P_A, P_B, P_C); \ |
| REPEAT_3_8(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_10(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(9, P_A, P_B, P_C); \ |
| REPEAT_3_9(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_11(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(A, P_A, P_B, P_C); \ |
| REPEAT_3_10(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_12(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(B, P_A, P_B, P_C); \ |
| REPEAT_3_11(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_13(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(C, P_A, P_B, P_C); \ |
| REPEAT_3_12(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_14(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(D, P_A, P_B, P_C); \ |
| REPEAT_3_13(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_15(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(E, P_A, P_B, P_C); \ |
| REPEAT_3_14(P_X, P_A, P_B, P_C) |
| #define REPEAT_3_16(P_X, P_A, P_B, P_C) \ |
| P_X##_DEF(F, P_A, P_B, P_C); \ |
| REPEAT_3_15(P_X, P_A, P_B, P_C) |
| |
| #define REPEAT_DEF_3_N(P_NUM, P_OP, P_A, P_B, P_C) REPEAT_3_##P_NUM(P_OP, P_A, P_B, P_C) |
| #define REPEAT_3_N(P_NUM, P_OP, P_A, P_B, P_C) REPEAT_DEF_3_N(P_NUM, P_OP, P_A, P_B, P_C) |
| |
| |
| #define REPEAT_4_1(P_X, P_A, P_B, P_C, P_D) P_X##_DEF(0, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_2(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(1, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_1(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_3(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(2, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_2(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_4(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(3, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_3(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_5(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(4, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_4(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_6(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(5, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_5(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_7(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(6, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_6(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_8(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(7, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_7(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_9(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(8, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_8(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_10(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(9, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_9(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_11(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(A, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_10(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_12(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(B, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_11(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_13(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(C, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_12(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_14(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(D, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_13(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_15(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(E, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_14(P_X, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_16(P_X, P_A, P_B, P_C, P_D) \ |
| P_X##_DEF(F, P_A, P_B, P_C, P_D); \ |
| REPEAT_4_15(P_X, P_A, P_B, P_C, P_D) |
| |
| #define REPEAT_DEF_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) REPEAT_4_##P_NUM(P_OP, P_A, P_B, P_C, P_D) |
| #define REPEAT_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) REPEAT_DEF_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) |
| |
| |
| #define VAR_INIT_TO_CONST_DEF(ID, TYPE, VAR, VAL) TYPE VAR##ID = VAL |
| #define REPEAT_VAR_INIT_TO_CONST(N, TYPE, VAR, VAL) REPEAT_3_N(N, VAR_INIT_TO_CONST, TYPE, VAR, VAL) |
| |
| |
| #define VAR_INIT_CONVERT_DEF(ID, TYPE_OUT, VAR_IN, VAR_OUT) TYPE_OUT VAR_OUT##ID = CONVERT(VAR_IN##ID, TYPE_OUT) |
| #define REPEAT_VAR_INIT_CONVERT(N, TYPE_OUT, VAR_IN, VAR_OUT) REPEAT_3_N(N, VAR_INIT_CONVERT, TYPE_OUT, VAR_IN, VAR_OUT) |
| |
| |
| #define VAR_INIT_CONVERT_SAT_DEF(ID, TYPE_OUT, VAR_IN, VAR_OUT) TYPE_OUT VAR_OUT##ID = CONVERT_SAT(VAR_IN##ID, TYPE_OUT) |
| #define REPEAT_VAR_INIT_CONVERT_SAT(N, TYPE_OUT, VAR_IN, VAR_OUT) REPEAT_3_N(N, VAR_INIT_CONVERT_SAT, TYPE_OUT, VAR_IN, VAR_OUT) |
| |
| |
| #define ADD_CONST_TO_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID += (TYPE)VAL |
| #define REPEAT_ADD_CONST_TO_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, ADD_CONST_TO_VAR, TYPE, VAR, VAL) |
| |
| |
| #define MLA_VAR_WITH_CONST_VEC_DEF(ID, VAR_A, VAR_B, VAL) VAR_A##ID += VAR_B##ID * VAL |
| #define REPEAT_MLA_VAR_WITH_CONST_VEC(N, VAR_A, VAR_B, VAL) REPEAT_3_N(N, MLA_VAR_WITH_CONST_VEC, VAR_A, VAR_B, VAL) |
| |
| |
| #define ADD_VECTOR_TO_VAR_DEF(ID, TYPE, VAR, VEC) VAR##ID += VEC |
| #define REPEAT_ADD_VECTOR_TO_VAR(N, VAR, VEC) REPEAT_3_N(N, ADD_VECTOR_TO_VAR, "", VAR, VEC) |
| |
| |
| #define ADD_TWO_VARS_DEF(ID, TYPE, VAR_A, VAR_B) VAR_A##ID += VAR_B##ID |
| #define REPEAT_ADD_TWO_VARS(N, VAR_A, VAR_B) REPEAT_3_N(N, ADD_TWO_VARS, "", VAR_A, VAR_B) |
| |
| |
| #define MAX_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = max(VAR##ID, (TYPE)VAL) |
| #define REPEAT_MAX_CONST_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, MAX_CONST_VAR, TYPE, VAR, VAL) |
| |
| |
| #define MIN_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = min(VAR##ID, (TYPE)VAL) |
| #define REPEAT_MIN_CONST_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, MIN_CONST_VAR, TYPE, VAR, VAL) |
| |
| |
| #define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE) |
| #define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT) |
| |
| |
| #define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE) |
| #define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT) |
| |
| |
| #define ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) \ |
| ({ \ |
| VEC_DATA_TYPE(int, N0) \ |
| VAR##ID_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, N0); \ |
| VEC_DATA_TYPE(int, N0) \ |
| VAR##ID_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, N0); \ |
| VAR##ID = select(VAR##ID_shift_lt0, VAR##ID_shift_gt0, RES_SHIFT >= 0); \ |
| }) |
| #define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL, SIZE, VAR, RES_MUL, RES_SHIFT) |
| |
| #endif |
| |
| #ifndef SRC_CORE_CL_CL_KERNELS_TILE_HELPERS |
| #define SRC_CORE_CL_CL_KERNELS_TILE_HELPERS |
| |
| |
| |
| |
| #define TILE_VECTOR_SIZE1 1 |
| #define TILE_VECTOR_SIZE2 2 |
| #define TILE_VECTOR_SIZE3 3 |
| #define TILE_VECTOR_SIZE4 4 |
| #define TILE_VECTOR_SIZE5 8 |
| #define TILE_VECTOR_SIZE6 8 |
| #define TILE_VECTOR_SIZE7 8 |
| #define TILE_VECTOR_SIZE8 8 |
| #define TILE_VECTOR_SIZE9 16 |
| #define TILE_VECTOR_SIZE10 16 |
| #define TILE_VECTOR_SIZE11 16 |
| #define TILE_VECTOR_SIZE12 16 |
| #define TILE_VECTOR_SIZE13 16 |
| #define TILE_VECTOR_SIZE14 16 |
| #define TILE_VECTOR_SIZE15 16 |
| #define TILE_VECTOR_SIZE16 16 |
| |
| #define TILE_VECTOR_TYPE1(DATA_TYPE) DATA_TYPE##1 |
| #define TILE_VECTOR_TYPE2(DATA_TYPE) DATA_TYPE##2 |
| #define TILE_VECTOR_TYPE3(DATA_TYPE) DATA_TYPE##3 |
| #define TILE_VECTOR_TYPE4(DATA_TYPE) DATA_TYPE##4 |
| #define TILE_VECTOR_TYPE5(DATA_TYPE) DATA_TYPE##8 |
| #define TILE_VECTOR_TYPE6(DATA_TYPE) DATA_TYPE##8 |
| #define TILE_VECTOR_TYPE7(DATA_TYPE) DATA_TYPE##8 |
| #define TILE_VECTOR_TYPE8(DATA_TYPE) DATA_TYPE##8 |
| #define TILE_VECTOR_TYPE9(DATA_TYPE) DATA_TYPE##16 |
| #define TILE_VECTOR_TYPE10(DATA_TYPE) DATA_TYPE##16 |
| #define TILE_VECTOR_TYPE11(DATA_TYPE) DATA_TYPE##16 |
| #define TILE_VECTOR_TYPE12(DATA_TYPE) DATA_TYPE##16 |
| #define TILE_VECTOR_TYPE13(DATA_TYPE) DATA_TYPE##16 |
| #define TILE_VECTOR_TYPE14(DATA_TYPE) DATA_TYPE##16 |
| #define TILE_VECTOR_TYPE15(DATA_TYPE) DATA_TYPE##16 |
| #define TILE_VECTOR_TYPE16(DATA_TYPE) DATA_TYPE##16 |
| |
| |
| #define TILE(DATA_TYPE, H, W, BASENAME) TILE_STR(DATA_TYPE, H, W, BASENAME) |
| #define TILE_STR(DATA_TYPE, H, W, BASENAME) \ |
| union { \ |
| DATA_TYPE s[TILE_VECTOR_SIZE##W]; \ |
| TILE_VECTOR_TYPE##W(DATA_TYPE) v; \ |
| } BASENAME[H] |
| |
| #define TENSOR4D_IMAGE(name) \ |
| __read_only image2d_t name##_img, \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_BUFFER(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_STR(name, type) TENSOR4D_##type(name) |
| #define TENSOR4D(name, type) TENSOR4D_STR(name, type) |
| |
| #define TENSOR4D_T_IMAGE(name) \ |
| __read_only image2d_t name##_img, \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_y, \ |
| uint name##_stride_z, \ |
| uint name##_stride_w, \ |
| uint name##_c, \ |
| uint name##_w, \ |
| uint name##_h, \ |
| uint name##_n, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_T_BUFFER(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_y, \ |
| uint name##_stride_z, \ |
| uint name##_stride_w, \ |
| uint name##_c, \ |
| uint name##_w, \ |
| uint name##_h, \ |
| uint name##_n, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_T_STR(name, type) TENSOR4D_T_##type(name) |
| |
| |
| #define TENSOR4D_T(name, type) TENSOR4D_T_STR(name, type) |
| |
| #define TENSOR4D_RO_T_IMAGE(name) \ |
| __read_only image2d_t name##_img, \ |
| TENSOR4D_T_BUFFER(name) |
| |
| #define TENSOR4D_RO_T_BUFFER(name) TENSOR4D_T_BUFFER(name) |
| |
| #define TENSOR4D_RO_T_STR(name, type) TENSOR4D_RO_T_##type(name) |
| |
| |
| #define TENSOR4D_RO_T(name, type) TENSOR4D_RO_T_STR(name, type) |
| |
| #define TENSOR4D_WO_T_IMAGE(name) \ |
| __write_only image2d_t name##_img, \ |
| TENSOR4D_T_BUFFER(name) |
| |
| #define TENSOR4D_WO_T_BUFFER(name) TENSOR4D_T_BUFFER(name) |
| |
| #define TENSOR4D_WO_T_STR(name, type) TENSOR4D_WO_T_##type(name) |
| |
| |
| #define TENSOR4D_WO_T(name, type) TENSOR4D_WO_T_STR(name, type) |
| |
| #define TENSOR3D_T_IMAGE(name) \ |
| __read_only image2d_t name##_img, \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_y, \ |
| uint name##_stride_z, \ |
| uint name##_w, \ |
| uint name##_h, \ |
| uint name##_n, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_T_BUFFER(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_y, \ |
| uint name##_stride_z, \ |
| uint name##_w, \ |
| uint name##_h, \ |
| uint name##_n, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_T_STR(name, type) TENSOR3D_T_##type(name) |
| #define TENSOR3D_T(name, type) TENSOR3D_T_STR(name, type) |
| |
| #if !defined(UNROLL_WITH_PRAGMA) |
| #define UNROLL_INCR(idx, step, macro) idx += (step); (macro) |
| |
| #define LOOP_UNROLLING_1(idx, step, macro) (macro) |
| #define LOOP_UNROLLING_2(idx, step, macro) LOOP_UNROLLING_1(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_3(idx, step, macro) LOOP_UNROLLING_2(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_4(idx, step, macro) LOOP_UNROLLING_3(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_5(idx, step, macro) LOOP_UNROLLING_4(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_6(idx, step, macro) LOOP_UNROLLING_5(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_7(idx, step, macro) LOOP_UNROLLING_6(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_8(idx, step, macro) LOOP_UNROLLING_7(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_9(idx, step, macro) LOOP_UNROLLING_8(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_10(idx, step, macro) LOOP_UNROLLING_9(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_11(idx, step, macro) LOOP_UNROLLING_10(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_12(idx, step, macro) LOOP_UNROLLING_11(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_13(idx, step, macro) LOOP_UNROLLING_12(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_14(idx, step, macro) LOOP_UNROLLING_13(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_15(idx, step, macro) LOOP_UNROLLING_14(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_16(idx, step, macro) LOOP_UNROLLING_15(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_17(idx, step, macro) LOOP_UNROLLING_16(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_18(idx, step, macro) LOOP_UNROLLING_17(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_19(idx, step, macro) LOOP_UNROLLING_18(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_20(idx, step, macro) LOOP_UNROLLING_19(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_21(idx, step, macro) LOOP_UNROLLING_20(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_22(idx, step, macro) LOOP_UNROLLING_21(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_23(idx, step, macro) LOOP_UNROLLING_22(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_24(idx, step, macro) LOOP_UNROLLING_23(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_25(idx, step, macro) LOOP_UNROLLING_24(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_26(idx, step, macro) LOOP_UNROLLING_25(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_27(idx, step, macro) LOOP_UNROLLING_26(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_28(idx, step, macro) LOOP_UNROLLING_27(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_29(idx, step, macro) LOOP_UNROLLING_28(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_30(idx, step, macro) LOOP_UNROLLING_29(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_31(idx, step, macro) LOOP_UNROLLING_30(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_32(idx, step, macro) LOOP_UNROLLING_31(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_33(idx, step, macro) LOOP_UNROLLING_32(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_34(idx, step, macro) LOOP_UNROLLING_33(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_35(idx, step, macro) LOOP_UNROLLING_34(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_36(idx, step, macro) LOOP_UNROLLING_35(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_37(idx, step, macro) LOOP_UNROLLING_36(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_38(idx, step, macro) LOOP_UNROLLING_37(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_39(idx, step, macro) LOOP_UNROLLING_38(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_40(idx, step, macro) LOOP_UNROLLING_39(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_41(idx, step, macro) LOOP_UNROLLING_40(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_42(idx, step, macro) LOOP_UNROLLING_41(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_43(idx, step, macro) LOOP_UNROLLING_42(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_44(idx, step, macro) LOOP_UNROLLING_43(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_45(idx, step, macro) LOOP_UNROLLING_44(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_46(idx, step, macro) LOOP_UNROLLING_45(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_47(idx, step, macro) LOOP_UNROLLING_46(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_48(idx, step, macro) LOOP_UNROLLING_47(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_49(idx, step, macro) LOOP_UNROLLING_48(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_50(idx, step, macro) LOOP_UNROLLING_49(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_51(idx, step, macro) LOOP_UNROLLING_50(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_52(idx, step, macro) LOOP_UNROLLING_51(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_53(idx, step, macro) LOOP_UNROLLING_52(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_54(idx, step, macro) LOOP_UNROLLING_53(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_55(idx, step, macro) LOOP_UNROLLING_54(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_56(idx, step, macro) LOOP_UNROLLING_55(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_57(idx, step, macro) LOOP_UNROLLING_56(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_58(idx, step, macro) LOOP_UNROLLING_57(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_59(idx, step, macro) LOOP_UNROLLING_58(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_60(idx, step, macro) LOOP_UNROLLING_59(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_61(idx, step, macro) LOOP_UNROLLING_60(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_62(idx, step, macro) LOOP_UNROLLING_61(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_63(idx, step, macro) LOOP_UNROLLING_62(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_64(idx, step, macro) LOOP_UNROLLING_63(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_65(idx, step, macro) LOOP_UNROLLING_64(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_66(idx, step, macro) LOOP_UNROLLING_65(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_67(idx, step, macro) LOOP_UNROLLING_66(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_68(idx, step, macro) LOOP_UNROLLING_67(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_69(idx, step, macro) LOOP_UNROLLING_68(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_70(idx, step, macro) LOOP_UNROLLING_69(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_71(idx, step, macro) LOOP_UNROLLING_70(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_72(idx, step, macro) LOOP_UNROLLING_71(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_73(idx, step, macro) LOOP_UNROLLING_72(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_74(idx, step, macro) LOOP_UNROLLING_73(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_75(idx, step, macro) LOOP_UNROLLING_74(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_76(idx, step, macro) LOOP_UNROLLING_75(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_77(idx, step, macro) LOOP_UNROLLING_76(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_78(idx, step, macro) LOOP_UNROLLING_77(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_79(idx, step, macro) LOOP_UNROLLING_78(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_80(idx, step, macro) LOOP_UNROLLING_79(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_81(idx, step, macro) LOOP_UNROLLING_80(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_82(idx, step, macro) LOOP_UNROLLING_81(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_83(idx, step, macro) LOOP_UNROLLING_82(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_84(idx, step, macro) LOOP_UNROLLING_83(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_85(idx, step, macro) LOOP_UNROLLING_84(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_86(idx, step, macro) LOOP_UNROLLING_85(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_87(idx, step, macro) LOOP_UNROLLING_86(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_88(idx, step, macro) LOOP_UNROLLING_87(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_89(idx, step, macro) LOOP_UNROLLING_88(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_90(idx, step, macro) LOOP_UNROLLING_89(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_91(idx, step, macro) LOOP_UNROLLING_90(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_92(idx, step, macro) LOOP_UNROLLING_91(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_93(idx, step, macro) LOOP_UNROLLING_92(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_94(idx, step, macro) LOOP_UNROLLING_93(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_95(idx, step, macro) LOOP_UNROLLING_94(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_96(idx, step, macro) LOOP_UNROLLING_95(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_97(idx, step, macro) LOOP_UNROLLING_96(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_98(idx, step, macro) LOOP_UNROLLING_97(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_99(idx, step, macro) LOOP_UNROLLING_98(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_100(idx, step, macro) LOOP_UNROLLING_99(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_101(idx, step, macro) LOOP_UNROLLING_100(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_102(idx, step, macro) LOOP_UNROLLING_101(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_103(idx, step, macro) LOOP_UNROLLING_102(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_104(idx, step, macro) LOOP_UNROLLING_103(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_105(idx, step, macro) LOOP_UNROLLING_104(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_106(idx, step, macro) LOOP_UNROLLING_105(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_107(idx, step, macro) LOOP_UNROLLING_106(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_108(idx, step, macro) LOOP_UNROLLING_107(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_109(idx, step, macro) LOOP_UNROLLING_108(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_110(idx, step, macro) LOOP_UNROLLING_109(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_111(idx, step, macro) LOOP_UNROLLING_110(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_112(idx, step, macro) LOOP_UNROLLING_111(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_113(idx, step, macro) LOOP_UNROLLING_112(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_114(idx, step, macro) LOOP_UNROLLING_113(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_115(idx, step, macro) LOOP_UNROLLING_114(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_116(idx, step, macro) LOOP_UNROLLING_115(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_117(idx, step, macro) LOOP_UNROLLING_116(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_118(idx, step, macro) LOOP_UNROLLING_117(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_119(idx, step, macro) LOOP_UNROLLING_118(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_120(idx, step, macro) LOOP_UNROLLING_119(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_121(idx, step, macro) LOOP_UNROLLING_120(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_122(idx, step, macro) LOOP_UNROLLING_121(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_123(idx, step, macro) LOOP_UNROLLING_122(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_124(idx, step, macro) LOOP_UNROLLING_123(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_125(idx, step, macro) LOOP_UNROLLING_124(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_126(idx, step, macro) LOOP_UNROLLING_125(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_127(idx, step, macro) LOOP_UNROLLING_126(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| #define LOOP_UNROLLING_128(idx, step, macro) LOOP_UNROLLING_127(idx, step, macro); UNROLL_INCR(idx, step, macro) |
| |
| #define LOOP_UNROLLING_STR(type, idx, start, step, num, macro) \ |
| { \ |
| type idx = start; \ |
| LOOP_UNROLLING_##num(idx, step, macro); \ |
| } |
| #else |
| #define LOOP_UNROLLING_STR(type, idx, start, step, num, macro) \ |
| { \ |
| _Pragma("unroll") \ |
| for(type idx = start; idx < (num * step); idx += step) \ |
| { \ |
| (macro); \ |
| } \ |
| } |
| #endif |
| #define LOOP_UNROLLING(type, idx, start, step, num, macro) LOOP_UNROLLING_STR(type, idx, start, step, num, macro) |
| |
| |
| #define GET_SPATIAL_IDX(IDX, N0, PARTIAL_N0) (max((int)(get_global_id(IDX) * N0 - (N0 - PARTIAL_N0) % N0), 0)) |
| |
| |
| #define DOT_PRODUCT_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c) DOT_PRODUCT_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c) |
| #define DOT_PRODUCT_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, b, c) DOT_PRODUCT##K0##_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) |
| #define DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| c += (C_DATA_TYPE)(a) * (C_DATA_TYPE)(b); \ |
| }) |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_khr_integer_dot_product) |
| #define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += dot((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0))); |
| #define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += dot((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0)); |
| #define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += dot((a), (b)); |
| #elif defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0)), (c)); |
| #define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0), (c)); |
| #define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c = arm_dot_acc((a), (b), (c)); |
| #elif defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((A_DATA_TYPE##4)((a).s01, (A_DATA_TYPE##2)(0)), (B_DATA_TYPE##4)(((b).s01), (B_DATA_TYPE##2)(0))); |
| #define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((A_DATA_TYPE##4)((a).s012, (A_DATA_TYPE)0), (B_DATA_TYPE##4)(((b).s012), (B_DATA_TYPE)0)); |
| #define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) c += arm_dot((a), (b)); |
| #else |
| #define DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| c += (C_DATA_TYPE)(a).s0 * (C_DATA_TYPE)(b).s0; \ |
| c += (C_DATA_TYPE)(a).s1 * (C_DATA_TYPE)(b).s1; \ |
| }) |
| #define DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c); \ |
| c += (C_DATA_TYPE)(a).s2 * (C_DATA_TYPE)(b).s2; \ |
| }) |
| #define DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, x, y, val) \ |
| ({ \ |
| val += (C_DATA_TYPE)(x).s0 * (C_DATA_TYPE)(y).s0; \ |
| val += (C_DATA_TYPE)(x).s1 * (C_DATA_TYPE)(y).s1; \ |
| val += (C_DATA_TYPE)(x).s2 * (C_DATA_TYPE)(y).s2; \ |
| val += (C_DATA_TYPE)(x).s3 * (C_DATA_TYPE)(y).s3; \ |
| }) |
| #endif |
| #define DOT_PRODUCT5_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \ |
| DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s4), ((b).s4), c); \ |
| }) |
| #define DOT_PRODUCT6_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \ |
| DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s45), ((b).s45), c); \ |
| }) |
| #define DOT_PRODUCT7_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s0123), ((b).s0123), c); \ |
| DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s456), ((b).s456), c); \ |
| }) |
| #define DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).lo), ((b).lo), c); \ |
| DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).hi), ((b).hi), c); \ |
| }) |
| #define DOT_PRODUCT9_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \ |
| DOT_PRODUCT1_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s8), ((b).s8), c); \ |
| }) |
| #define DOT_PRODUCT10_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \ |
| DOT_PRODUCT2_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89), ((b).s89), c); \ |
| }) |
| #define DOT_PRODUCT11_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \ |
| DOT_PRODUCT3_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89A), ((b).s89A), c); \ |
| }) |
| #define DOT_PRODUCT12_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \ |
| DOT_PRODUCT4_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89AB), ((b).s89AB), c); \ |
| }) |
| #define DOT_PRODUCT13_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \ |
| DOT_PRODUCT5_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89ABC), ((b).s89ABC), c); \ |
| }) |
| #define DOT_PRODUCT14_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \ |
| DOT_PRODUCT6_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89ABCD), ((b).s89ABCD), c); \ |
| }) |
| #define DOT_PRODUCT15_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s01234567), ((b).s01234567), c); \ |
| DOT_PRODUCT7_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).s89ABCDE), ((b).s89ABCDE), c); \ |
| }) |
| #define DOT_PRODUCT16_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, a, b, c) \ |
| ({ \ |
| DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).lo), ((b).lo), c); \ |
| DOT_PRODUCT8_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, ((a).hi), ((b).hi), c); \ |
| }) |
| |
| |
| #define REDUCE_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c) REDUCE_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c) |
| #define REDUCE_INTEGER8_STR(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, c) DOT_PRODUCT_INTEGER8(A_DATA_TYPE, B_DATA_TYPE, C_DATA_TYPE, K0, a, (TILE_VECTOR_TYPE##K0(B_DATA_TYPE))1, c) |
| |
| |
| #define V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y) V_LOAD_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y) |
| #define V_LOAD_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y) V_LOAD_##TENSOR_TYPE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y) |
| #define V_LOAD_BUFFER(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y) \ |
| VLOAD(WIDTH) \ |
| (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (Y) * (STRIDE_Y))) |
| #define V_LOAD_IMAGE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y) READ_IMAGE2D(DATA_TYPE, CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(WIDTH), TENSOR##_img, (X) / 4, (Y)) |
| |
| |
| #define V_STORE(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y, VALUES) V_STORE_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y, VALUES) |
| #define V_STORE_STR(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, Y, STRIDE_Y, VALUES) V_STORE_##TENSOR_TYPE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y, VALUES) |
| #define V_STORE_BUFFER(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y, VALUES) \ |
| VSTORE(WIDTH) \ |
| (VALUES, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (Y) * (STRIDE_Y))) |
| #define V_STORE_IMAGE(DATA_TYPE, WIDTH, TENSOR, X, Y, STRIDE_Y, VALUES) WRITE_IMAGE2D(DATA_TYPE, CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(WIDTH), TENSOR##_img, (X) / 4, (Y), VALUES) |
| |
| |
| #define T_LOAD(DATA_TYPE, HEIGHT, WIDTH, TENSOR_TYPE, TENSOR, X, Y, YI_MULTIPLIER, STRIDE_Y, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \ |
| { \ |
| dst[_i].v = V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, ((Y) + _i * (int)(YI_MULTIPLIER)), STRIDE_Y); \ |
| }) \ |
| }) |
| |
| |
| #define T_LOAD_INDIRECT(DATA_TYPE, HEIGHT, WIDTH, TENSOR_TYPE, TENSOR, X, STRIDE_Y, indirect_y, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \ |
| { \ |
| dst[_i].v = V_LOAD(DATA_TYPE, WIDTH, TENSOR_TYPE, TENSOR, X, (indirect_y[_i].v), STRIDE_Y); \ |
| }) \ |
| }) |
| |
| |
| #define T_LOAD_INDIRECT_WIDTH_SELECT(DATA_TYPE, HEIGHT, WIDTH0, WIDTH1, TENSOR_TYPE, TENSOR, X, STRIDE_Y, WIDTH1_CONDITION, dst, indirect_y) \ |
| ({ \ |
| if(WIDTH1_CONDITION) \ |
| { \ |
| LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \ |
| { \ |
| VLOAD_PARTIAL(WIDTH0, WIDTH1) \ |
| (dst[HEIGHT - 1 - _i].v, 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \ |
| }) \ |
| } \ |
| else \ |
| { \ |
| LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \ |
| { \ |
| dst[HEIGHT - 1 - _i].v = V_LOAD(DATA_TYPE, WIDTH0, TENSOR_TYPE, TENSOR, X, (indirect_y[HEIGHT - 1 - _i].v), STRIDE_Y); \ |
| }) \ |
| } \ |
| }) |
| |
| #define T_LOAD_NHWC(DATA_TYPE, TILE_HEIGHT, TILE_WIDTH, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _yk, 0, 1, TILE_HEIGHT, \ |
| { \ |
| LOOP_UNROLLING(int, _xk, 0, 1, TILE_WIDTH, \ |
| { \ |
| int _src_y = (X) + _xk + ((Y) + _yk) * (TENSOR_WIDTH); \ |
| _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \ |
| int _src_valid_y = (((X) + _xk) >= 0 && ((X) + _xk) < (int)(TENSOR_WIDTH) && ((Y) + _yk) >= 0 && ((Y) + _yk) < (int)(TENSOR_HEIGHT)); \ |
| if(_src_valid_y != 0) \ |
| { \ |
| dst[_xk + _yk * (TILE_WIDTH)].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \ |
| } \ |
| }) \ |
| }) \ |
| }) |
| |
| |
| #define T_LOAD_NHWC_WITH_DILATION(DATA_TYPE, TILE_HEIGHT, TILE_WIDTH, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, DILATION_X, DILATION_Y, BOUNDARY_CHECK, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _yk, 0, 1, TILE_HEIGHT, \ |
| { \ |
| LOOP_UNROLLING(int, _xk, 0, 1, TILE_WIDTH, \ |
| { \ |
| int _src_y = (X) + _xk * (DILATION_X); \ |
| int _src_z = ((Y) + _yk * (DILATION_Y)); \ |
| int _src_w = (B); \ |
| bool _src_valid_y = (((X) + _xk * (DILATION_X)) >= 0) && (((X) + _xk * (DILATION_X)) < (int)(TENSOR_WIDTH)) && (((Y) + _yk * (DILATION_Y)) >= 0) && (((Y) + _yk * (DILATION_Y)) < (int)(TENSOR_HEIGHT)); \ |
| if(!(BOUNDARY_CHECK)) \ |
| { \ |
| dst[_xk + _yk * (TILE_WIDTH)].v = VLOAD(TILE_CHANNELS) \ |
| (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (C) * sizeof(DATA_TYPE) + (_src_y) * (TENSOR##_stride_y) + (_src_z) * (TENSOR##_stride_z) + (_src_w) * (TENSOR##_stride_w))); \ |
| } \ |
| else \ |
| { \ |
| if(_src_valid_y) \ |
| { \ |
| dst[_xk + _yk * (TILE_WIDTH)].v = VLOAD(TILE_CHANNELS) \ |
| (0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (C) * sizeof(DATA_TYPE) + (_src_y) * (TENSOR##_stride_y) + (_src_z) * (TENSOR##_stride_z) + (_src_w) * (TENSOR##_stride_w))); \ |
| } \ |
| } \ |
| }) \ |
| }) \ |
| }) |
| |
| |
| #define T_LOAD_NHWC_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, STRIDE_Y, xi, yi, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \ |
| { \ |
| int _src_y = (X) + xi[_i].v + ((Y) + yi[_i].v) * (TENSOR_WIDTH); \ |
| _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT); \ |
| int _src_valid_y = (((X) + xi[_i].v) >= 0 && ((X) + xi[_i].v) < (int)(TENSOR_WIDTH) && ((Y) + yi[_i].v) >= 0 && ((Y) + yi[_i].v) < (int)(TENSOR_HEIGHT)); \ |
| if(_src_valid_y != 0) \ |
| { \ |
| dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \ |
| } \ |
| }) \ |
| }) |
| |
| |
| #define T_LOAD2D_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) T_LOAD2D_INDIRECT_STR(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) |
| #define T_LOAD2D_INDIRECT_STR(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) T_LOAD2D_INDIRECT_##TENSOR_TYPE(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) |
| #define T_LOAD2D_INDIRECT_BUFFER(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \ |
| { \ |
| if(yi[0].s[_i] >= 0) \ |
| { \ |
| dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[0].s[_i], STRIDE_Y); \ |
| } \ |
| }) \ |
| }) |
| |
| #define T_LOAD2D_INDIRECT_IMAGE(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, STRIDE_Y, yi, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \ |
| { \ |
| dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, yi[0].s[_i], STRIDE_Y); \ |
| }) \ |
| }) |
| |
| |
| #define T_LOAD_NDHWC_INDIRECT(DATA_TYPE, TILE_AREA, TILE_CHANNELS, TENSOR_TYPE, TENSOR, B, Z, Y, X, C, TENSOR_WIDTH, TENSOR_HEIGHT, TENSOR_DEPTH, STRIDE_Y, xi, yi, zi, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _i, 0, 1, TILE_AREA, \ |
| { \ |
| int _src_y = (X) + xi[_i].v + ((Y) + yi[_i].v) * (TENSOR_WIDTH) + ((Z) + zi[_i].v) * (TENSOR_WIDTH * TENSOR_HEIGHT); \ |
| _src_y += (B) * (int)(TENSOR_WIDTH) * (int)(TENSOR_HEIGHT) * (int)(TENSOR_DEPTH); \ |
| int _src_valid_y = (((X) + xi[_i].v) >= 0 && ((X) + xi[_i].v) < (int)(TENSOR_WIDTH) && ((Y) + yi[_i].v) >= 0 && ((Y) + yi[_i].v) < (int)(TENSOR_HEIGHT) \ |
| && ((Z) + zi[_i].v) >= 0 && ((Z) + zi[_i].v) < (int)(TENSOR_DEPTH)); \ |
| if(_src_valid_y != 0) \ |
| { \ |
| dst[_i].v = V_LOAD(DATA_TYPE, TILE_CHANNELS, TENSOR_TYPE, TENSOR, C, _src_y, STRIDE_Y); \ |
| } \ |
| }) \ |
| }) |
| |
| |
| #define T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, HEIGHT, WIDTH0, WIDTH1, TENSOR_TYPE, TENSOR, X, STRIDE_Y, WIDTH1_CONDITION, src, indirect_y) \ |
| ({ \ |
| if(WIDTH1_CONDITION) \ |
| { \ |
| LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \ |
| { \ |
| VSTORE_PARTIAL(WIDTH0, WIDTH1) \ |
| (CONVERT(src[HEIGHT - 1 - _i].v, VEC_DATA_TYPE(DATA_TYPE, WIDTH0)), 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \ |
| }) \ |
| } \ |
| else \ |
| { \ |
| LOOP_UNROLLING(int, _i, 0, 1, HEIGHT, \ |
| { \ |
| VSTORE(WIDTH0) \ |
| (CONVERT(src[HEIGHT - 1 - _i].v, VEC_DATA_TYPE(DATA_TYPE, WIDTH0)), 0, (__global DATA_TYPE *)(TENSOR##_ptr + TENSOR##_offset_first_element_in_bytes + (X) * sizeof(DATA_TYPE) + (indirect_y[HEIGHT - 1 - _i].v) * STRIDE_Y)); \ |
| }) \ |
| } \ |
| }) |
| |
| |
| #define T_OFFSET_CORRECTION(ACC_DATA_TYPE, M0, N0, K0, SRC_OFFSET, WEI_OFFSET, lhs, rhs, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| ACC_DATA_TYPE _tm = 0; \ |
| LOOP_UNROLLING(int, _k0, 0, 1, K0, \ |
| { \ |
| _tm += ((ACC_DATA_TYPE)lhs[_m0].s[_k0] * (ACC_DATA_TYPE)WEI_OFFSET); \ |
| }) \ |
| LOOP_UNROLLING(int, _n0, 0, 1, N0, \ |
| { \ |
| dst[_m0].s[_n0] += _tm; \ |
| LOOP_UNROLLING(int, _k0, 0, 1, K0, \ |
| { \ |
| dst[_m0].s[_n0] += ((ACC_DATA_TYPE)rhs[_n0].s[_k0] * (ACC_DATA_TYPE)SRC_OFFSET); \ |
| }) \ |
| }) \ |
| }) \ |
| }) |
| |
| |
| #define T_QUANTIZE8(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) T_QUANTIZE8_STR(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) |
| #define T_QUANTIZE8_STR(SRC_DATA_TYPE, DST_DATA_TYPE, QUANTIZATION_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) T_QUANTIZE8_##QUANTIZATION_TYPE(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) |
| |
| |
| #define T_QUANTIZE8_PER_TENSOR(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| LOOP_UNROLLING(int, _n0, 0, 1, N0, \ |
| { \ |
| SRC_DATA_TYPE _tmp = 0; \ |
| SRC_DATA_TYPE _src = src[_m0].s[_n0]; \ |
| _src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-DST_SHIFT)), ((SRC_DATA_TYPE)DST_SHIFT < (SRC_DATA_TYPE)0)); \ |
| SRC_DATA_TYPE overflow = _src == DST_MULTIPLIER && _src == INT_MIN; \ |
| long a_64 = (long)(_src); \ |
| long b_64 = (long)(DST_MULTIPLIER); \ |
| long ab_64 = a_64 * b_64; \ |
| long mask1 = 1 << 30; \ |
| long mask2 = 1 - (1 << 30); \ |
| long is_positive_or_zero = ab_64 >= 0; \ |
| long nudge = select(mask2, mask1, is_positive_or_zero); \ |
| SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \ |
| _tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \ |
| if(DST_SHIFT >= 0) \ |
| { \ |
| long mask = ((((int)1) << DST_SHIFT) - (long)1); \ |
| long threshold = _tmp < (int)0 ? (mask >> 1) + (long)1 : (mask >> 1) + 0; \ |
| _tmp = (_tmp & mask) > threshold ? (_tmp >> DST_SHIFT) + (int)1 : (_tmp >> DST_SHIFT); \ |
| } \ |
| _tmp += DST_OFFSET; \ |
| dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \ |
| }) \ |
| }) \ |
| }) |
| |
| |
| #define T_QUANTIZE8_PER_CHANNEL(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst_multipliers, dst_shifts, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| LOOP_UNROLLING(int, _n0, 0, 1, N0, \ |
| { \ |
| SRC_DATA_TYPE _tmp = 0; \ |
| SRC_DATA_TYPE _tmp2 = 0; \ |
| SRC_DATA_TYPE _src = src[_m0].s[_n0]; \ |
| SRC_DATA_TYPE _dst_multiplier = dst_multipliers[0].s[_n0]; \ |
| SRC_DATA_TYPE _dst_shift = dst_shifts[0].s[_n0]; \ |
| _src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-_dst_shift)), ((SRC_DATA_TYPE)_dst_shift < (SRC_DATA_TYPE)0)); \ |
| SRC_DATA_TYPE overflow = _src == _dst_multiplier && _src == INT_MIN; \ |
| long a_64 = (long)(_src); \ |
| long b_64 = (long)(_dst_multiplier); \ |
| long ab_64 = a_64 * b_64; \ |
| long mask1 = 1 << 30; \ |
| long mask2 = 1 - (1 << 30); \ |
| long is_positive_or_zero = ab_64 >= 0; \ |
| long nudge = select(mask2, mask1, is_positive_or_zero); \ |
| SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \ |
| _tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \ |
| long mask = ((((int)1) << _dst_shift) - (int)1); \ |
| long threshold = (mask >> 1) + any(_tmp); \ |
| _tmp2 = _tmp >> _dst_shift; \ |
| _tmp2 += select(0, 1, (_tmp & mask) > threshold); \ |
| _tmp = select(_tmp, _tmp2, _dst_shift >= 0); \ |
| _tmp += DST_OFFSET; \ |
| dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \ |
| }) \ |
| }) \ |
| }) |
| |
| |
| #define T_QUANTIZE8_ASYMMETRIC(SRC_DATA_TYPE, DST_DATA_TYPE, M0, N0, DST_OFFSET, DST_SHIFT, DST_MULTIPLIER, src, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| LOOP_UNROLLING(int, _n0, 0, 1, N0, \ |
| { \ |
| SRC_DATA_TYPE _tmp = 0; \ |
| SRC_DATA_TYPE _src = src[_m0].s[_n0]; \ |
| _src *= select((SRC_DATA_TYPE)1, ((SRC_DATA_TYPE)1 << (SRC_DATA_TYPE)(-DST_SHIFT)), ((SRC_DATA_TYPE)DST_SHIFT < (SRC_DATA_TYPE)0)); \ |
| SRC_DATA_TYPE overflow = _src == DST_MULTIPLIER && _src == INT_MIN; \ |
| long a_64 = (long)(_src); \ |
| long b_64 = (long)(DST_MULTIPLIER); \ |
| long ab_64 = a_64 * b_64; \ |
| long mask1 = 1 << 30; \ |
| long mask2 = 1 - (1 << 30); \ |
| long is_positive_or_zero = ab_64 >= 0; \ |
| long nudge = select(mask2, mask1, is_positive_or_zero); \ |
| SRC_DATA_TYPE ab_x2_high32 = CONVERT((ab_64 + nudge) / (long)(1ll << 31), SRC_DATA_TYPE); \ |
| _tmp = select(ab_x2_high32, (SRC_DATA_TYPE)INT_MAX, overflow); \ |
| if(DST_SHIFT >= 0) \ |
| { \ |
| long mask = ((((int)1) << DST_SHIFT) - (int)1); \ |
| long threshold = _tmp < (int)0 ? (mask >> 1) + (long)1 : (mask >> 1) + 0; \ |
| _tmp = (_tmp & mask) > threshold ? (_tmp >> DST_SHIFT) + (int)1 : (_tmp >> DST_SHIFT); \ |
| } \ |
| _tmp += DST_OFFSET; \ |
| dst[_m0].s[_n0] = CONVERT_SAT(_tmp, DST_DATA_TYPE); \ |
| }) \ |
| }) \ |
| }) |
| |
| |
| #define T_ROWSET_MASK(DATA_TYPE, M0, N0, VALUE_TO_SET, a, mask) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| LOOP_UNROLLING(int, _n0, 0, 1, N0, \ |
| { \ |
| a[_m0].s[_n0] = select((DATA_TYPE)(a[_m0].s[_n0]), (DATA_TYPE)(VALUE_TO_SET), (SELECT_DATA_TYPE(DATA_TYPE))(mask[_m0].v == (DATA_TYPE)0)); \ |
| }) \ |
| }) \ |
| }) |
| |
| |
| #define T_ACTIVATION(DATA_TYPE, M0, N0, ACTIVATION_TYPE, A_VAL, B_VAL, src, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| dst[_m0].v = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, N0, src[_m0].v, A_VAL, B_VAL); \ |
| }) \ |
| }) |
| |
| |
| #define relu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (max((DATA_TYPE)ZERO_VALUE, x)) |
| |
| #define brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)ZERO_VALUE, x))) |
| |
| #define lu_brelu_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL)) |
| |
| #define hard_swish_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (x * ((min(max((DATA_TYPE)(x + (DATA_TYPE)3.f), (DATA_TYPE)0.f), (DATA_TYPE)6.f)) * (DATA_TYPE)0.166666667f)) |
| |
| #define identity_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) (x) |
| |
| #define ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) op##_op_quantized(DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) |
| #define ACTIVATION_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) ACT_OP_QUANTIZED(op, DATA_TYPE, VEC_SIZE, ZERO_VALUE, A_VAL, B_VAL, x) |
| |
| #define V_ADD(A_VAL, B_VAL) ((A_VAL) + (B_VAL)) |
| #define V_SUB(A_VAL, B_VAL) ((A_VAL) - (B_VAL)) |
| #define V_DIV(A_VAL, B_VAL) ((A_VAL) / (B_VAL)) |
| #define V_MUL(A_VAL, B_VAL) ((A_VAL) * (B_VAL)) |
| |
| |
| #define T_ACTIVATION_QUANTIZED(DATA_TYPE, M0, N0, ACTIVATION_TYPE, ZERO_VALUE, A_VAL, B_VAL, src, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| dst[_m0].v = ACTIVATION_QUANTIZED(ACTIVATION_TYPE, DATA_TYPE, N0, ZERO_VALUE, A_VAL, B_VAL, src[_m0].v); \ |
| }) \ |
| }) |
| |
| |
| #define T_ADD(DATA_TYPE, M0, N0, lhs, rhs, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| dst[_m0].v = lhs[_m0].v + rhs[_m0].v; \ |
| }) \ |
| }) |
| |
| |
| #define T_ADD_CONSTANT(DATA_TYPE, M0, N0, lhs, rhs_constant, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| dst[_m0].v = lhs[_m0].v + (DATA_TYPE)rhs_constant; \ |
| }) \ |
| }) |
| |
| #define T_ELTWISE_BROADCAST_ADD_X(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| #define T_ELTWISE_BROADCAST_LHS_X_ADD(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_LHS_X(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| #define T_ELTWISE_BROADCAST_RHS_X_ADD(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| |
| #define T_ELTWISE_BROADCAST_LHS_X_SUB(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_LHS_X(V_SUB, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| #define T_ELTWISE_BROADCAST_RHS_X_SUB(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_SUB, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| |
| #define T_ELTWISE_BROADCAST_DIV_X(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_DIV, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| |
| #define T_ELTWISE_BROADCAST_LHS_X_MUL(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_LHS_X(V_MUL, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| #define T_ELTWISE_BROADCAST_RHS_X_MUL(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE_BROADCAST_X(V_MUL, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| |
| |
| #define T_SCALE_CONSTANT(DATA_TYPE, M0, N0, lhs, rhs_constant, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| dst[_m0].v = lhs[_m0].v * (DATA_TYPE)rhs_constant; \ |
| }) \ |
| }) |
| |
| |
| #define T_ELTWISE_BROADCAST_X(T_ELWISE_OP, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| dst[_m0].v = T_ELWISE_OP(CONVERT(lhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)), CONVERT(rhs[0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \ |
| }) \ |
| }) |
| |
| |
| #define T_ELTWISE_BROADCAST_LHS_X(T_ELWISE_OP, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| dst[_m0].v = T_ELWISE_OP(CONVERT(lhs[0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)), CONVERT(rhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \ |
| }) \ |
| }) |
| |
| #define T_ELTWISE_ADD(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_ADD, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| #define T_ELTWISE_SUB(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_SUB, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| #define T_ELTWISE_DIV(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_DIV, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| #define T_ELTWISE_MUL(DST_DATA_TYPE, M0, N0, lhs, rhs, dst) T_ELTWISE(V_MUL, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) |
| |
| |
| #define T_ELTWISE(T_ELWISE_OP, DST_DATA_TYPE, M0, N0, lhs, rhs, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| dst[_m0].v = T_ELWISE_OP(CONVERT(lhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0)), CONVERT(rhs[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \ |
| }) \ |
| }) |
| |
| |
| #define T_FLOOR(DST_DATA_TYPE, M0, N0, src, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m0, 0, 1, M0, \ |
| { \ |
| dst[_m0].v = floor(CONVERT(src[_m0].v, VEC_DATA_TYPE(DST_DATA_TYPE, N0))); \ |
| }) \ |
| }) |
| |
| |
| #define T_MMUL(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, LHS_LAYOUT, RHS_LAYOUT, lhs, rhs, dst) T_MMUL_##LHS_LAYOUT##_##RHS_LAYOUT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) |
| #define T_MMUL_NT_T(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_##LHS_DATA_TYPE##_##RHS_DATA_TYPE##_##DST_DATA_TYPE(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) |
| #define T_MMUL_NT_T_float_float_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) |
| #define T_MMUL_NT_T_half_half_float(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) |
| #define T_MMUL_NT_T_half_half_half(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) |
| #define T_MMUL_NT_T_char_char_int(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) |
| #define T_MMUL_NT_T_uchar_uchar_uint(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) |
| #define T_MMUL_NT_T_uchar_uchar_int(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) |
| #define T_MMUL_NT_T_FLOAT(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \ |
| { \ |
| LOOP_UNROLLING(int, _m, 0, 1, M0, \ |
| { \ |
| LOOP_UNROLLING(int, _n, 0, 1, N0, \ |
| { \ |
| LOOP_UNROLLING(int, _k, 0, 1, K0, \ |
| { \ |
| dst[_m].s[_n] = fma((DST_DATA_TYPE)(lhs[_m].s[_k]), (DST_DATA_TYPE)(rhs[_n].s[_k]), dst[_m].s[_n]); \ |
| }) \ |
| }) \ |
| }) \ |
| } |
| |
| #define T_MMUL_NT_T_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, M0, N0, K0, lhs, rhs, dst) \ |
| ({ \ |
| LOOP_UNROLLING(int, _m, 0, 1, M0, \ |
| { \ |
| LOOP_UNROLLING(int, _n, 0, 1, N0, \ |
| { \ |
| DOT_PRODUCT_INTEGER8(LHS_DATA_TYPE, RHS_DATA_TYPE, DST_DATA_TYPE, K0, (lhs[_m].v), (rhs[_n].v), dst[_m].s[_n]); \ |
| }) \ |
| }) \ |
| }) |
| |
| #endif |
| |
| #if defined(DATA_TYPE) && defined(ACC_DATA_TYPE) |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), (val)); |
| #else |
| #define ARM_DOT(x, y, val) val += arm_dot((x), (y)); |
| #endif |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| |
| #define ARM_DOT1(a, b, c) \ |
| ({ \ |
| ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (VEC_DATA_TYPE(DATA_TYPE, 3))0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (VEC_DATA_TYPE(DATA_TYPE, 3))0), c); \ |
| }) |
| #define ARM_DOT2(a, b, c) \ |
| ({ \ |
| ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (VEC_DATA_TYPE(DATA_TYPE, 2))0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (VEC_DATA_TYPE(DATA_TYPE, 2))0), c); \ |
| }) |
| #define ARM_DOT3(a, b, c) \ |
| ({ \ |
| ARM_DOT((VEC_DATA_TYPE(DATA_TYPE, 4))(a, (DATA_TYPE)0), (VEC_DATA_TYPE(DATA_TYPE, 4))(b, (DATA_TYPE)0), c); \ |
| }) |
| #define ARM_DOT4(a, b, c) \ |
| ({ \ |
| ARM_DOT(a, b, c); \ |
| }) |
| #define ARM_DOT8(a, b, c) \ |
| ({ \ |
| ARM_DOT4((a.lo), (b.lo), c); \ |
| ARM_DOT4((a.hi), (b.hi), c); \ |
| }) |
| #define ARM_DOT16(a, b, c) \ |
| ({ \ |
| ARM_DOT8((a.lo), (b.lo), c); \ |
| ARM_DOT8((a.hi), (b.hi), c); \ |
| }) |
| |
| #else |
| |
| |
| #define ARM_DOT1(a, b, c) \ |
| ({ \ |
| c += (ACC_DATA_TYPE)a * b; \ |
| }) |
| #define ARM_DOT2(a, b, c) \ |
| ({ \ |
| c += (ACC_DATA_TYPE)a.s0 * b.s0; \ |
| c += (ACC_DATA_TYPE)a.s1 * b.s1; \ |
| }) |
| #define ARM_DOT3(a, b, c) \ |
| ({ \ |
| ARM_DOT2(a, b, c); \ |
| c += (ACC_DATA_TYPE)a.s2 * b.s2; \ |
| }) |
| #define ARM_DOT4(a, b, c) \ |
| ({ \ |
| ARM_DOT3(a, b, c); \ |
| c += (ACC_DATA_TYPE)a.s3 * b.s3; \ |
| }) |
| #define ARM_DOT8(a, b, c) \ |
| ({ \ |
| ARM_DOT4((a.lo), (b.lo), c); \ |
| ARM_DOT4((a.hi), (b.hi), c); \ |
| }) |
| #define ARM_DOT16(a, b, c) \ |
| ({ \ |
| ARM_DOT8((a.lo), (b.lo), c); \ |
| ARM_DOT8((a.hi), (b.hi), c); \ |
| }) |
| #endif |
| |
| |
| #define ARM_DOT_K0X1(k0, a, b, c) \ |
| ({ \ |
| ARM_DOT_K0(k0, (a), (b##0), (c)); \ |
| }) |
| #define ARM_DOT_K0X2(k0, a, b, c) \ |
| ({ \ |
| ARM_DOT_K0(k0, (a), (b##0), (c.s0)); \ |
| ARM_DOT_K0(k0, (a), (b##1), (c.s1)); \ |
| }) |
| #define ARM_DOT_K0X3(k0, a, b, c) \ |
| ({ \ |
| ARM_DOT_K0X2(k0, a, b, c); \ |
| ARM_DOT_K0(k0, (a), (b##2), (c.s2)); \ |
| }) |
| #define ARM_DOT_K0X4(k0, a, b, c) \ |
| ({ \ |
| ARM_DOT_K0X3(k0, a, b, c); \ |
| ARM_DOT_K0(k0, (a), (b##3), (c.s3)); \ |
| }) |
| #define ARM_DOT_K0X8(k0, a, b, c) \ |
| ({ \ |
| ARM_DOT_K0X4(k0, a, b, c); \ |
| ARM_DOT_K0(k0, (a), (b##4), (c.s4)); \ |
| ARM_DOT_K0(k0, (a), (b##5), (c.s5)); \ |
| ARM_DOT_K0(k0, (a), (b##6), (c.s6)); \ |
| ARM_DOT_K0(k0, (a), (b##7), (c.s7)); \ |
| }) |
| #define ARM_DOT_K0X16(k0, a, b, c) \ |
| ({ \ |
| ARM_DOT_K0X8(k0, a, b, c); \ |
| ARM_DOT_K0(k0, (a), (b##8), (c.s8)); \ |
| ARM_DOT_K0(k0, (a), (b##9), (c.s9)); \ |
| ARM_DOT_K0(k0, (a), (b##A), (c.sA)); \ |
| ARM_DOT_K0(k0, (a), (b##B), (c.sB)); \ |
| ARM_DOT_K0(k0, (a), (b##C), (c.sC)); \ |
| ARM_DOT_K0(k0, (a), (b##D), (c.sD)); \ |
| ARM_DOT_K0(k0, (a), (b##E), (c.sE)); \ |
| ARM_DOT_K0(k0, (a), (b##F), (c.sF)); \ |
| }) |
| |
| |
| #define ARM_MM_K0XN0X1(n0, k0, a, b, c) \ |
| ({ \ |
| ARM_DOT_K0XN0(n0, k0, (a##0), b, (c##0)); \ |
| }) |
| #define ARM_MM_K0XN0X2(n0, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_K0XN0X1(n0, k0, a, b, c); \ |
| ARM_DOT_K0XN0(n0, k0, (a##1), b, (c##1)); \ |
| }) |
| #define ARM_MM_K0XN0X3(n0, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_K0XN0X2(n0, k0, a, b, c); \ |
| ARM_DOT_K0XN0(n0, k0, (a##2), b, (c##2)); \ |
| }) |
| #define ARM_MM_K0XN0X4(n0, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_K0XN0X3(n0, k0, a, b, c); \ |
| ARM_DOT_K0XN0(n0, k0, (a##3), b, (c##3)); \ |
| }) |
| #define ARM_MM_K0XN0X5(n0, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_K0XN0X4(n0, k0, a, b, c); \ |
| ARM_DOT_K0XN0(n0, k0, (a##4), b, (c##4)); \ |
| }) |
| #define ARM_MM_K0XN0X6(n0, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_K0XN0X5(n0, k0, a, b, c); \ |
| ARM_DOT_K0XN0(n0, k0, (a##5), b, (c##5)); \ |
| }) |
| #define ARM_MM_K0XN0X7(n0, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_K0XN0X6(n0, k0, a, b, c); \ |
| ARM_DOT_K0XN0(n0, k0, (a##6), b, (c##6)); \ |
| }) |
| #define ARM_MM_K0XN0X8(n0, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_K0XN0X7(n0, k0, a, b, c); \ |
| ARM_DOT_K0XN0(n0, k0, (a##7), b, (c##7)); \ |
| }) |
| |
| #define ARM_DOT_K0(k0, a, b, c) \ |
| ({ \ |
| CONCAT(ARM_DOT, k0) \ |
| ((a), (b), (c)); \ |
| }) |
| |
| #define ARM_DOT_K0XN0(n0, k0, a, b, c) \ |
| ({ \ |
| CONCAT(ARM_DOT_K0X, n0) \ |
| (k0, (a), b, (c)); \ |
| }) |
| |
| #define ARM_MM_K0XN0XM0(m0, n0, k0, a, b, c) \ |
| ({ \ |
| CONCAT(ARM_MM_K0XN0X, m0) \ |
| (n0, k0, a, b, c); \ |
| }) |
| |
| |
| #define ARM_MUL_N0X1(VECTOR_ACC_TYPE, a, b, c) \ |
| ({ \ |
| c += CONVERT(b##0, VECTOR_ACC_TYPE) * a; \ |
| }) |
| #define ARM_MUL_N0X2(VECTOR_ACC_TYPE, a, b, c) \ |
| ({ \ |
| c += CONVERT(b##0, VECTOR_ACC_TYPE) * a.s##0; \ |
| c += CONVERT(b##1, VECTOR_ACC_TYPE) * a.s##1; \ |
| }) |
| #define ARM_MUL_N0X3(VECTOR_ACC_TYPE, a, b, c) \ |
| ({ \ |
| ARM_MUL_N0X2(VECTOR_ACC_TYPE, a, b, c); \ |
| c += CONVERT(b##2, VECTOR_ACC_TYPE) * a.s##2; \ |
| }) |
| #define ARM_MUL_N0X4(VECTOR_ACC_TYPE, a, b, c) \ |
| ({ \ |
| ARM_MUL_N0X3(VECTOR_ACC_TYPE, a, b, c); \ |
| c += CONVERT(b##3, VECTOR_ACC_TYPE) * a.s##3; \ |
| }) |
| #define ARM_MUL_N0X8(VECTOR_ACC_TYPE, a, b, c) \ |
| ({ \ |
| ARM_MUL_N0X4(VECTOR_ACC_TYPE, a, b, c); \ |
| c += CONVERT(b##4, VECTOR_ACC_TYPE) * a.s##4; \ |
| c += CONVERT(b##5, VECTOR_ACC_TYPE) * a.s##5; \ |
| c += CONVERT(b##6, VECTOR_ACC_TYPE) * a.s##6; \ |
| c += CONVERT(b##7, VECTOR_ACC_TYPE) * a.s##7; \ |
| }) |
| #define ARM_MUL_N0X16(VECTOR_ACC_TYPE, a, b, c) \ |
| ({ \ |
| ARM_MUL_N0X8(VECTOR_ACC_TYPE, a, b, c); \ |
| c += CONVERT(b##8, VECTOR_ACC_TYPE) * a.s##8; \ |
| c += CONVERT(b##9, VECTOR_ACC_TYPE) * a.s##9; \ |
| c += CONVERT(b##A, VECTOR_ACC_TYPE) * a.s##A; \ |
| c += CONVERT(b##B, VECTOR_ACC_TYPE) * a.s##B; \ |
| c += CONVERT(b##C, VECTOR_ACC_TYPE) * a.s##C; \ |
| c += CONVERT(b##D, VECTOR_ACC_TYPE) * a.s##D; \ |
| c += CONVERT(b##E, VECTOR_ACC_TYPE) * a.s##E; \ |
| c += CONVERT(b##F, VECTOR_ACC_TYPE) * a.s##F; \ |
| }) |
| |
| #define ARM_MM_NATIVE_N0XK0X1(VECTOR_ACC_TYPE, k0, a, b, c) \ |
| ({ \ |
| ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##0), b, (c##0)); \ |
| }) |
| #define ARM_MM_NATIVE_N0XK0X2(VECTOR_ACC_TYPE, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_NATIVE_N0XK0X1(VECTOR_ACC_TYPE, k0, a, b, c); \ |
| ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##1), b, (c##1)); \ |
| }) |
| #define ARM_MM_NATIVE_N0XK0X3(VECTOR_ACC_TYPE, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_NATIVE_N0XK0X2(VECTOR_ACC_TYPE, k0, a, b, c); \ |
| ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##2), b, (c##2)); \ |
| }) |
| #define ARM_MM_NATIVE_N0XK0X4(VECTOR_ACC_TYPE, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_NATIVE_N0XK0X3(VECTOR_ACC_TYPE, k0, a, b, c); \ |
| ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##3), b, (c##3)); \ |
| }) |
| #define ARM_MM_NATIVE_N0XK0X5(VECTOR_ACC_TYPE, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_NATIVE_N0XK0X4(VECTOR_ACC_TYPE, k0, a, b, c); \ |
| ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##4), b, (c##4)); \ |
| }) |
| #define ARM_MM_NATIVE_N0XK0X6(VECTOR_ACC_TYPE, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_NATIVE_N0XK0X5(VECTOR_ACC_TYPE, k0, a, b, c); \ |
| ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##5), b, (c##5)); \ |
| }) |
| #define ARM_MM_NATIVE_N0XK0X7(VECTOR_ACC_TYPE, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_NATIVE_N0XK0X6(VECTOR_ACC_TYPE, k0, a, b, c); \ |
| ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##6), b, (c##6)); \ |
| }) |
| #define ARM_MM_NATIVE_N0XK0X8(VECTOR_ACC_TYPE, k0, a, b, c) \ |
| ({ \ |
| ARM_MM_NATIVE_N0XK0X7(VECTOR_ACC_TYPE, k0, a, b, c); \ |
| ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, (a##7), b, (c##7)); \ |
| }) |
| #define ARM_MUL_N0XK0(VECTOR_ACC_TYPE, k0, a, b, c) \ |
| ({ \ |
| CONCAT(ARM_MUL_N0X, k0) \ |
| (VECTOR_ACC_TYPE, (a), b, (c)); \ |
| }) |
| #define ARM_MM_NATIVE_N0XK0XM0(VECTOR_ACC_TYPE, m0, k0, a, b, c) \ |
| ({ \ |
| CONCAT(ARM_MM_NATIVE_N0XK0X, m0) \ |
| (VECTOR_ACC_TYPE, k0, a, b, c); \ |
| }) |
| |
| #if defined(GEMMLOWP_MM_RESHAPED_LHS_NT_RHS_T) |
| |
| __kernel void gemmlowp_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs), |
| IMAGE_DECLARATION(rhs), |
| IMAGE_DECLARATION(dst), |
| uint k, |
| uint lhs_stride_z, |
| uint rhs_stride_z, |
| uint dst_stride_z |
| #if defined(REINTERPRET_OUTPUT_AS_3D) |
| , |
| uint dst_cross_plane_pad |
| #endif |
| ) |
| { |
| |
| #define LHS_BLOCK_SIZE ((K0) * (M0)) |
| |
| #if defined(LHS_INTERLEAVE) |
| #define LHS_OFFSET_X (K0) |
| #define LHS_STEP_X ((K0) * (V0)) |
| #define LHS_STEP_LOOP (1) |
| #else |
| #define LHS_OFFSET_X (LHS_BLOCK_SIZE) |
| #define LHS_STEP_X (K0) |
| #define LHS_STEP_LOOP (V0) |
| #endif |
| |
| |
| #define RHS_BLOCK_SIZE ((K0) * (N0)) |
| |
| |
| #if defined(RHS_INTERLEAVE) |
| #define RHS_OFFSET_X (K0) |
| #define RHS_STEP_X ((K0) * (H0)) |
| #define RHS_STEP_LOOP (1) |
| #else |
| #define RHS_OFFSET_X (RHS_BLOCK_SIZE) |
| #define RHS_STEP_X (K0) |
| #define RHS_STEP_LOOP (H0) |
| #endif |
| |
| uint x = get_global_id(0); |
| uint y = get_global_id(1); |
| uint z = get_global_id(2); |
| |
| #if defined(DUMMY_WORK_ITEMS) |
| if((x * N0 >= N) || (y * M0 >= M)) |
| { |
| return; |
| } |
| #endif |
| |
| |
| __global DATA_TYPE *lhs_addr = (__global DATA_TYPE *)(lhs_ptr + lhs_offset_first_element_in_bytes + (y % V0) * (uint)LHS_OFFSET_X + (y / V0) * (uint)lhs_stride_y + (z * lhs_stride_z)); |
| |
| |
| __global DATA_TYPE *rhs_addr = (__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X + (x / (uint)H0) * rhs_stride_y); |
| |
| #if defined(MATRIX_B_DEPTH) |
| |
| rhs_addr += (z % MATRIX_B_DEPTH) * rhs_stride_z; |
| #else |
| rhs_addr += z * rhs_stride_z; |
| #endif |
| |
| REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); |
| REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0); |
| |
| |
| REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); |
| |
| for(int i = 0; i < k; i += K0) |
| { |
| |
| LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_addr, 0, LHS_STEP_X, zlhs); |
| |
| |
| LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_addr, 0, RHS_STEP_X, zrhs); |
| |
| |
| ARM_MM_K0XN0XM0(M0, N0, K0, a, b, c); |
| |
| |
| lhs_addr += (M0 * LHS_STEP_X * LHS_STEP_LOOP); |
| rhs_addr += (N0 * RHS_STEP_X * RHS_STEP_LOOP); |
| } |
| |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (y * (uint)M0 * dst_stride_y); |
| |
| REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); |
| |
| #if defined(REINTERPRET_OUTPUT_AS_3D) |
| |
| CALCULATE_Z_OFFSET(M0, uint, zout, y * M0, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y); |
| |
| |
| |
| dst_addr += z * dst_stride_z * DEPTH_GEMM3D; |
| |
| #else |
| |
| |
| dst_addr += z * dst_stride_z; |
| |
| #endif |
| |
| |
| const bool cond_y = ((get_global_id(1) + 1) * M0 >= M); |
| const bool cond_x = ((get_global_id(0) + 1) * N0 >= N); |
| |
| |
| REPEAT_VAR_INIT_CONVERT_SAT(M0, VEC_DATA_TYPE(int, N0), c, c_lp); |
| STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x); |
| |
| #undef LHS_BLOCK_SIZE |
| #undef LHS_OFFSET_X |
| #undef LHS_STEP_X |
| #undef RHS_BLOCK_SIZE |
| #undef RHS_OFFSET_X |
| #undef RHS_STEP_X |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T_FUSED_OUTPUT_STAGE_FIXEDPOINT) || defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T) |
| #if defined(RESULT_OFFSET) && defined(RESULT_MULTIPLIER) && defined(RESULT_SHIFT) |
| #define FUSED_OUTPUT_STAGE_FIXED_POINT |
| #endif |
| |
| |
| #if defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T_FUSED_OUTPUT_STAGE_FIXEDPOINT) |
| __kernel void gemmlowp_mm_reshaped_only_rhs_t_fused_output_stage_fixedpoint |
| #elif defined(GEMMLOWP_MM_RESHAPED_ONLY_RHS_T) |
| __kernel void gemmlowp_mm_reshaped_only_rhs_t |
| #endif |
| (IMAGE_DECLARATION(lhs), |
| IMAGE_DECLARATION(rhs), |
| IMAGE_DECLARATION(dst), |
| uint lhs_stride_z, |
| uint rhs_stride_z, |
| uint dst_stride_z |
| #if defined(REINTERPRET_INPUT_AS_3D) |
| , |
| uint lhs_cross_plane_pad |
| #endif |
| #if defined(REINTERPRET_OUTPUT_AS_3D) |
| , |
| uint dst_cross_plane_pad |
| #endif |
| #if defined(A_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_col) |
| #endif |
| #if defined(B_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_row) |
| #endif |
| #if defined(ADD_BIAS) |
| , |
| VECTOR_DECLARATION(biases) |
| #endif |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| , |
| VECTOR_DECLARATION(result_multipliers), |
| VECTOR_DECLARATION(result_shifts) |
| #endif |
| ) |
| { |
| |
| #define FULL_LHS_HEIGHT (lhs_stride_z / lhs_stride_y) |
| #define FULL_DST_HEIGHT (dst_stride_z / dst_stride_y) |
| |
| |
| #if defined(RHS_INTERLEAVE) |
| #define RHS_OFFSET_X (K0) |
| #define RHS_STEP_X (K0 * H0) |
| #else |
| #define RHS_OFFSET_X (K0 * N0) |
| #define RHS_STEP_X (K0) |
| #endif |
| #define RHS_STEP_LOOP (N0 * K0 * H0) |
| |
| uint x = GET_SPATIAL_IDX(0, 1, 1); |
| uint y = GET_SPATIAL_IDX(1, M0, PARTIAL_STORE_M0); |
| uint z = GET_SPATIAL_IDX(2, 1, 1); |
| int xo = (x * N0); |
| |
| #if defined(DUMMY_WORK_ITEMS) |
| if((xo >= N) || (y >= M)) |
| { |
| return; |
| } |
| #endif |
| |
| |
| uint lhs_y = y + z * FULL_LHS_HEIGHT; |
| |
| |
| uint rhs_offset_x = (x % H0) * RHS_OFFSET_X; |
| uint rhs_offset_y = (x / H0) * rhs_stride_y; |
| |
| #if defined(MATRIX_B_DEPTH) |
| |
| rhs_offset_y += (z % MATRIX_B_DEPTH) * rhs_stride_z; |
| #else |
| rhs_offset_y += z * rhs_stride_z; |
| #endif |
| |
| |
| TILE(ACC_DATA_TYPE, M0, N0, c); |
| LOOP_UNROLLING(int, i, 0, 1, M0, |
| { |
| c[i].v = 0; |
| }) |
| |
| int i = 0; |
| for(; i <= (K - K0); i += K0) |
| { |
| TILE(DATA_TYPE, M0, K0, a); |
| TILE(DATA_TYPE, N0, K0, b); |
| |
| |
| T_LOAD(DATA_TYPE, M0, K0, BUFFER, lhs, i, lhs_y, 1, lhs_stride_y, a); |
| |
| |
| LOOP_UNROLLING(int, _i, 0, 1, N0, |
| { |
| b[_i].v = VLOAD(K0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + rhs_offset_x + rhs_offset_y + _i * RHS_STEP_X)); |
| }) |
| |
| |
| T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, K0, NT, T, a, b, c); |
| |
| rhs_offset_x += RHS_STEP_LOOP; |
| } |
| |
| #if((K % K0) != 0) |
| |
| |
| for(; i < K; ++i) |
| { |
| TILE(DATA_TYPE, M0, 1, a); |
| TILE(DATA_TYPE, N0, 1, b); |
| |
| |
| T_LOAD(DATA_TYPE, M0, 1, BUFFER, lhs, i, lhs_y, 1, lhs_stride_y, a); |
| |
| LOOP_UNROLLING(int, _i, 0, 1, N0, |
| { |
| b[_i].v = *(__global DATA_TYPE *)(rhs_ptr + rhs_offset_first_element_in_bytes + rhs_offset_x + rhs_offset_y + _i * RHS_STEP_X); |
| }) |
| |
| T_MMUL(DATA_TYPE, DATA_TYPE, ACC_DATA_TYPE, M0, N0, 1, NT, T, a, b, c); |
| |
| rhs_offset_x += 1; |
| } |
| #endif |
| |
| #if defined(FUSED_OUTPUT_STAGE_FIXED_POINT) |
| |
| TILE(int, M0, N0, c_int); |
| TILE(int, M0, N0, offset_s32); |
| LOOP_UNROLLING(int, i, 0, 1, M0, |
| { |
| offset_s32[i].v = (VEC_DATA_TYPE(int, N0))K_OFFSET; |
| }) |
| |
| LOOP_UNROLLING(int, i, 0, 1, M0, |
| { |
| c_int[i].v = CONVERT_SAT(c[i].v, VEC_DATA_TYPE(int, N0)); |
| }) |
| |
| #if defined(A_OFFSET) |
| |
| #if defined(SUM_COL_HAS_BATCHES) |
| int sum_col_y = z; |
| #else |
| int sum_col_y = 0; |
| #endif |
| TILE(int, 1, N0, a_offset_s32); |
| |
| T_LOAD(int, 1, N0, BUFFER, sum_col, xo, sum_col_y, 1, sum_col_stride_y, a_offset_s32); |
| |
| a_offset_s32[0].v *= A_OFFSET; |
| |
| T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, offset_s32, a_offset_s32, offset_s32); |
| #endif |
| |
| #if defined(B_OFFSET) |
| |
| |
| |
| |
| TILE(int, M0, N0, b_offset_s32); |
| |
| T_LOAD(int, M0, 1, BUFFER, sum_row, y + z * (sum_row_stride_y / sizeof(int)), 0, 1, sum_row_stride_x, b_offset_s32); |
| |
| LOOP_UNROLLING(int, i, 0, 1, M0, |
| { |
| offset_s32[i].v += b_offset_s32[i].v *B_OFFSET; |
| }) |
| |
| #endif |
| |
| #if defined(ADD_BIAS) |
| |
| TILE(int, 1, N0, bias); |
| |
| T_LOAD(int, 1, N0, BUFFER, biases, xo, 0, 1, 0, bias); |
| |
| T_ELTWISE_BROADCAST_ADD_X(int, M0, N0, offset_s32, bias, offset_s32); |
| #endif |
| |
| LOOP_UNROLLING(int, i, 0, 1, M0, |
| { |
| c_int[i].v += offset_s32[i].v; |
| }) |
| |
| TILE(DATA_TYPE, M0, N0, c_lp); |
| |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| TILE(int, 1, N0, res_mul); |
| TILE(int, 1, N0, res_shift); |
| |
| T_LOAD(int, 1, N0, BUFFER, result_multipliers, xo, 0, 0, 0, res_mul); |
| T_LOAD(int, 1, N0, BUFFER, result_shifts, xo, 0, 0, 0, res_shift); |
| |
| T_QUANTIZE8(int, DATA_TYPE, PER_CHANNEL, M0, N0, RESULT_OFFSET, RESULT_SHIFT, RESULT_MULTIPLIER, c_int, res_mul, res_shift, c_lp); |
| #else |
| T_QUANTIZE8(int, DATA_TYPE, PER_TENSOR, M0, N0, RESULT_OFFSET, RESULT_SHIFT, RESULT_MULTIPLIER, c_int, 0, 0, c_lp); |
| #endif |
| |
| #if defined(MIN_BOUND) |
| LOOP_UNROLLING(int, i, 0, 1, M0, |
| { |
| c_lp[i].v = max(c_lp[i].v, (VEC_DATA_TYPE(DATA_TYPE, N0))MIN_BOUND); |
| }) |
| #endif |
| #if defined(MAX_BOUND) |
| LOOP_UNROLLING(int, i, 0, 1, M0, |
| { |
| c_lp[i].v = min(c_lp[i].v, (VEC_DATA_TYPE(DATA_TYPE, N0))MAX_BOUND); |
| }) |
| #endif |
| |
| #else |
| TILE(int, M0, N0, c_lp); |
| |
| LOOP_UNROLLING(int, i, 0, 1, M0, |
| { |
| c_lp[i].v = CONVERT_SAT(c[i].v, VEC_DATA_TYPE(int, N0)); |
| }) |
| #endif |
| |
| TILE(uint, M0, 1, dst_indirect_y); |
| |
| LOOP_UNROLLING(int, i, 0, 1, M0, |
| { |
| #if defined(REINTERPRET_OUTPUT_AS_3D) |
| dst_indirect_y[i].v = (uint)min((int)((y + i) % HEIGHT_GEMM3D), (int)HEIGHT_GEMM3D - 1); |
| dst_indirect_y[i].v += (uint)min((int)((y + i) / HEIGHT_GEMM3D), (int)DEPTH_GEMM3D - 1) * FULL_DST_HEIGHT; |
| dst_indirect_y[i].v += z *FULL_DST_HEIGHT *DEPTH_GEMM3D; |
| #else |
| dst_indirect_y[i].v = (uint)min((int)y + i, (int)M - 1) + z *FULL_DST_HEIGHT; |
| #endif |
| }) |
| |
| const bool cond_x = (xo > (N - N0)) & (PARTIAL_STORE_N0 != 0); |
| |
| #if defined(FUSED_OUTPUT_STAGE_FIXED_POINT) |
| T_STORE_INDIRECT_WIDTH_SELECT(DATA_TYPE, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, xo, dst_stride_y, cond_x, c_lp, dst_indirect_y); |
| #else |
| T_STORE_INDIRECT_WIDTH_SELECT(int, M0, N0, PARTIAL_STORE_N0, BUFFER, dst, xo, dst_stride_y, cond_x, c_lp, dst_indirect_y); |
| #endif |
| |
| #undef RHS_OFFSET_X |
| #undef RHS_STEP_X |
| #undef RHS_STEP_LOOP |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_MM_NATIVE) |
| |
| |
| __kernel void gemmlowp_mm_native(IMAGE_DECLARATION(lhs), |
| IMAGE_DECLARATION(rhs), |
| IMAGE_DECLARATION(dst), |
| uint lhs_stride_z, |
| uint rhs_stride_z, |
| uint dst_stride_z |
| #if defined(REINTERPRET_INPUT_AS_3D) |
| , |
| uint lhs_cross_plane_pad |
| #endif |
| #if defined(REINTERPRET_OUTPUT_AS_3D) |
| , |
| uint dst_cross_plane_pad |
| #endif |
| ) |
| { |
| uint x = get_global_id(0); |
| uint y = get_global_id(1); |
| uint z = get_global_id(2); |
| |
| #if defined(DUMMY_WORK_ITEMS) |
| if((x * N0 >= N) || (y * M0 >= M)) |
| { |
| return; |
| } |
| #endif |
| |
| |
| uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y; |
| |
| |
| uint rhs_offset = rhs_offset_first_element_in_bytes + x * N0 * sizeof(DATA_TYPE); |
| |
| #if defined(MATRIX_B_DEPTH) |
| |
| rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z; |
| #else |
| rhs_offset += z * rhs_stride_z; |
| #endif |
| |
| REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); |
| REPEAT_VAR_INIT_TO_CONST(16, uint, zrhs, 0); |
| |
| #if defined(REINTERPRET_INPUT_AS_3D) |
| |
| CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y); |
| |
| |
| |
| lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D; |
| |
| #else |
| |
| |
| lhs_offset += z * lhs_stride_z; |
| |
| #endif |
| |
| |
| REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(ACC_DATA_TYPE, N0), c, 0); |
| |
| int i = 0; |
| |
| for(; i <= (K - K0); i += K0) |
| { |
| |
| LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs); |
| |
| |
| LOAD_BLOCK(K0, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zrhs); |
| |
| |
| #if(GPU_ARCH == GPU_ARCH_MIDGARD) |
| ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), M0, K0, a, b, c); |
| #else |
| |
| TRANSPOSE_K0XN0(K0, N0, b_t, b, DATA_TYPE); |
| |
| ARM_MM_K0XN0XM0(M0, N0, K0, a, b_t, c); |
| #endif |
| |
| |
| lhs_offset += K0; |
| rhs_offset += K0 * rhs_stride_y; |
| } |
| |
| |
| for(; i < K; ++i) |
| { |
| |
| LOAD_BLOCK(M0, 1, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs); |
| |
| |
| LOAD_BLOCK(1, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zrhs); |
| |
| |
| #if(GPU_ARCH == GPU_ARCH_MIDGARD) |
| ARM_MM_NATIVE_N0XK0XM0(VEC_DATA_TYPE(ACC_DATA_TYPE, N0), M0, 1, a, b, c); |
| #else |
| |
| TRANSPOSE_K0XN0(1, N0, b_t, b, DATA_TYPE); |
| |
| ARM_MM_K0XN0XM0(M0, N0, 1, a, b_t, c); |
| #endif |
| |
| |
| lhs_offset += 1; |
| rhs_offset += rhs_stride_y; |
| } |
| |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(int)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y); |
| |
| REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0); |
| |
| #if defined(REINTERPRET_OUTPUT_AS_3D) |
| |
| CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y); |
| |
| |
| |
| dst_addr += z * dst_stride_z * DEPTH_GEMM3D; |
| |
| #else |
| |
| |
| dst_addr += z * dst_stride_z; |
| |
| #endif |
| const bool cond_y = y == 0; |
| const bool cond_x = ((x + 1) * N0 >= N); |
| |
| |
| REPEAT_VAR_INIT_CONVERT(M0, VEC_DATA_TYPE(int, N0), c, res); |
| STORE_BLOCK_BOUNDARY_AWARE(M0, N0, int, res, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x); |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_MATRIX_A_REDUCTION) |
| |
| __kernel void gemmlowp_matrix_a_reduction(TENSOR3D_DECLARATION(src), |
| IMAGE_DECLARATION(dst)) |
| { |
| |
| Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src); |
| Image dst = CONVERT_TO_IMAGE_STRUCT(dst); |
| |
| VEC_DATA_TYPE(ACC_DATA_TYPE, 4) |
| sum_row_32 = (VEC_DATA_TYPE(ACC_DATA_TYPE, 4))0; |
| ACC_DATA_TYPE sum_row = 0; |
| |
| __global const DATA_TYPE *matrix_a = (__global const DATA_TYPE *)(src.ptr + get_global_id(0) * src_stride_y + get_global_id(1) * src_stride_z); |
| |
| int i = 0; |
| |
| |
| for(; i <= ((int)COLS_A - 16); i += 16) |
| { |
| const VEC_DATA_TYPE(DATA_TYPE, 16) a0 = vload16(0, matrix_a + i); |
| |
| sum_row_32 += CONVERT(a0.s0123, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.s4567, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.s89AB, VEC_DATA_TYPE(ACC_DATA_TYPE, 4)) + CONVERT(a0.sCDEF, |
| VEC_DATA_TYPE(ACC_DATA_TYPE, 4)); |
| } |
| |
| |
| for(; i < COLS_A; ++i) |
| { |
| sum_row += (ACC_DATA_TYPE)matrix_a[i]; |
| } |
| |
| sum_row += sum_row_32.s0 + sum_row_32.s1 + sum_row_32.s2 + sum_row_32.s3; |
| |
| #if defined(SCALAR) |
| sum_row *= (int)SCALAR; |
| #endif |
| *((__global int *)dst.ptr) = (int)sum_row; |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_MATRIX_A_REDUCTION_DOT8) |
| |
| __kernel void gemmlowp_matrix_a_reduction_dot8(TENSOR3D_DECLARATION(src), |
| IMAGE_DECLARATION(dst)) |
| { |
| |
| Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src); |
| Image dst = CONVERT_TO_IMAGE_STRUCT(dst); |
| |
| ACC_DATA_TYPE sum_row = 0; |
| |
| __global const DATA_TYPE *matrix_a = (__global const DATA_TYPE *)(src.ptr + get_global_id(0) * src_stride_y + get_global_id(1) * src_stride_z); |
| |
| int i = 0; |
| |
| |
| for(; i <= ((int)COLS_A - 32); i += 32) |
| { |
| VEC_DATA_TYPE(DATA_TYPE, 16) |
| a0 = vload16(0, matrix_a + i); |
| |
| DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row); |
| DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row); |
| DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row); |
| DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row); |
| |
| a0 = vload16(1, matrix_a + i); |
| |
| DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s0123, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row); |
| DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s4567, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row); |
| DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.s89AB, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row); |
| DOT_PRODUCT4_INTEGER8(DATA_TYPE, DATA_TYPE, DATA_TYPE, a0.sCDEF, (VEC_DATA_TYPE(DATA_TYPE, 4))(1), sum_row); |
| } |
| |
| |
| for(; i < COLS_A; ++i) |
| { |
| sum_row += (ACC_DATA_TYPE)matrix_a[i]; |
| } |
| |
| #if defined(SCALAR) |
| sum_row *= (int)SCALAR; |
| #endif |
| *((__global int *)dst.ptr) = (int)sum_row; |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_MATRIX_B_REDUCTION) |
| |
| __kernel void gemmlowp_matrix_b_reduction(TENSOR3D_DECLARATION(src), |
| IMAGE_DECLARATION(dst)) |
| { |
| |
| const uint x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); |
| const uint y = get_global_id(1); |
| |
| __global const DATA_TYPE *matrix_b = (__global const DATA_TYPE *)(src_ptr + src_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) + y * src_step_y + y * src_stride_z); |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * sizeof(int) + y * dst_stride_y; |
| |
| VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE) |
| sum_col_32 = (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))0; |
| |
| int i = 0; |
| |
| for(; i <= ((int)ROWS_B - 4); i += 4) |
| { |
| const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) |
| b0 = VLOAD(VEC_SIZE)(0, matrix_b + 0 * src_stride_y); |
| const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) |
| b1 = VLOAD(VEC_SIZE)(0, matrix_b + 1 * src_stride_y); |
| const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) |
| b2 = VLOAD(VEC_SIZE)(0, matrix_b + 2 * src_stride_y); |
| const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) |
| b3 = VLOAD(VEC_SIZE)(0, matrix_b + 3 * src_stride_y); |
| |
| sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b1, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b2, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)) + CONVERT(b3, |
| VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)); |
| |
| matrix_b += 4 * src_stride_y; |
| } |
| |
| |
| for(; i < (int)ROWS_B; ++i) |
| { |
| const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) |
| b0 = VLOAD(VEC_SIZE)(0, matrix_b); |
| |
| sum_col_32 += CONVERT(b0, VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE)); |
| |
| matrix_b += src_stride_y; |
| } |
| |
| #if defined(SCALAR) |
| sum_col_32 *= (VEC_DATA_TYPE(ACC_DATA_TYPE, VEC_SIZE))SCALAR; |
| #endif |
| VEC_DATA_TYPE(int, VEC_SIZE) |
| res0 = CONVERT(sum_col_32, VEC_DATA_TYPE(int, VEC_SIZE)); |
| |
| STORE_VECTOR_SELECT(res, int, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
| #endif |
| |
| #endif |
| |
| #if defined(K_OFFSET) && defined(VEC_SIZE) && defined(VEC_SIZE_LEFTOVER) |
| |
| #define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE) |
| |
| |
| inline VEC_INT offset_contribution( |
| int x, |
| int y, |
| int z |
| #if defined(A_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_col) |
| #endif |
| #if defined(B_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_row) |
| #endif |
| #if defined(ADD_BIAS) |
| , |
| VECTOR_DECLARATION(biases) |
| #endif |
| ) |
| { |
| VEC_INT a_offset_s32 = (VEC_INT)0; |
| VEC_INT b_offset_s32 = (VEC_INT)0; |
| |
| int batch_id = z; |
| #if defined(DEPTH_INPUT3D) |
| batch_id /= (int)DEPTH_INPUT3D; |
| #endif |
| |
| #if defined(A_OFFSET) |
| |
| __global uchar *sum_col_addr = sum_col_ptr + sum_col_offset_first_element_in_bytes + x * sizeof(int); |
| |
| |
| #if defined(SUM_COL_HAS_BATCHES) |
| a_offset_s32 = VLOAD(VEC_SIZE)(0, (__global int *)(sum_col_addr + batch_id * sum_col_stride_y)); |
| #else |
| a_offset_s32 = VLOAD(VEC_SIZE)(0, (__global int *)sum_col_addr); |
| #endif |
| |
| a_offset_s32 *= (VEC_INT)A_OFFSET; |
| #endif |
| |
| #if defined(B_OFFSET) |
| |
| __global uchar *sum_row_addr = sum_row_ptr + sum_row_offset_first_element_in_bytes + y * sizeof(int); |
| |
| |
| #if defined(HEIGHT_INPUT3D) && defined(DEPTH_INPUT3D) |
| b_offset_s32 = (VEC_INT) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y)) + (z % (int)DEPTH_INPUT3D) * (int)HEIGHT_INPUT3D); |
| #else |
| b_offset_s32 = (VEC_INT) * (((__global int *)(sum_row_addr + batch_id * sum_row_stride_y))); |
| #endif |
| b_offset_s32 *= (VEC_INT)B_OFFSET; |
| #endif |
| |
| #if defined(ADD_BIAS) |
| |
| __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int); |
| |
| VEC_INT biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr); |
| b_offset_s32 += (VEC_INT)biases_values; |
| #endif |
| |
| return (VEC_INT)K_OFFSET + a_offset_s32 + b_offset_s32; |
| } |
| |
| #if defined(GEMMLOWP_OFFSET_CONTRIBUTION) |
| |
| __kernel void gemmlowp_offset_contribution(TENSOR3D_DECLARATION(mm_result) |
| #if defined(A_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_col) |
| #endif |
| #if defined(B_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_row) |
| #endif |
| #if defined(ADD_BIAS) |
| , |
| VECTOR_DECLARATION(biases) |
| #endif |
| ) |
| { |
| const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); |
| const int y = get_global_id(1); |
| const int z = get_global_id(2); |
| |
| |
| VEC_INT offset_term_s32 = offset_contribution( |
| x, y, z |
| #if defined(A_OFFSET) |
| , |
| sum_col_ptr, |
| sum_col_stride_x, |
| sum_col_step_x, |
| sum_col_stride_y, |
| sum_col_step_y, |
| sum_col_offset_first_element_in_bytes |
| #endif |
| #if defined(B_OFFSET) |
| , |
| sum_row_ptr, |
| sum_row_stride_x, |
| sum_row_step_x, |
| sum_row_stride_y, |
| sum_row_step_y, |
| sum_row_offset_first_element_in_bytes |
| #endif |
| #if defined(ADD_BIAS) |
| , |
| biases_ptr, |
| biases_stride_x, |
| biases_step_x, |
| biases_offset_first_element_in_bytes |
| #endif |
| ); |
| |
| __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z; |
| |
| VEC_INT in_s32_0 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr); |
| |
| |
| in_s32_0 += offset_term_s32; |
| |
| |
| STORE_VECTOR_SELECT(in_s32_, int, mm_result_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_OFFSET_CONTRIBUTION_QUANTIZE_DOWN) |
| |
| __kernel void gemmlowp_offset_contribution_quantize_down(TENSOR3D_DECLARATION(mm_result) |
| #if defined(A_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_col) |
| #endif |
| #if defined(B_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_row) |
| #endif |
| , |
| #if defined(ADD_BIAS) |
| VECTOR_DECLARATION(biases), |
| #endif |
| TENSOR3D_DECLARATION(dst) |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| , |
| VECTOR_DECLARATION(result_multipliers), |
| VECTOR_DECLARATION(result_shifts) |
| #endif |
| ) |
| { |
| const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); |
| const int y = get_global_id(1); |
| const int z = get_global_id(2); |
| |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z; |
| |
| |
| VEC_INT offset_term_s32 = offset_contribution( |
| x, y, z |
| #if defined(A_OFFSET) |
| , |
| sum_col_ptr, |
| sum_col_stride_x, |
| sum_col_step_x, |
| sum_col_stride_y, |
| sum_col_step_y, |
| sum_col_offset_first_element_in_bytes |
| #endif |
| #if defined(B_OFFSET) |
| , |
| sum_row_ptr, |
| sum_row_stride_x, |
| sum_row_step_x, |
| sum_row_stride_y, |
| sum_row_step_y, |
| sum_row_offset_first_element_in_bytes |
| #endif |
| #if defined(ADD_BIAS) |
| , |
| biases_ptr, |
| biases_stride_x, |
| biases_step_x, |
| biases_offset_first_element_in_bytes |
| #endif |
| ); |
| |
| __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z; |
| |
| VEC_INT in_s32 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr); |
| |
| |
| in_s32 += offset_term_s32; |
| |
| |
| |
| |
| in_s32 += (VEC_INT)RESULT_OFFSET; |
| |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int); |
| __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int); |
| VEC_INT result_multipliers_values = VLOAD(VEC_SIZE)(0, (__global int *)result_multipliers_addr); |
| VEC_INT result_shifts_values = VLOAD(VEC_SIZE)(0, (__global int *)result_shifts_addr); |
| |
| in_s32 *= result_multipliers_values; |
| in_s32 >>= result_shifts_values; |
| #else |
| in_s32 *= RESULT_MULTIPLIER; |
| |
| in_s32 >>= RESULT_SHIFT; |
| #endif |
| |
| VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE) |
| res0 = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)); |
| |
| #if defined(MIN_BOUND) |
| res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND); |
| #endif |
| #if defined(MAX_BOUND) |
| res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND); |
| #endif |
| |
| |
| STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_OFFSET_CONTRIBUTION_QUANTIZE_DOWN_FIXEDPOINT) |
| |
| __kernel void gemmlowp_offset_contribution_quantize_down_fixedpoint(TENSOR3D_DECLARATION(mm_result) |
| #if defined(A_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_col) |
| #endif |
| #if defined(B_OFFSET) |
| , |
| IMAGE_DECLARATION(sum_row) |
| #endif |
| , |
| #if defined(ADD_BIAS) |
| VECTOR_DECLARATION(biases), |
| #endif |
| TENSOR3D_DECLARATION(dst) |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| , |
| VECTOR_DECLARATION(result_multipliers), |
| VECTOR_DECLARATION(result_shifts) |
| #endif |
| ) |
| { |
| const int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); |
| const int y = get_global_id(1); |
| const int z = get_global_id(2); |
| |
| |
| VEC_INT offset_term_s32 = offset_contribution( |
| x, y, z |
| #if defined(A_OFFSET) |
| , |
| sum_col_ptr, |
| sum_col_stride_x, |
| sum_col_step_x, |
| sum_col_stride_y, |
| sum_col_step_y, |
| sum_col_offset_first_element_in_bytes |
| #endif |
| #if defined(B_OFFSET) |
| , |
| sum_row_ptr, |
| sum_row_stride_x, |
| sum_row_step_x, |
| sum_row_stride_y, |
| sum_row_step_y, |
| sum_row_offset_first_element_in_bytes |
| #endif |
| #if defined(ADD_BIAS) |
| , |
| biases_ptr, |
| biases_stride_x, |
| biases_step_x, |
| biases_offset_first_element_in_bytes |
| #endif |
| ); |
| |
| __global uchar *mm_result_addr = mm_result_ptr + mm_result_offset_first_element_in_bytes + x * sizeof(int) + y * mm_result_stride_y + z * mm_result_stride_z; |
| |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z; |
| |
| VEC_INT in_s32 = VLOAD(VEC_SIZE)(0, (__global int *)mm_result_addr); |
| |
| |
| in_s32 += offset_term_s32; |
| |
| |
| |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| __global uchar *result_multipliers_addr = result_multipliers_ptr + result_multipliers_offset_first_element_in_bytes + x * sizeof(int); |
| __global uchar *result_shifts_addr = result_shifts_ptr + result_shifts_offset_first_element_in_bytes + x * sizeof(int); |
| VEC_INT result_multipliers_values = VLOAD(VEC_SIZE)(0, (__global int *)result_multipliers_addr); |
| VEC_INT result_shifts_values = VLOAD(VEC_SIZE)(0, (__global int *)result_shifts_addr); |
| |
| VEC_INT in_s32_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, VEC_SIZE); |
| VEC_INT in_s32_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, result_multipliers_values, result_shifts_values, VEC_SIZE); |
| in_s32 = select(in_s32_shift_lt0, in_s32_shift_gt0, result_shifts_values >= 0); |
| #else |
| |
| #if RESULT_SHIFT < 0 |
| in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE); |
| #else |
| in_s32 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(in_s32, RESULT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE); |
| #endif |
| |
| #endif |
| |
| |
| in_s32 += (VEC_INT)RESULT_OFFSET; |
| |
| VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE) |
| res0 = CONVERT_SAT(in_s32, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)); |
| |
| #if defined(MIN_BOUND) |
| res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND); |
| #endif |
| #if defined(MAX_BOUND) |
| res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND); |
| #endif |
| |
| |
| STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
| #endif |
| |
| #undef VEC_INT |
| |
| #endif |
| |
| #if defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN) |
| |
| __kernel void gemmlowp_output_stage_quantize_down(TENSOR3D_DECLARATION(src), |
| #if defined(ADD_BIAS) |
| VECTOR_DECLARATION(biases), |
| #endif |
| TENSOR3D_DECLARATION(dst)) |
| { |
| |
| int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); |
| int y = get_global_id(1); |
| int z = get_global_id(2); |
| |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z; |
| |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z; |
| |
| VEC_DATA_TYPE(int, VEC_SIZE) |
| input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr); |
| |
| #if defined(ADD_BIAS) |
| |
| __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int); |
| |
| VEC_DATA_TYPE(int, VEC_SIZE) |
| biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr); |
| input_values += biases_values; |
| #endif |
| |
| |
| input_values += (VEC_DATA_TYPE(int, VEC_SIZE))RESULT_OFFSET; |
| |
| |
| input_values *= RESULT_MULT_INT; |
| |
| #if RESULT_SHIFT < 0 |
| input_values >>= -RESULT_SHIFT; |
| #else |
| input_values >>= RESULT_SHIFT; |
| #endif |
| |
| VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE) |
| res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)); |
| |
| #if defined(MIN_BOUND) |
| res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND); |
| #endif |
| #if defined(MAX_BOUND) |
| res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND); |
| #endif |
| |
| |
| STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN_FIXEDPOINT) |
| |
| __kernel void gemmlowp_output_stage_quantize_down_fixedpoint(TENSOR3D_DECLARATION(src), |
| #if defined(ADD_BIAS) |
| VECTOR_DECLARATION(biases), |
| #endif |
| TENSOR3D_DECLARATION(dst)) |
| { |
| |
| int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); |
| int y = get_global_id(1); |
| int z = get_global_id(2); |
| |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z; |
| |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z; |
| |
| VEC_DATA_TYPE(int, VEC_SIZE) |
| input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr); |
| |
| #if defined(ADD_BIAS) |
| |
| __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int); |
| |
| VEC_DATA_TYPE(int, VEC_SIZE) |
| biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr); |
| input_values += biases_values; |
| #endif |
| |
| |
| #if RESULT_SHIFT < 0 |
| input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE); |
| #else |
| input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE); |
| #endif |
| |
| |
| input_values += (VEC_DATA_TYPE(int, VEC_SIZE))RESULT_OFFSET_AFTER_SHIFT; |
| |
| VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE) |
| res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)); |
| |
| #if defined(MIN_BOUND) |
| res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND); |
| #endif |
| #if defined(MAX_BOUND) |
| res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND); |
| #endif |
| |
| |
| STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN_FIXEDPOINT_QSYMM16) |
| |
| __kernel void gemmlowp_output_stage_quantize_down_fixedpoint_qsymm16(TENSOR3D_DECLARATION(src), |
| #if defined(ADD_BIAS) |
| VECTOR_DECLARATION(biases), |
| #endif |
| TENSOR3D_DECLARATION(dst)) |
| { |
| |
| int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); |
| int y = get_global_id(1); |
| int z = get_global_id(2); |
| |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z; |
| |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * sizeof(short) + y * dst_stride_y + z * dst_stride_z; |
| |
| VEC_DATA_TYPE(int, VEC_SIZE) |
| input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr); |
| |
| #if defined(ADD_BIAS) |
| |
| __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int); |
| |
| VEC_DATA_TYPE(int, VEC_SIZE) |
| biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr); |
| input_values += biases_values; |
| #endif |
| |
| |
| #if RESULT_SHIFT < 0 |
| input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE); |
| #else |
| input_values = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(input_values, RESULT_FIXEDPOINT_MULTIPLIER, RESULT_SHIFT, VEC_SIZE); |
| #endif |
| |
| VEC_DATA_TYPE(short, VEC_SIZE) |
| res0 = CONVERT_SAT(input_values, VEC_DATA_TYPE(short, VEC_SIZE)); |
| |
| #if defined(MIN_BOUND) |
| res0 = max(res0, (VEC_DATA_TYPE(short, VEC_SIZE))MIN_BOUND); |
| #endif |
| #if defined(MAX_BOUND) |
| res0 = min(res0, (VEC_DATA_TYPE(short, VEC_SIZE))MAX_BOUND); |
| #endif |
| |
| |
| STORE_VECTOR_SELECT(res, short, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
| #endif |
| |
| #if defined(GEMMLOWP_OUTPUT_STAGE_QUANTIZE_DOWN_FLOAT) |
| |
| __kernel void gemmlowp_output_stage_quantize_down_float(TENSOR3D_DECLARATION(src), |
| #if defined(ADD_BIAS) |
| VECTOR_DECLARATION(biases), |
| #endif |
| #if defined(DST_HEIGHT) |
| TENSOR4D_DECLARATION(dst)) |
| #else |
| TENSOR3D_DECLARATION(dst)) |
| #endif |
| { |
| |
| int x = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0); |
| int y = get_global_id(1); |
| int z = get_global_id(2); |
| |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * sizeof(int) + y * src_stride_y + z * src_stride_z; |
| |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x + y * dst_stride_y + z * dst_stride_z; |
| |
| VEC_DATA_TYPE(int, VEC_SIZE) |
| input_values = VLOAD(VEC_SIZE)(0, (__global int *)src_addr); |
| |
| #if defined(ADD_BIAS) |
| |
| __global uchar *bias_addr = biases_ptr + biases_offset_first_element_in_bytes + x * sizeof(int); |
| |
| VEC_DATA_TYPE(int, VEC_SIZE) |
| biases_values = VLOAD(VEC_SIZE)(0, (__global int *)bias_addr); |
| input_values += (VEC_DATA_TYPE(int, VEC_SIZE))biases_values; |
| #endif |
| |
| |
| VEC_DATA_TYPE(float, VEC_SIZE) |
| input_values_f = CONVERT(input_values, VEC_DATA_TYPE(float, VEC_SIZE)); |
| input_values_f = round(input_values_f * (float)REAL_MULTIPLIER + (float)OUTPUT_OFFSET); |
| |
| VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE) |
| res0 = CONVERT_SAT(input_values_f, VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE)); |
| |
| #if defined(MIN_BOUND) |
| res0 = max(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MIN_BOUND); |
| #endif |
| #if defined(MAX_BOUND) |
| res0 = min(res0, (VEC_DATA_TYPE(OUTPUT_DATA_TYPE, VEC_SIZE))MAX_BOUND); |
| #endif |
| |
| |
| STORE_VECTOR_SELECT(res, OUTPUT_DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
| #endif )" |