| R"( |
| |
| /* |
| * Copyright (c) 2017-2020 Arm Limited. |
| * |
| * SPDX-License-Identifier: MIT |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a copy |
| * of this software and associated documentation files (the "Software"), to |
| * deal in the Software without restriction, including without limitation the |
| * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| * sell copies of the Software, and to permit persons to whom the Software is |
| * furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in all |
| * copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| |
| /* |
| * Copyright (c) 2017-2020 Arm Limited. |
| * |
| * SPDX-License-Identifier: MIT |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a copy |
| * of this software and associated documentation files (the "Software"), to |
| * deal in the Software without restriction, including without limitation the |
| * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| * sell copies of the Software, and to permit persons to whom the Software is |
| * furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in all |
| * copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| #ifndef ARM_COMPUTE_HELPERS_ASYMM_H |
| #define ARM_COMPUTE_HELPERS_ASYMM_H |
| |
| /* |
| * Copyright (c) 2016-2020 Arm Limited. |
| * |
| * SPDX-License-Identifier: MIT |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a copy |
| * of this software and associated documentation files (the "Software"), to |
| * deal in the Software without restriction, including without limitation the |
| * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| * sell copies of the Software, and to permit persons to whom the Software is |
| * furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in all |
| * copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| #ifndef ARM_COMPUTE_HELPER_H |
| #define ARM_COMPUTE_HELPER_H |
| |
| /* |
| * Copyright (c) 2020 Arm Limited. |
| * |
| * SPDX-License-Identifier: MIT |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a copy |
| * of this software and associated documentation files (the "Software"), to |
| * deal in the Software without restriction, including without limitation the |
| * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| * sell copies of the Software, and to permit persons to whom the Software is |
| * furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in all |
| * copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| |
/** Store the 0th to (n-1)th rows of the given variables
| * @name STORE_ROW_n |
| * |
| * @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
/** @} */ // end of group STORE_ROW_n
| |
| /** Convert and store the 0th to (n-1)th rows of the given variables |
| * @name CONVERT_STORE_ROW_n |
| * |
| * @param[in] N0 The size of the vectors |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
| CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
/** @} */ // end of group CONVERT_STORE_ROW_n
| |
| /** Store a block of the given size M0xN0 |
| * @name STORE_BLOCK |
| * |
 * Supported cases are M0=1,2,3,...,16 and N0=1,2,3,4,8,16.
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store |
| * @param[in] N0 The size of each vector |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| /** @} */ // end of group STORE_BLOCK |
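
// Illustrative expansion (identifiers are example names): STORE_BLOCK(2, 4, float, c, ptr, stride_y, zin)
// resolves to STORE_ROW_2(4, float, c, ptr, stride_y, zin), i.e.:
//   vstore4(c0, 0, (__global float *)(ptr + 0 * stride_y + zin0));
//   vstore4(c1, 0, (__global float *)(ptr + 1 * stride_y + zin1));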
| |
| /** Convert and store a block of the given size M0xN0 |
| * @name CONVERT_STORE_BLOCK |
| * |
 * Supported cases are M0=1,2,3,...,16 and N0=1,2,3,4,8,16.
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store |
| * @param[in] N0 The size of each vector |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| /** @} */ // end of group CONVERT_STORE_BLOCK |
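
// Illustrative expansion (identifiers are example names): CONVERT_STORE_BLOCK(2, 4, uchar, c, ptr, stride_y, zin)
// saturate-converts each row before storing, i.e.:
//   vstore4(convert_uchar4_sat(c0), 0, (__global uchar *)(ptr + 0 * stride_y + zin0));
//   vstore4(convert_uchar4_sat(c1), 0, (__global uchar *)(ptr + 1 * stride_y + zin1));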
| |
/** Partially store the 0th to (n-1)th rows of the given variables
 * @name STORE_ROW_PARTIAL_n
 * Within each row, store the lower @p STORE_N0 elements of vectors of width @p N0
 *
 * @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
 *
 * @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
 * @param[in] STORE_N0 The **lower** size of the vectors to store. Supported: 1-16 and <= @p N0
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
/** @} */ // end of group STORE_ROW_PARTIAL_n
| |
| /** Partially store a block of the given size STORE_M0xSTORE_N0 |
| * @name STORE_BLOCK_PARTIAL |
| * |
| * @note The vector width @p N0 is also required for correct partial storing behaviour. |
 * @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for STORE_M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for STORE_M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] STORE_M0 The number of rows to store. Supported: 1-16 |
| * @param[in] STORE_N0 The lower number of elements of vectors to store. Supported: 1-16 and <= @p N0 |
| * @param[in] N0 The size of each vector. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
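
// Illustrative expansion (identifiers are example names): STORE_BLOCK_PARTIAL(2, 3, 4, float, c, ptr, stride_y, zin)
// stores the lower 3 elements of each float4 row via VSTORE_PARTIAL(4, 3), i.e.:
//   vstore3(c0.s012, 0, (__global float *)(ptr + 0 * stride_y + zin0));
//   vstore3(c1.s012, 0, (__global float *)(ptr + 1 * stride_y + zin1));
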
| /** Store a block that can be partial in both x and y dimensions |
| * |
 * @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0) |
| * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0) |
| * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0. |
| * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0. |
| */ |
| #define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| /** Store a block that can only be partial in x but not y. |
| * |
 * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0) |
| * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0. |
| */ |
| #define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| /** Store a block that can only be partial in y but not x. |
| * |
 * @note in case @p N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0) |
| * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0. |
| */ |
| #define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ |
| if(!(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| /** @} */ // end of group STORE_BLOCK_PARTIAL |
| |
| #if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| |
| /** Boundary-aware GEMM block store |
| * @name STORE_BLOCK_BOUNDARY_AWARE |
| * This macro assumes the following schemes to achieve boundary-awareness: |
| * - Overlapping load in Y axis from lhs tensor. This implies lhs has no padding along y dim. |
 * - Non-overlapping (normal) load from rhs tensor. This implies rhs can have padding.
 * - Overlapping load in Y axis from bias tensor. This implies bias has no padding along y dim.
 * The macro then ensures that the dst tensor can be stored without any padding in both x and y dims.
 *
 * In the y dimension, we place the partial blocks **at the beginning** while in the x dimension, we place the partial
 * blocks **at the end**.
 * Say the dst tensor is of shape MxN and we have M0 and N0 as the block size; this is how we define "partial blocks"/
 * "boundary blocks" (we use the two terms "partial blocks" and "boundary blocks" interchangeably) and their various parameters:
| * |
| * *--x--> x == 0 x == 1 |
| * | |<------------------------------N-------------------------->| |
| * y |<--------------N0------------->|<----PARTIAL_STORE_N0----->| |
| * | -------------############################################################# |
| * * | | |...............................|...........................| |
| * y == 0 | PAR_..._M0 |......Boundary block in y......|.Boundary block in x and y.| |
| * | | |...............................|...........................| |
| * M --############################################################# |
| * | | | |...........................| |
| * y == 1 | M0 | Non-boundary block |....Boundary block in x....| |
| * | | | |...........................| |
| * |------------############################################################# |
| * |
| * Then @p PARTIAL_STORE_M0 = M % M0 and @p PARTIAL_STORE_N0 = N % N0 |
| * |
 * @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
 *
 * It automatically detects if a given M,N,M0,N0 combination can yield partial blocks in either the X or Y dimension,
 * and selects the corresponding store methods such that the boundary detection logic is only added when needed.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0) |
| * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported: [0, @p N0) |
| * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0. |
| * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0. |
| * @{ |
| */ |
| #if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| // Case1: No partial blocks in either x or y |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 |
| // Case2: Partial blocks in y |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) |
| |
| #elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 |
| // Case3: Partial blocks in x |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) |
| |
| #else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| // Case4: Partial blocks in both x and y |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) |
| |
| #endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| |
| #endif // defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| /** @} */ // end of group STORE_BLOCK_BOUNDARY_AWARE |
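
// Illustrative setup (values and conditions are example choices made by the calling kernel): for a dst tensor
// with M = 7, N = 10 and block sizes M0 = 4, N0 = 4, the host compiles the kernel with -DPARTIAL_STORE_M0=3
// (7 % 4) and -DPARTIAL_STORE_N0=2 (10 % 4), selecting Case4 above. The conditions are then typically derived
// from the work-item ids, e.g. PARTIAL_COND_Y true for the first block in y (partial blocks sit at the
// beginning in y) and PARTIAL_COND_X true for the last block in x (partial blocks sit at the end in x).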
| |
| #if defined(PARTIAL_STORE_M0) |
| /** Compute the start m0 row (LHS, BIAS and DST) in a boundary-aware way so as to avoid padding |
| * @name COMPUTE_M0_START_ROW |
 * If there are any partial blocks in the y dimension, they are placed at the beginning of the rows.
 * This shift amount is added to all rows such that the partial block (at the beginning) overlaps with the subsequent
 * blocks in the y dimension to avoid any padding.
 * E.g. M0=4, PARTIAL_STORE_M0=1:
| * | Non-overlapping | +M0_ROW_SHIFT (Overlapping) |
| * block 0 (partial)| start row = 0 | start row = 0 |
| * block 1 (full) | start row = 4 | start row = 1 |
| * block 2 (full) | start row = 8 | start row = 5 |
| * |
| * @param[in] y Global id of current block in y. |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0) |
| * @{ |
| */ |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) |
| #else // defined(PARTIAL_STORE_M0) |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(y * M0)) |
| #endif // defined(PARTIAL_STORE_M0) |
| /** @} */ // end of group COMPUTE_M0_START_ROW |
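
// Worked example: with M0 = 4 and PARTIAL_STORE_M0 = 1, the shift is (M0 - PARTIAL_STORE_M0) % M0 = 3, so
// COMPUTE_M0_START_ROW(2, 4, 1) = max(0, 2 * 4 - 3) = 5, matching "block 2, start row = 5" in the table
// above; for y = 0 the max() clamps the start row to 0.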
| |
| /** Store a vector that can only be partial in x. |
| * |
 * @note in case @p vec_size or @p leftover != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
 *
 * The name of the data to store is expected to end in a 0.
 * E.g., for basename=c, the expected name is c0.
 *
 * @param[in] basename The name of the variable without trailing 0
 * @param[in] data_type The data type of the vector
 * @param[in] ptr The base pointer
 * @param[in] vec_size The vector size if cond = false. Supported: 1, 2, 3, 4, 8, 16
 * @param[in] leftover The vector size if cond = true. Supported range: [1, @p vec_size)
 * @param[in] cond Condition to select either @p vec_size or @p leftover
| * @{ |
| */ |
| #define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ |
| STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) |
| /** @} */ // end of group STORE_VECTOR_SELECT |
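
// Illustrative usage (identifiers are example names): given a float4 accumulator acc0 and 2 leftover columns,
// STORE_VECTOR_SELECT(acc, float, dst_addr, 4, 2, cond_x) stores all 4 elements of acc0 when cond_x is false
// and only the lower 2 elements (acc0.s01) when cond_x is true.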
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #pragma OPENCL EXTENSION cl_khr_fp16 : enable |
| #endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| |
| #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| #pragma OPENCL EXTENSION cl_arm_printf : enable |
| #endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| |
| #define GPU_ARCH_MIDGARD 0x100 |
| #define GPU_ARCH_BIFROST 0x200 |
| |
| /** Concatenate two inputs. |
| * |
| * @param[in] a The first input to be concatenated |
| * @param[in] b The second input to be concatenated |
| * |
| * @return The concatenated output |
| */ |
| #define CONCAT(a, b) a##b |
| |
| /** Expand the given vector |
| * |
| * @param[in] x The vector to be expanded |
| * |
| * @return The expanded output |
| */ |
| #define EXPAND(x) x |
| |
| /** Clamp the given value between an upper and lower bound. |
| * |
| * @param[in] x The value to be clamped |
| * @param[in] min_val The lower bound |
| * @param[in] max_val The upper bound |
| * |
| * @return The clamped value. |
| */ |
| #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) |
| |
| /** REVn reverses the given vector whose size is n. |
| * @name REVn |
| * |
| * @param[in] x The vector to be reversed |
| * |
| * @return The reversed vector |
| * @{ |
| */ |
| #define REV1(x) ((x)) |
| #define REV2(x) ((x).s10) |
| #define REV3(x) ((x).s210) |
| #define REV4(x) ((x).s3210) |
| #define REV8(x) ((x).s76543210) |
| #define REV16(x) ((x).sFEDCBA9876543210) |
| /** @} */ // end of group REVn |
| |
| /** Reverse the given vector. |
| * @name REVERSE |
| * |
| * @param[in] x The vector to be reversed |
| * @param[in] s The size of the vector |
| * |
| * @return The reversed vector |
| * @{ |
| */ |
| #define REVERSE_STR(x, s) REV##s((x)) |
| #define REVERSE(x, s) REVERSE_STR(x, s) |
| /** @} */ // end of group REVERSE |
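
// Example: REVERSE(x, 4) resolves to REV4(x), i.e. ((x).s3210).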
| |
| /** Circular-right-shift (rotate-right) the vector of size s by the amount of n. |
| * @name ROTs_n |
| * |
| * @param[in] x The vector to be shifted |
| * |
| * @return The shifted vector |
| * @{ |
| */ |
| #define ROT1_0(x) ((x)) |
| |
| #define ROT2_0(x) ((x)) |
| #define ROT2_1(x) ((x).s10) |
| |
| #define ROT3_0(x) ((x)) |
| #define ROT3_1(x) ((x).s201) |
| #define ROT3_2(x) ((x).s120) |
| |
| #define ROT4_0(x) ((x)) |
| #define ROT4_1(x) ((x).s3012) |
| #define ROT4_2(x) ((x).s2301) |
| #define ROT4_3(x) ((x).s1230) |
| |
| #define ROT8_0(x) ((x)) |
| #define ROT8_1(x) ((x).s70123456) |
| #define ROT8_2(x) ((x).s67012345) |
| #define ROT8_3(x) ((x).s56701234) |
| #define ROT8_4(x) ((x).s45670123) |
| #define ROT8_5(x) ((x).s34567012) |
| #define ROT8_6(x) ((x).s23456701) |
| #define ROT8_7(x) ((x).s12345670) |
| |
| #define ROT16_0(x) ((x)) |
| #define ROT16_1(x) ((x).sF0123456789ABCDE) |
| #define ROT16_2(x) ((x).sEF0123456789ABCD) |
| #define ROT16_3(x) ((x).sDEF0123456789ABC) |
| #define ROT16_4(x) ((x).sCDEF0123456789AB) |
| #define ROT16_5(x) ((x).sBCDEF0123456789A) |
| #define ROT16_6(x) ((x).sABCDEF0123456789) |
| #define ROT16_7(x) ((x).s9ABCDEF012345678) |
| #define ROT16_8(x) ((x).s89ABCDEF01234567) |
| #define ROT16_9(x) ((x).s789ABCDEF0123456) |
| #define ROT16_10(x) ((x).s6789ABCDEF012345) |
| #define ROT16_11(x) ((x).s56789ABCDEF01234) |
| #define ROT16_12(x) ((x).s456789ABCDEF0123) |
| #define ROT16_13(x) ((x).s3456789ABCDEF012) |
| #define ROT16_14(x) ((x).s23456789ABCDEF01) |
| #define ROT16_15(x) ((x).s123456789ABCDEF0) |
| /** @} */ // end of group ROTs_n |
| |
| /** Circular-right-shift (rotate-right) the given vector by the given amount. |
| * @name ROTATE |
| * |
| * @param[in] x The vector to be shifted |
| * @param[in] s The size of the vector |
| * @param[in] n The amount to be shifted |
| * |
| * @return The shifted vector |
| * @{ |
| */ |
| #define ROTATE_STR(x, s, n) ROT##s##_##n(x) |
| #define ROTATE(x, s, n) ROTATE_STR(x, s, n) |
| /** @} */ // end of group ROTATE |
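
// Example: ROTATE(x, 8, 2) resolves to ROT8_2(x), i.e. ((x).s67012345) (each element moves two positions
// to the right, wrapping around).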
| |
| /** Creates a vector of size n filled with offset values corresponding to the location of each element. |
| * @name V_OFFSn |
| * |
| * @param[in] dt The data type of the output vector |
| * |
| * @return The vector filled with offset values |
| * @{ |
| */ |
| #define V_OFFS1(dt) (dt##1)(0) |
| #define V_OFFS2(dt) (dt##2)(0, 1) |
| #define V_OFFS3(dt) (dt##3)(0, 1, 2) |
| #define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) |
| #define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) |
| #define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) |
| /** @} */ // end of group V_OFFSn |
| |
| /** Create a vector filled with offset values corresponding to the location of each element. |
| * @name VEC_OFFS |
| * |
| * @param[in] dt The data type of the output vector |
| * @param[in] s The size of the output vector |
| * |
| * @return The vector filled with offset values |
| * @{ |
| */ |
| #define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) |
| #define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) |
| /** @} */ // end of group VEC_OFFS |
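
// Example: VEC_OFFS(int, 4) resolves to V_OFFS4(int), i.e. (int4)(0, 1, 2, 3), which is handy for
// deriving per-element coordinates from a base offset.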
| |
| #define VLOAD_STR(size) vload##size |
| #define VLOAD(size) VLOAD_STR(size) |
| |
| #define PIXEL_UNIT4 1 |
| #define PIXEL_UNIT8 2 |
| #define PIXEL_UNIT16 4 |
| |
/** Utility macro to convert a vector size to a pixel unit.
 *
 * @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
 *
 * @param[in] vec_size Vector size. Only 4, 8 and 16 are supported
| * |
| * @return The pixel unit (number of pixels) |
| * @{ |
| */ |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) |
| /** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT |
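
// Example: CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(16) evaluates to 4, since each texel read below packs 4
// elements of the vector.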
| |
| #define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); |
| #endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| |
| /** Utility macro to read a 2D OpenCL image object. |
| * |
| * @note Coordinates are not normalized |
| * |
| * @param[in] data_type Data type |
 * @param[in] n0 Number of pixels to read. Only 1, 2 and 4 are supported
| * @param[in] img OpenCL image object |
| * @param[in] x_coord The x coordinate for the top-left pixel |
| * @param[in] y_coord The y coordinate for the top-left pixel |
| * |
| * @return Pixels from the 2D OpenCL image object |
| * @{ |
| */ |
| #define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) |
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)
/** @} */ // end of group READ_IMAGE2D
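
// Example: READ_IMAGE2D(float, 4, img, x, y) resolves to read_image2d_floatx4(img, x, y), returning a
// float16 assembled from 4 consecutive texels starting at coordinate (x, y).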
| |
| #define VSTORE_STR(size) vstore##size |
| #define VSTORE(size) VSTORE_STR(size) |
| |
| #define float1 float |
| #define half1 half |
| #define char1 char |
| #define uchar1 uchar |
| #define short1 short |
| #define ushort1 ushort |
| #define int1 int |
| #define uint1 uint |
| #define long1 long |
| #define ulong1 ulong |
| #define double1 double |
| |
| #define vload1(OFFSET, PTR) *(OFFSET + PTR) |
| #define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA |
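
// Example: vstore1(x, 2, p) expands to *(2 + p) = x, emulating the vstoren built-ins for the scalar case.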
| |
| /** Extended partial vstore that correctly handles scalar values as well. |
 * Store the **lower** @p store_size elements of the given vector while minimising the amount of vstore ops
| * @name VSTORE_PARTIAL |
| * |
| * @note With this macro, the passed data can be both a vector and a scalar |
| * @note @p store_size needs to be <= @p size |
| * eg 1: Valid |
| * VSTORE_PARTIAL(16, 15) ...; |
| * eg 2: Invalid |
| * VSTORE_PARTIAL(4, 7) ...; |
| * |
| * @param[in] size The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16 |
| * @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size |
| * @{ |
| */ |
| #define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size |
| #define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) |
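
// Example: VSTORE_PARTIAL(8, 5)(data, 0, ptr) resolves to vstore_partial_8_5, i.e. vstore_partial_5 (defined
// below), which stores the lower 5 elements of data with a vstore4 plus a scalar store.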
| |
| #define NO_STORE(data, offs, ptr) \ |
| { \ |
| } |
| |
| // Size == 1 (scalar) |
| #define vstore_partial_1_0 NO_STORE |
| #define vstore_partial_1_1 vstore1 |
| #define vstore_partial_1_2 NO_STORE |
| #define vstore_partial_1_3 NO_STORE |
| #define vstore_partial_1_4 NO_STORE |
| #define vstore_partial_1_5 NO_STORE |
| #define vstore_partial_1_6 NO_STORE |
| #define vstore_partial_1_7 NO_STORE |
| #define vstore_partial_1_8 NO_STORE |
| #define vstore_partial_1_9 NO_STORE |
| #define vstore_partial_1_10 NO_STORE |
| #define vstore_partial_1_11 NO_STORE |
| #define vstore_partial_1_12 NO_STORE |
| #define vstore_partial_1_13 NO_STORE |
| #define vstore_partial_1_14 NO_STORE |
| #define vstore_partial_1_15 NO_STORE |
| #define vstore_partial_1_16 NO_STORE |
| // Size == 2 |
| #define vstore_partial_2_0 NO_STORE |
| #define vstore_partial_2_1 vstore_partial_1 |
| #define vstore_partial_2_2 vstore_partial_2 |
| #define vstore_partial_2_3 NO_STORE |
| #define vstore_partial_2_4 NO_STORE |
| #define vstore_partial_2_5 NO_STORE |
| #define vstore_partial_2_6 NO_STORE |
| #define vstore_partial_2_7 NO_STORE |
| #define vstore_partial_2_8 NO_STORE |
| #define vstore_partial_2_9 NO_STORE |
| #define vstore_partial_2_10 NO_STORE |
| #define vstore_partial_2_11 NO_STORE |
| #define vstore_partial_2_12 NO_STORE |
| #define vstore_partial_2_13 NO_STORE |
| #define vstore_partial_2_14 NO_STORE |
| #define vstore_partial_2_15 NO_STORE |
| #define vstore_partial_2_16 NO_STORE |
| // Size == 3 |
| #define vstore_partial_3_0 NO_STORE |
| #define vstore_partial_3_1 vstore_partial_1 |
| #define vstore_partial_3_2 vstore_partial_2 |
| #define vstore_partial_3_3 vstore_partial_3 |
| #define vstore_partial_3_4 NO_STORE |
| #define vstore_partial_3_5 NO_STORE |
| #define vstore_partial_3_6 NO_STORE |
| #define vstore_partial_3_7 NO_STORE |
| #define vstore_partial_3_8 NO_STORE |
| #define vstore_partial_3_9 NO_STORE |
| #define vstore_partial_3_10 NO_STORE |
| #define vstore_partial_3_11 NO_STORE |
| #define vstore_partial_3_12 NO_STORE |
| #define vstore_partial_3_13 NO_STORE |
| #define vstore_partial_3_14 NO_STORE |
| #define vstore_partial_3_15 NO_STORE |
| #define vstore_partial_3_16 NO_STORE |
| // Size == 4 |
| #define vstore_partial_4_0 NO_STORE |
| #define vstore_partial_4_1 vstore_partial_1 |
| #define vstore_partial_4_2 vstore_partial_2 |
| #define vstore_partial_4_3 vstore_partial_3 |
| #define vstore_partial_4_4 vstore_partial_4 |
| #define vstore_partial_4_5 NO_STORE |
| #define vstore_partial_4_6 NO_STORE |
| #define vstore_partial_4_7 NO_STORE |
| #define vstore_partial_4_8 NO_STORE |
| #define vstore_partial_4_9 NO_STORE |
| #define vstore_partial_4_10 NO_STORE |
| #define vstore_partial_4_11 NO_STORE |
| #define vstore_partial_4_12 NO_STORE |
| #define vstore_partial_4_13 NO_STORE |
| #define vstore_partial_4_14 NO_STORE |
| #define vstore_partial_4_15 NO_STORE |
| #define vstore_partial_4_16 NO_STORE |
| // Size == 8 |
| #define vstore_partial_8_0 NO_STORE |
| #define vstore_partial_8_1 vstore_partial_1 |
| #define vstore_partial_8_2 vstore_partial_2 |
| #define vstore_partial_8_3 vstore_partial_3 |
| #define vstore_partial_8_4 vstore_partial_4 |
| #define vstore_partial_8_5 vstore_partial_5 |
| #define vstore_partial_8_6 vstore_partial_6 |
| #define vstore_partial_8_7 vstore_partial_7 |
| #define vstore_partial_8_8 vstore_partial_8 |
| #define vstore_partial_8_9 NO_STORE |
| #define vstore_partial_8_10 NO_STORE |
| #define vstore_partial_8_11 NO_STORE |
| #define vstore_partial_8_12 NO_STORE |
| #define vstore_partial_8_13 NO_STORE |
| #define vstore_partial_8_14 NO_STORE |
| #define vstore_partial_8_15 NO_STORE |
| #define vstore_partial_8_16 NO_STORE |
| // Size == 16 |
| #define vstore_partial_16_0 NO_STORE |
| #define vstore_partial_16_1 vstore_partial_1 |
| #define vstore_partial_16_2 vstore_partial_2 |
| #define vstore_partial_16_3 vstore_partial_3 |
| #define vstore_partial_16_4 vstore_partial_4 |
| #define vstore_partial_16_5 vstore_partial_5 |
| #define vstore_partial_16_6 vstore_partial_6 |
| #define vstore_partial_16_7 vstore_partial_7 |
| #define vstore_partial_16_8 vstore_partial_8 |
| #define vstore_partial_16_9 vstore_partial_9 |
| #define vstore_partial_16_10 vstore_partial_10 |
| #define vstore_partial_16_11 vstore_partial_11 |
| #define vstore_partial_16_12 vstore_partial_12 |
| #define vstore_partial_16_13 vstore_partial_13 |
| #define vstore_partial_16_14 vstore_partial_14 |
| #define vstore_partial_16_15 vstore_partial_15 |
| #define vstore_partial_16_16 vstore_partial_16 |
| |
/** Partial vstore. Store the **lower** n elements of the given vector while minimising the amount of vstore ops
| * @name vstore_partial_n |
| * |
| * @note @p DATA needs to be a vector not a scalar |
| * @note n needs to be <= the vector width of the input variable @p DATA |
| * eg 1: Valid |
| * vstore_partial_15(var:float16, 0, 0xabcd); |
| * eg 2: Invalid |
| * vstore_partial_7(var:float4, 0, 0xabcd); |
| * |
| * @note in cases n == 1, 2, 3, 4, 8, 16, no extra vstore is invoked, thus there's no performance penalty. |
| * |
| * @param[in] DATA The name of the variable |
| * @param[in] OFFSET Offset in n |
| * @param[in] PTR The base pointer |
| * @{ |
| */ |
| #define vstore_partial_1(DATA, OFFSET, PTR) \ |
| vstore1(DATA.s0, OFFSET, PTR); |
| |
| #define vstore_partial_2(DATA, OFFSET, PTR) \ |
| vstore2(DATA.s01, OFFSET, PTR); |
| |
| #define vstore_partial_3(DATA, OFFSET, PTR) \ |
| vstore3(DATA.s012, OFFSET, PTR); |
| |
| #define vstore_partial_4(DATA, OFFSET, PTR) \ |
| vstore4(DATA.s0123, OFFSET, PTR); |
| |
| #define vstore_partial_5(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore1(DATA.s4, OFFSET, PTR + 4); |
| |
| #define vstore_partial_6(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vstore_partial_7(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vstore_partial_8(DATA, OFFSET, PTR) \ |
| vstore8(DATA.s01234567, OFFSET, PTR); |
| |
| #define vstore_partial_9(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore1(DATA.s8, OFFSET, PTR + 8); |
| |
| #define vstore_partial_10(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vstore_partial_11(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s89a, OFFSET, PTR + 8); |
| |
| #define vstore_partial_12(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8); |
| |
| #define vstore_partial_13(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_14(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_15(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_16(DATA, OFFSET, PTR) \ |
| vstore16(DATA, OFFSET, PTR); |
/** @} */ // end of group vstore_partial_n
/** @} */ // end of group VSTORE_PARTIAL
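
// Example: vstore_partial_6(d, 0, p) stores d.s0123 with a single vstore4 at p and d.s45 with a single
// vstore2 at p + 4, i.e. two vstore ops for the non-native store size of 6.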
| |
// The convert_* built-in functions with the _sat modifier are not supported for floating point types,
// so we create defines without _sat to overcome this issue
| #define convert_float_sat convert_float |
| #define convert_float1_sat convert_float |
| #define convert_float2_sat convert_float2 |
| #define convert_float3_sat convert_float3 |
| #define convert_float4_sat convert_float4 |
| #define convert_float8_sat convert_float8 |
| #define convert_float16_sat convert_float16 |
#define convert_half_sat convert_half
| #define convert_half1_sat convert_half |
| #define convert_half2_sat convert_half2 |
| #define convert_half3_sat convert_half3 |
| #define convert_half4_sat convert_half4 |
| #define convert_half8_sat convert_half8 |
| #define convert_half16_sat convert_half16 |
| |
| #define convert_float1 convert_float |
| #define convert_half1 convert_half |
| #define convert_char1 convert_char |
| #define convert_uchar1 convert_uchar |
| #define convert_short1 convert_short |
| #define convert_ushort1 convert_ushort |
| #define convert_int1 convert_int |
| #define convert_uint1 convert_uint |
| #define convert_long1 convert_long |
| #define convert_ulong1 convert_ulong |
| #define convert_double1 convert_double |
| |
| #define convert_char1_sat convert_char_sat |
| #define convert_uchar1_sat convert_uchar_sat |
| #define convert_short1_sat convert_short_sat |
| #define convert_ushort1_sat convert_ushort_sat |
| #define convert_int1_sat convert_int_sat |
| #define convert_uint1_sat convert_uint_sat |
| #define convert_long1_sat convert_long_sat |
| #define convert_ulong1_sat convert_ulong_sat |
| #define convert_double1_sat convert_double_sat |
| |
| #define VEC_DATA_TYPE_STR(type, size) type##size |
| #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) |
| |
| #define CONVERT_STR(x, type) (convert_##type((x))) |
| #define CONVERT(x, type) CONVERT_STR(x, type) |
| |
| #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) |
| #define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) |
| |
| #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) |
| #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) |
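
// Example: CONVERT_SAT(acc, VEC_DATA_TYPE(uchar, 8)) resolves to convert_uchar8_sat((acc)); for float
// destinations the _sat aliases above make the same macro fall back to plain convert_float8 etc.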
| |
| #define select_vec_dt_uchar(size) uchar##size |
| #define select_vec_dt_char(size) char##size |
| #define select_vec_dt_ushort(size) ushort##size |
| #define select_vec_dt_short(size) short##size |
| #define select_vec_dt_half(size) short##size |
| #define select_vec_dt_uint(size) uint##size |
| #define select_vec_dt_int(size) int##size |
| #define select_vec_dt_float(size) int##size |
| #define select_vec_dt_ulong(size) ulong##size |
| #define select_vec_dt_long(size) long##size |
| |
| #define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) |
| #define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) |
| #define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) |
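
// Example: SELECT_VEC_DATA_TYPE(float, 4) resolves to int4, the integer mask type that the OpenCL select()
// built-in expects when choosing between two float4 values.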
| |
| #define sum_reduce_1(x) (x) |
| #define sum_reduce_2(x) ((x).s0) + ((x).s1) |
| #define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) |
| #define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) |
| #define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) |
| #define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) |
| |
| #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) |
| #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) |
| |
| #define max_reduce_1(x) (x) |
| #define max_reduce_2(x) max(((x).s0), ((x).s1)) |
| #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) |
| #define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) |
| #define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) |
| #define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) |
| |
| #define MAX_REDUCE_STR(x, size) max_reduce_##size(x) |
| #define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) |
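| |
| /* Example (illustrative only): the reductions recurse over halves of the vector, so |
|  * |
|  *     float4 v = (float4)(1.0f, 2.0f, 3.0f, 4.0f); |
|  *     float  s = SUM_REDUCE(v, 4); // ((v.s0 + v.s1) + (v.s2 + v.s3)) = 10.0f |
|  *     float  m = MAX_REDUCE(v, 4); // max(max(v.s0, v.s1), max(v.s2, v.s3)) = 4.0f |
|  * |
|  * The size argument must match the static width of the vector. |
|  */ |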
| |
| #define VECTOR_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define IMAGE_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
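| |
| /* Example (illustrative only): these macros expand to the flat argument list that the |
|  * host side passes for each tensor binding. A hypothetical kernel taking one image |
|  * would be declared as |
|  * |
|  *     __kernel void example_kernel(IMAGE_DECLARATION(src)) |
|  *     // i.e. __global uchar *src_ptr, uint src_stride_x, uint src_step_x, |
|  *     //      uint src_stride_y, uint src_step_y, uint src_offset_first_element_in_bytes |
|  */ |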
| |
| #define CONVERT_TO_VECTOR_STRUCT(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x) |
| |
| #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0) |
| |
| #define CONVERT_TO_IMAGE_STRUCT(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y) |
| |
| #define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \ |
| tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
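| |
| /* Example (illustrative sketch, continuing the hypothetical kernel above): |
|  * CONVERT_TO_IMAGE_STRUCT bundles the flat arguments into an Image structure and |
|  * advances the pointer to this work-item's element: |
|  * |
|  *     __kernel void example_kernel(IMAGE_DECLARATION(src)) |
|  *     { |
|  *         Image src = CONVERT_TO_IMAGE_STRUCT(src); |
|  *         // src.ptr now addresses the (get_global_id(0), get_global_id(1)) element |
|  *     } |
|  * |
|  * The _NO_STEP variants pass a step of 0 so the pointer stays at the first element, |
|  * which is useful when the kernel computes its own offsets. |
|  */ |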
| |
| /** Structure to hold Vector information */ |
| typedef struct Vector |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source vector */ |
| int stride_x; /**< Stride of the vector in X dimension (in bytes) */ |
| } Vector; |
| |
| /** Structure to hold Image information */ |
| typedef struct Image |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source image */ |
| int stride_x; /**< Stride of the image in X dimension (in bytes) */ |
| int stride_y; /**< Stride of the image in Y dimension (in bytes) */ |
| } Image; |
| |
| /** Structure to hold 3D tensor information */ |
| typedef struct Tensor3D |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */ |
| int stride_x; /**< Stride of the tensor in X dimension (in bytes) */ |
| int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */ |
| int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */ |
| } Tensor3D; |
| |
| /** Structure to hold 4D tensor information */ |
| typedef struct Tensor4D |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */ |
| int stride_x; /**< Stride of the tensor in X dimension (in bytes) */ |
| int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */ |
| int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */ |
| int stride_w; /**< Stride of the tensor in W dimension (in bytes) */ |
| } Tensor4D; |
| |
| /** Wrap vector information into a Vector structure, and make the pointer point at this work-item's data. |
|  * |
|  * @param[in] ptr                           Pointer to the starting position of the buffer |
|  * @param[in] offset_first_element_in_bytes The offset of the first element in the source vector |
|  * @param[in] stride_x                      Stride of the vector in X dimension (in bytes) |
|  * @param[in] step_x                        stride_x * number of elements along X processed per work-item (in bytes) |
|  * |
|  * @return A vector object |
|  */ |
| inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x) |
| { |
| Vector vector = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| }; |
| vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x; |
| return vector; |
| } |
| |
| /** Wrap image information into an Image structure, and make the pointer point at this work-item's data. |
|  * |
|  * @param[in] ptr                           Pointer to the starting position of the buffer |
|  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image |
|  * @param[in] stride_x                      Stride of the image in X dimension (in bytes) |
|  * @param[in] step_x                        stride_x * number of elements along X processed per work-item (in bytes) |
|  * @param[in] stride_y                      Stride of the image in Y dimension (in bytes) |
|  * @param[in] step_y                        stride_y * number of elements along Y processed per work-item (in bytes) |
|  * |
|  * @return An image object |
|  */ |
| inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y; |
| return img; |
| } |
| |
| /** Wrap 3D tensor information into an Image structure, and make the pointer point at this work-item's data. |
|  * |
|  * @param[in] ptr                           Pointer to the starting position of the buffer |
|  * @param[in] offset_first_element_in_bytes The offset of the first element in the source image |
|  * @param[in] stride_x                      Stride of the image in X dimension (in bytes) |
|  * @param[in] step_x                        stride_x * number of elements along X processed per work-item (in bytes) |
|  * @param[in] stride_y                      Stride of the image in Y dimension (in bytes) |
|  * @param[in] step_y                        stride_y * number of elements along Y processed per work-item (in bytes) |
|  * @param[in] stride_z                      Stride of the image in Z dimension (in bytes) |
|  * @param[in] step_z                        stride_z * number of elements along Z processed per work-item (in bytes) |
|  * |
|  * @return An image object |
|  */ |
| inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return img; |
| } |
| |
| /** Wrap 3D tensor information into a Tensor3D structure, and make the pointer point at this work-item's data. |
|  * |
|  * @param[in] ptr                           Pointer to the starting position of the buffer |
|  * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor |
|  * @param[in] stride_x                      Stride of the tensor in X dimension (in bytes) |
|  * @param[in] step_x                        stride_x * number of elements along X processed per work-item (in bytes) |
|  * @param[in] stride_y                      Stride of the tensor in Y dimension (in bytes) |
|  * @param[in] step_y                        stride_y * number of elements along Y processed per work-item (in bytes) |
|  * @param[in] stride_z                      Stride of the tensor in Z dimension (in bytes) |
|  * @param[in] step_z                        stride_z * number of elements along Z processed per work-item (in bytes) |
|  * |
|  * @return A 3D tensor object |
|  */ |
| inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return tensor; |
| } |
| |
| /** Wrap 3D tensor information into a Tensor3D structure, without updating the pointer to this work-item's data. |
|  * |
|  * @param[in] ptr                           Pointer to the starting position of the buffer |
|  * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor |
|  * @param[in] stride_x                      Stride of the tensor in X dimension (in bytes) |
|  * @param[in] step_x                        stride_x * number of elements along X processed per work-item (in bytes) |
|  * @param[in] stride_y                      Stride of the tensor in Y dimension (in bytes) |
|  * @param[in] step_y                        stride_y * number of elements along Y processed per work-item (in bytes) |
|  * @param[in] stride_z                      Stride of the tensor in Z dimension (in bytes) |
|  * @param[in] step_z                        stride_z * number of elements along Z processed per work-item (in bytes) |
|  * |
|  * @return A 3D tensor object |
|  */ |
| inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| return tensor; |
| } |
| |
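| /** Wrap 4D tensor information into a Tensor4D structure, and make the pointer point at this work-item's data. |
|  * |
|  * Parameters follow the same convention as update_tensor3D_workitem_ptr above. @p mod_size is the number of |
|  * Z planes per batch: the global Z id is split into z = id % mod_size and w = id / mod_size. |
|  * |
|  * @return A 4D tensor object |
|  */ |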
| inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w, |
| uint step_w, |
| uint mod_size) |
| { |
| Tensor4D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z, |
| .stride_w = stride_w |
| }; |
| |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w; |
| return tensor; |
| } |
| |
| /** Get the pointer position of a Vector |
|  * |
|  * @param[in] vec Pointer to the vector structure |
|  * @param[in] x   Relative X position |
|  * |
|  * @return Pointer to the requested element |
|  */ |
| inline __global const uchar *vector_offset(const Vector *vec, int x) |
| { |
| return vec->ptr + x * vec->stride_x; |
| } |
| |
| /** Get the pointer position of an Image |
|  * |
|  * @param[in] img Pointer to the image structure |
|  * @param[in] x   Relative X position |
|  * @param[in] y   Relative Y position |
|  * |
|  * @return Pointer to the requested element |
|  */ |
| inline __global uchar *offset(const Image *img, int x, int y) |
| { |
| return img->ptr + x * img->stride_x + y * img->stride_y; |
| } |
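| |
| /* Example (illustrative only): offset() applies the byte strides relative to the |
|  * current pointer, so a neighbouring element can be read as |
|  * |
|  *     uchar left = *offset(&src, -1, 0); // one element to the left in X |
|  *     uchar up   = *offset(&src, 0, -1); // one row up in Y |
|  */ |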
| |
| /** Get the pointer position of a Tensor3D |
|  * |
|  * @param[in] tensor Pointer to the tensor structure |
|  * @param[in] x      Relative X position |
|  * @param[in] y      Relative Y position |
|  * @param[in] z      Relative Z position |
|  * |
|  * @return Pointer to the requested element |
|  */ |
| inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z; |
| } |
| |
| /** Get the pointer position of a Tensor4D |
|  * |
|  * @param[in] tensor Pointer to the tensor structure |
|  * @param[in] x      Relative X position |
|  * @param[in] y      Relative Y position |
|  * @param[in] z      Relative Z position |
|  * @param[in] w      Relative W position |
|  * |
|  * @return Pointer to the requested element |
|  */ |
| inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w; |
| } |
| |
| /** Get the offset for a given linear index of a Tensor3D |
|  * |
|  * @param[in] tensor Pointer to the tensor structure |
|  * @param[in] width  Width of the input tensor |
|  * @param[in] height Height of the input tensor |
|  * @param[in] depth  Depth of the input tensor |
|  * @param[in] index  Linear index |
|  * |
|  * @return Pointer to the element at the given linear index |
|  */ |
| inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index) |
| { |
| uint num_elements = width * height; |
| |
| const uint z = index / num_elements; |
| |
| index %= num_elements; |
| |
| const uint y = index / width; |
| |
| index %= width; |
| |
| const uint x = index; |
| |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes; |
| } |
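| |
| /* Worked example (illustrative only): for width = 4, height = 3 and index = 14, |
|  * num_elements = 12, so z = 14 / 12 = 1 with remainder 2, y = 2 / 4 = 0 and x = 2; |
|  * the returned pointer addresses element (x, y, z) = (2, 0, 1). |
|  */ |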
| |
| #endif // ARM_COMPUTE_HELPER_H |
| |
| /** Convert the given vector with round to nearest even rounding mode |
| * |
| * @param[in] x The target to be converted |
| * @param[in] type The target type |
| * |
| * @return The converted vector |
| */ |
| #define CONVERT_DOWN_RTE_STR(x, type) (convert_##type##_rte((x))) |
| #define CONVERT_DOWN_RTE(x, type) CONVERT_DOWN_RTE_STR(x, type) |
| |
| /** Quantize a floating-point scalar value to 8-bit asymmetric |
| * |
| * @param[in] input Input value to quantize |
| * @param[in] offset Quantization offset |
| * @param[in] scale Quantization scale |
| * |
| * @return quantized value |
| */ |
| inline uchar quantize_qasymm8(float input, float offset, float scale) |
| { |
| float out_f32 = input / scale + offset; |
| uchar res_u8 = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, int), uchar); |
| return res_u8; |
| } |
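| |
| /* Worked example (illustrative only): with scale = 0.5f and offset = 10.0f, |
|  * quantize_qasymm8(3.2f, 10.0f, 0.5f) computes 3.2f / 0.5f + 10.0f = 16.4f, |
|  * rounds to the nearest integer (16) and saturates to the uchar range, returning 16. |
|  */ |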
| |
| /** Dequantize a scalar value from 8-bit asymmetric to floating-point |
|  * |
|  * @param[in] input  Input value to dequantize |
|  * @param[in] offset Quantization offset |
|  * @param[in] scale  Quantization scale |
|  * |
|  * @return dequantized value |
|  */ |
| inline float dequantize_qasymm8(uchar input, float offset, float scale) |
| { |
| return ((float)input - offset) * scale; |
| } |
| |
| /** Dequantize a scalar value from signed 8-bit asymmetric to floating-point |
|  * |
|  * @param[in] input  Input value to dequantize |
|  * @param[in] offset Quantization offset |
|  * @param[in] scale  Quantization scale |
|  * |
|  * @return dequantized value |
|  */ |
| inline float dequantize_qasymm8_signed(char input, float offset, float scale) |
| { |
| return ((float)input - offset) * scale; |
| } |
| |
| /** Quantize a vector of values from floating-point |
| * |
| * @param[in] type Output data type. |
| * @param[in] size Size of vector. |
| * |
| * @return quantized values |
| */ |
| #define QUANTIZE_IMPL(type, size) \ |
| inline VEC_DATA_TYPE(type, size) quantize_##type##size(VEC_DATA_TYPE(float, size) input, float offset, float scale) \ |
| { \ |
| VEC_DATA_TYPE(float, size) \ |
| out_f32 = input / (VEC_DATA_TYPE(float, size))(scale) + (VEC_DATA_TYPE(float, size))(offset); \ |
| VEC_DATA_TYPE(type, size) \ |
| res = CONVERT_SAT(CONVERT_DOWN_RTE(out_f32, VEC_DATA_TYPE(int, size)), VEC_DATA_TYPE(type, size)); \ |
| return res; \ |
| } |
| |
| /** Dequantize a vector of values to floating-point |
| * |
| * @param[in] type Input data type. |
| * @param[in] size Size of vector. |
| * |
| * @return dequantized values in floating point |
| */ |
| #define DEQUANTIZE_IMPL(type, size) \ |
| inline VEC_DATA_TYPE(float, size) dequantize_##type##size(VEC_DATA_TYPE(type, size) input, float offset, float scale) \ |
| { \ |
| return (CONVERT(input, VEC_DATA_TYPE(float, size)) - offset) * scale; \ |
| } |
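| |
| /* Example (illustrative only): once the implementations are instantiated further below, |
|  * the vector versions are reached through the QUANTIZE/DEQUANTIZE dispatch macros: |
|  * |
|  *     float4 f = (float4)(0.0f, 0.5f, 1.0f, 1.5f); |
|  *     uchar4 q = QUANTIZE(f, 128.0f, 0.5f, uchar, 4);   // quantize_uchar4(...) -> (128, 129, 130, 131) |
|  *     float4 d = DEQUANTIZE(q, 128.0f, 0.5f, uchar, 4); // dequantize_uchar4(...) -> f again |
|  */ |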
| |
| /** Correctly-rounded-to-nearest division by a power-of-two. |
| * |
| * @param[in] size Size of vector. |
| * |
| * @return Correctly-rounded-to-nearest division by a power-of-two. |
| */ |
| #define ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_rounding_divide_by_POW2_##size(VEC_DATA_TYPE(int, size) x, VEC_DATA_TYPE(int, size) exponent) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) \ |
| zero = (VEC_DATA_TYPE(int, size))0; \ |
| const VEC_DATA_TYPE(int, size) \ |
| one = (VEC_DATA_TYPE(int, size))1; \ |
| VEC_DATA_TYPE(int, size) \ |
| mask = (one << exponent) - one; \ |
| VEC_DATA_TYPE(int, size) \ |
| threshold = (mask >> 1) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))(x < 0)); \ |
| return (x >> exponent) + select(zero, one, (SELECT_VEC_DATA_TYPE(int, size))((x & mask) > threshold)); \ |
| } |
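| |
| /* Worked example (illustrative only): this computes x / 2^exponent rounded to nearest, |
|  * with ties away from zero. For x = 5, exponent = 1: mask = 1, threshold = 0, |
|  * (5 >> 1) = 2, and since (5 & 1) > 0 the result is nudged up to 3, i.e. round(2.5) = 3. |
|  */ |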
| |
| /** Product of two numbers, interpreting them as fixed-point values in the interval [-1, 1), |
| * rounding to the nearest value, and saturating -1 * -1 to the maximum value. |
| * |
| * @param[in] size Size of vector. |
| * |
| * @return Product of two fixed-point numbers. |
| */ |
| #define ASYMM_MULT_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_mult##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \ |
| { \ |
| VEC_DATA_TYPE(int, size) \ |
| overflow = a == b && a == INT_MIN; \ |
| VEC_DATA_TYPE(long, size) \ |
| a_64 = convert_long##size(a); \ |
| VEC_DATA_TYPE(long, size) \ |
| b_64 = convert_long##size(b); \ |
| VEC_DATA_TYPE(long, size) \ |
| ab_64 = a_64 * b_64; \ |
| /* Revert COMPMID-907 */ \ |
| VEC_DATA_TYPE(long, size) \ |
| mask1 = 1 << 30; \ |
| VEC_DATA_TYPE(long, size) \ |
| mask2 = 1 - (1 << 30); \ |
| VEC_DATA_TYPE(long, size) \ |
| is_positive_or_zero = ab_64 >= 0; \ |
| VEC_DATA_TYPE(long, size) \ |
| nudge = select(mask2, mask1, (SELECT_VEC_DATA_TYPE(long, size))(is_positive_or_zero)); \ |
| VEC_DATA_TYPE(long, size) \ |
| mask = 1ll << 31; \ |
| VEC_DATA_TYPE(int, size) \ |
| ab_x2_high32 = convert_int##size((ab_64 + nudge) / mask); \ |
| return select(ab_x2_high32, INT_MAX, (SELECT_VEC_DATA_TYPE(int, size))(overflow)); \ |
| } |
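| |
| /* Worked example (illustrative only): this is the Q31 fixed-point saturating rounding |
|  * doubling high multiply (the NEON SQRDMULH operation): it returns the high 32 bits of |
|  * 2 * a * b with rounding, saturating the single overflow case INT_MIN * INT_MIN. |
|  * For a = b = 1 << 30 (0.5 in Q31), ab_64 = 1 << 60 and the result is |
|  * ((1 << 60) + (1 << 30)) / (1 << 31) = 1 << 29, i.e. 0.25 in Q31. |
|  */ |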
| |
| /** Calculates \f$ exp(x) \f$ for x in [-1/4, 0). |
| * |
| * @param[in] size Size of vector. |
| * |
| * @return Result in fixed-point format Q0. |
| */ |
| #define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(VEC_DATA_TYPE(int, size) a) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) constant_term = 1895147668; \ |
| const VEC_DATA_TYPE(int, size) constant_1_over_3 = 715827883; \ |
| const int k_fractional_bits = 31; \ |
| VEC_DATA_TYPE(int, size) \ |
| x = a + (1 << (k_fractional_bits - 3)); \ |
| VEC_DATA_TYPE(int, size) \ |
| x2 = ASYMM_MULT(x, x, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| x3 = ASYMM_MULT(x2, x, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| x4 = ASYMM_MULT(x2, x2, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| x4_over_4 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4, 2, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| x4_over_24_plus_x3_over_6_plus_x2 = ASYMM_MULT((x4_over_4 + x3), constant_1_over_3, size) + x2; \ |
| VEC_DATA_TYPE(int, size) \ |
| x4_over_24_plus_x3_over_6_plus_x2_over_2 = ASYMM_ROUNDING_DIVIDE_BY_POW2(x4_over_24_plus_x3_over_6_plus_x2, 1, size); \ |
| return constant_term + ASYMM_MULT(constant_term, x + x4_over_24_plus_x3_over_6_plus_x2_over_2, size); \ |
| } |
| |
| /** Each bit of the result is set to the corresponding bit of either then_val or |
|  * else_val depending on whether the corresponding bit of if_mask is set. |
|  * Equivalent to the VBSL instruction in ARM NEON. |
|  * |
|  * @param[in] size Size of vector. |
|  * |
|  * @returns Result containing bits from @p then_val or from @p else_val depending on whether the corresponding bit in @p if_mask is set. |
| */ |
| #define ASYMM_SELECT_USING_MASK_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_select_using_mask##size(VEC_DATA_TYPE(int, size) if_mask, VEC_DATA_TYPE(int, size) then_val, VEC_DATA_TYPE(int, size) else_val) \ |
| { \ |
| return (if_mask & then_val) ^ (~if_mask & else_val); \ |
| } |
| |
| /** For each element of the input vector, all bits of the corresponding result element are set |
|  * if the input element is zero. |
|  * |
|  * @param[in] size Size of vector. |
|  * |
|  * @returns Output vector with all bits set in each element whose counterpart in @p a is zero. |
| */ |
| #define ASYMM_MASK_IF_ZERO_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_mask_if_zero##size(VEC_DATA_TYPE(int, size) a) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) all_zeros = 0; \ |
| const VEC_DATA_TYPE(int, size) all_ones = ~0; \ |
| return select(all_zeros, all_ones, (SELECT_VEC_DATA_TYPE(int, size))(a == 0)); \ |
| } |
| |
| /** For each element of the input vector, all bits of the corresponding result element are set |
|  * if the input element is non-zero. |
|  * |
|  * @param[in] size Size of vector. |
|  * |
|  * @returns Output vector with all bits set in each element whose counterpart in @p a is non-zero. |
| */ |
| #define ASYMM_MASK_IF_NON_ZERO_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_mask_if_non_zero##size(VEC_DATA_TYPE(int, size) a) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) all_zeros = 0; \ |
| const VEC_DATA_TYPE(int, size) all_ones = ~0; \ |
| return select(all_zeros, all_ones, (SELECT_VEC_DATA_TYPE(int, size))(a != 0)); \ |
| } |
| |
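| /** Multiply @p result by @p fp_multiplier when the bit of @p remainder selected by @p exponent |
|  * (relative to the fixed-point format) is set, leaving it unchanged otherwise; exp(x) is assembled |
|  * from precomputed Q0 factors of the form exp(-2^exponent). |
|  * |
|  * @param[in] size Size of vector. |
|  */ |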
| #define EXP_BARREL_SHIFTER_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) exp_barrel_shifter##size(VEC_DATA_TYPE(int, size) result, int exponent, int fp_multiplier, int k_integer_bits, int k_fractional_bits, VEC_DATA_TYPE(int, size) remainder) \ |
| { \ |
| if(k_integer_bits > exponent) \ |
| { \ |
| const int k_shift_amount = k_integer_bits > exponent ? k_fractional_bits + exponent : 0; \ |
| return ASYMM_SELECT_USING_MASK( \ |
| ASYMM_MASK_IF_NON_ZERO(remainder & (1 << k_shift_amount), size), \ |
| ASYMM_MULT(result, fp_multiplier, size), result, size); \ |
| } \ |
| \ |
| return result; \ |
| } |
| |
| /** Calculates \f$ exp(x) \f$ for x < 0. |
| * |
| * @param[in] size Size of vector. |
| * |
| * @return Result in fixed-point format Q0. |
| */ |
| #define ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_exp_on_negative_values##size(VEC_DATA_TYPE(int, size) a, int k_integer_bits) \ |
| { \ |
| const int k_fractional_bits = 31 - k_integer_bits; \ |
| VEC_DATA_TYPE(int, size) \ |
| k_one_quarter = 1 << (k_fractional_bits - 2); \ |
| VEC_DATA_TYPE(int, size) \ |
| mask = k_one_quarter - 1; \ |
| VEC_DATA_TYPE(int, size) \ |
| a_mod_quarter_minus_one_quarter = (a & mask) - k_one_quarter; \ |
| VEC_DATA_TYPE(int, size) \ |
| a_mod_quarter_minus_one_quarter_scaled = a_mod_quarter_minus_one_quarter << k_integer_bits; \ |
| VEC_DATA_TYPE(int, size) \ |
| result = ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a_mod_quarter_minus_one_quarter_scaled, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| remainder = a_mod_quarter_minus_one_quarter - a; \ |
| \ |
| result = EXP_BARREL_SHIFTER(result, -2, 1672461947, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, -1, 1302514674, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +0, 790015084, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +1, 290630308, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +2, 39332535, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +3, 720401, k_integer_bits, k_fractional_bits, remainder, size); \ |
| result = EXP_BARREL_SHIFTER(result, +4, 242, k_integer_bits, k_fractional_bits, remainder, size); \ |
| \ |
| if(k_integer_bits > 5) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) clamp = -(1 << (k_fractional_bits + 5)); \ |
| result = ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_NON_ZERO(a < clamp, size), 0, result, size); \ |
| } \ |
| \ |
| const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX; \ |
| return ASYMM_SELECT_USING_MASK(ASYMM_MASK_IF_ZERO(a, size), Q0_one, result, size); \ |
| } |
| |
| /** Calculates the product of an integer value by a power of two, with either a positive exponent |
| * (equivalent to an arithmetic left shift, saturating) or a negative exponent |
| * (equivalent to an arithmetic right shift, rounding to nearest). |
| * |
| * @param[in] size Size of vector. |
| * |
| * @return Arithmetic left or right shift. |
| */ |
| #define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_saturating_rounding_mult_by_pow2##size(VEC_DATA_TYPE(int, size) x, int exponent) \ |
| { \ |
| if(exponent < 0) \ |
| { \ |
| return ASYMM_ROUNDING_DIVIDE_BY_POW2(x, -exponent, size); \ |
| } \ |
| \ |
| const VEC_DATA_TYPE(int, size) min = INT_MIN; \ |
| const VEC_DATA_TYPE(int, size) max = INT_MAX; \ |
| int threshold = ((1 << (31 - exponent)) - 1); \ |
| VEC_DATA_TYPE(int, size) \ |
| positive_mask = ASYMM_MASK_IF_NON_ZERO(x > threshold, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| negative_mask = ASYMM_MASK_IF_NON_ZERO(x < -threshold, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| result = x << exponent; \ |
| result = ASYMM_SELECT_USING_MASK(positive_mask, max, result, size); \ |
| result = ASYMM_SELECT_USING_MASK(negative_mask, min, result, size); \ |
| return result; \ |
| } |
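| |
| /* Worked example (illustrative only): for exponent = 3 the saturation threshold is |
|  * (1 << 28) - 1, so x = 1 << 29 saturates to INT_MAX, while x = 100 simply yields |
|  * 100 << 3 = 800. A negative exponent falls back to the rounding right shift above. |
|  */ |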
| |
| /** Calculates (a+b)/2, rounded to the nearest integer. |
| * Equivalent to VRHADD in the ARM NEON instruction set. |
| * |
| * @param[in] size Size of vector. |
| * |
| * @return (a+b)/2, rounded to the nearest integer. |
| */ |
| #define ASYMM_ROUNDING_HALF_SUM_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_rounding_half_sum##size(VEC_DATA_TYPE(int, size) a, VEC_DATA_TYPE(int, size) b) \ |
| { \ |
| VEC_DATA_TYPE(long, size) \ |
| a64 = convert_long##size(a); \ |
| VEC_DATA_TYPE(long, size) \ |
| b64 = convert_long##size(b); \ |
| VEC_DATA_TYPE(long, size) \ |
| sum = a64 + b64; \ |
| const VEC_DATA_TYPE(long, size) one = 1; \ |
| const VEC_DATA_TYPE(long, size) minus_one = -1; \ |
| VEC_DATA_TYPE(long, size) \ |
| sign = select(minus_one, one, (SELECT_VEC_DATA_TYPE(long, size))(sum >= 0)); \ |
| return convert_int##size((sum + sign) / 2); \ |
| } |
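| |
| /* Worked example (illustrative only): asymm_rounding_half_sum(3, 4) computes |
|  * (3 + 4 + 1) / 2 = 4, i.e. 3.5 rounded to nearest with ties away from zero; |
|  * widening to 64 bits avoids overflow of a + b. |
|  */ |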
| |
| /** Calculates \f$ 1 / (1 + x) \f$ for x in (0, 1). |
| * |
| * @param[in] size Size of vector. |
| * |
| * @return Result in fixed-point format Q0. |
| */ |
| #define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_one_over_one_plus_x_for_x_in_0_1##size(VEC_DATA_TYPE(int, size) a) \ |
| { \ |
| const VEC_DATA_TYPE(int, size) Q0_one = INT_MAX; \ |
| const VEC_DATA_TYPE(int, size) Q2_one = 1 << (31 - 2); \ |
| VEC_DATA_TYPE(int, size) \ |
| half_denominator = ASYMM_ROUNDING_HALF_SUM(a, Q0_one, size); \ |
| const VEC_DATA_TYPE(int, size) Q2_48_over_17 = 1515870810; \ |
| const VEC_DATA_TYPE(int, size) Q2_neg_32_over_17 = -1010580540; \ |
| VEC_DATA_TYPE(int, size) \ |
| x = Q2_48_over_17 + ASYMM_MULT(half_denominator, Q2_neg_32_over_17, size); \ |
| for(int i = 0; i < 3; i++) \ |
| { \ |
| VEC_DATA_TYPE(int, size) \ |
| half_denominator_times_x = ASYMM_MULT(half_denominator, x, size); \ |
| VEC_DATA_TYPE(int, size) \ |
| one_minus_half_denominator_times_x = Q2_one - half_denominator_times_x; \ |
| VEC_DATA_TYPE(int, size) \ |
| tmp = ASYMM_MULT(x, one_minus_half_denominator_times_x, size); \ |
| x = x + ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(tmp, 2, size); \ |
| } \ |
| return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, 1, size); \ |
| } |
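| |
| /* Note (informal): the loop above is the Newton-Raphson recurrence for the reciprocal, |
|  * x_{n+1} = x_n * (2 - d * x_n) with d = half_denominator in [0.5, 1), started from the |
|  * affine estimate 48/17 - 32/17 * d. The iterate converges to 1/d = 2/(1 + a) in Q2 |
|  * format, and the final multiply-by-2 reinterprets it as 1/(1 + a) in Q0. |
|  */ |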
| |
| /** Considering the integer value as fixed-point, change the number of integer bits and update value accordingly. |
| * |
| * @param[in] size Size of vector. |
| * |
| * @return Rescaled value. |
| */ |
| #define ASYMM_RESCALE_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) asymm_rescale##size(VEC_DATA_TYPE(int, size) value, int src_integer_bits, int dst_integer_bits) \ |
| { \ |
| int exponent = src_integer_bits - dst_integer_bits; \ |
| return ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(value, exponent, size); \ |
| } |
| |
| #define QUANTIZE_STR(input, offset, scale, type, size) quantize_##type##size(input, offset, scale) |
| #define QUANTIZE(input, offset, scale, type, size) QUANTIZE_STR(input, offset, scale, type, size) |
| #define DEQUANTIZE_STR(input, offset, scale, type, size) dequantize_##type##size(input, offset, scale) |
| #define DEQUANTIZE(input, offset, scale, type, size) DEQUANTIZE_STR(input, offset, scale, type, size) |
| |
| #define ASYMM_ROUNDING_DIVIDE_BY_POW2_STR(x, exponent, size) asymm_rounding_divide_by_POW2_##size(x, exponent) |
| #define ASYMM_ROUNDING_DIVIDE_BY_POW2(x, exponent, size) ASYMM_ROUNDING_DIVIDE_BY_POW2_STR(x, exponent, size) |
| #define ASYMM_MULT_STR(a, b, size) asymm_mult##size(a, b) |
| #define ASYMM_MULT(a, b, size) ASYMM_MULT_STR(a, b, size) |
| #define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(x, quantized_multiplier, left_shift, size) \ |
| ASYMM_MULT(x *((VEC_DATA_TYPE(int, size))(1) << (-left_shift)), quantized_multiplier, size) |
| #define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, quantized_multiplier, right_shift, size) \ |
| ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(x, quantized_multiplier, size), right_shift, size) |
| #define ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL(a, size) asymm_exp_on_interval_between_negative_one_quarter_and_0_excl##size(a) |
| #define ASYMM_SELECT_USING_MASK(if_mask, then_val, else_val, size) asymm_select_using_mask##size(if_mask, then_val, else_val) |
| #define ASYMM_MASK_IF_ZERO(a, size) asymm_mask_if_zero##size(a) |
| #define ASYMM_MASK_IF_NON_ZERO(a, size) asymm_mask_if_non_zero##size(a) |
| #define EXP_BARREL_SHIFTER(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder, size) exp_barrel_shifter##size(result, exponent, fp_multiplier, k_integer_bits, k_fractional_bits, remainder) |
| #define ASYMM_EXP_ON_NEGATIVE_VALUES_STR(a, k_integer_bits, size) asymm_exp_on_negative_values##size(a, k_integer_bits) |
| #define ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, size) ASYMM_EXP_ON_NEGATIVE_VALUES_STR(a, k_integer_bits, size) |
| #define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size) asymm_one_over_one_plus_x_for_x_in_0_1##size(a) |
| #define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(a, size) ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_STR(a, size) |
| #define ASYMM_SATURATING_ROUNDING_MULT_BY_POW2(x, exponent, size) asymm_saturating_rounding_mult_by_pow2##size(x, exponent) |
| #define ASYMM_ROUNDING_HALF_SUM(a, b, size) asymm_rounding_half_sum##size(a, b) |
| #define ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size) asymm_rescale##size(value, src_integer_bits, dst_integer_bits) |
| #define ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, size) ASYMM_RESCALE_STR(value, src_integer_bits, dst_integer_bits, size) |
| |
| #define MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(size) \ |
| inline VEC_DATA_TYPE(int, size) multiply_by_quantized_multiplier##size(VEC_DATA_TYPE(int, size) input, int qmul, int shift) \ |
| { \ |
| const int left_shift = shift > 0 ? shift : 0; \ |
| const int right_shift = shift > 0 ? 0 : -shift; \ |
| return ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(input * (1 << left_shift), qmul, size), right_shift, size); \ |
| } |
| #define MULTIPLY_BY_QUANTIZED_MULTIPLIER(input, qmul, shift, size) multiply_by_quantized_multiplier##size(input, qmul, shift) |
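| |
| /* Example (illustrative only): the requantization multiplier is expressed as the pair |
|  * (qmul, shift): a positive shift is a plain left shift applied before the Q31 multiply, |
|  * a negative shift a rounding right shift applied after it, e.g. |
|  * |
|  *     int4 out = MULTIPLY_BY_QUANTIZED_MULTIPLIER(acc, qmul, -2, 4); |
|  *     // == ASYMM_ROUNDING_DIVIDE_BY_POW2(ASYMM_MULT(acc, qmul, 4), 2, 4) |
|  */ |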
| |
| QUANTIZE_IMPL(uchar, 1) |
| QUANTIZE_IMPL(char, 1) |
| QUANTIZE_IMPL(uint, 1) |
| QUANTIZE_IMPL(int, 1) |
| QUANTIZE_IMPL(uchar, 4) |
| QUANTIZE_IMPL(ushort, 4) |
| QUANTIZE_IMPL(short, 4) |
| QUANTIZE_IMPL(uchar, 16) |
| QUANTIZE_IMPL(char, 16) |
| QUANTIZE_IMPL(ushort, 16) |
| QUANTIZE_IMPL(short, 16) |
| QUANTIZE_IMPL(uint, 16) |
| QUANTIZE_IMPL(int, 16) |
| |
| DEQUANTIZE_IMPL(uchar, 1) |
| DEQUANTIZE_IMPL(char, 1) |
| DEQUANTIZE_IMPL(uint, 1) |
| DEQUANTIZE_IMPL(int, 1) |
| DEQUANTIZE_IMPL(uchar, 4) |
| DEQUANTIZE_IMPL(ushort, 4) |
| DEQUANTIZE_IMPL(short, 4) |
| DEQUANTIZE_IMPL(uchar, 16) |
| DEQUANTIZE_IMPL(char, 16) |
| DEQUANTIZE_IMPL(ushort, 16) |
| DEQUANTIZE_IMPL(short, 16) |
| DEQUANTIZE_IMPL(uint, 16) |
| DEQUANTIZE_IMPL(int, 16) |
| |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(1) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(2) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(3) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(4) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(8) |
| ASYMM_ROUNDING_DIVIDE_BY_POW2_IMPL(16) |
| |
| ASYMM_MULT_IMPL(1) |
| ASYMM_MULT_IMPL(2) |
| ASYMM_MULT_IMPL(3) |
| ASYMM_MULT_IMPL(4) |
| ASYMM_MULT_IMPL(8) |
| ASYMM_MULT_IMPL(16) |
| |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(1) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(2) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(3) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(4) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(8) |
| ASYMM_EXP_ON_INTERVAL_BETWEEN_NEGATIVE_ONE_QUARTER_AND_0_EXCL_IMPL(16) |
| |
| ASYMM_SELECT_USING_MASK_IMPL(1) |
| ASYMM_SELECT_USING_MASK_IMPL(2) |
| ASYMM_SELECT_USING_MASK_IMPL(3) |
| ASYMM_SELECT_USING_MASK_IMPL(4) |
| ASYMM_SELECT_USING_MASK_IMPL(8) |
| ASYMM_SELECT_USING_MASK_IMPL(16) |
| |
| ASYMM_MASK_IF_ZERO_IMPL(1) |
| ASYMM_MASK_IF_ZERO_IMPL(2) |
| ASYMM_MASK_IF_ZERO_IMPL(3) |
| ASYMM_MASK_IF_ZERO_IMPL(4) |
| ASYMM_MASK_IF_ZERO_IMPL(8) |
| ASYMM_MASK_IF_ZERO_IMPL(16) |
| |
| ASYMM_MASK_IF_NON_ZERO_IMPL(1) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(2) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(3) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(4) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(8) |
| ASYMM_MASK_IF_NON_ZERO_IMPL(16) |
| |
| EXP_BARREL_SHIFTER_IMPL(1) |
| EXP_BARREL_SHIFTER_IMPL(2) |
| EXP_BARREL_SHIFTER_IMPL(3) |
| EXP_BARREL_SHIFTER_IMPL(4) |
| EXP_BARREL_SHIFTER_IMPL(8) |
| EXP_BARREL_SHIFTER_IMPL(16) |
| |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(1) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(2) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(3) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(4) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(8) |
| ASYMM_EXP_ON_NEGATIVE_VALUES_IMPL(16) |
| |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(1) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(2) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(3) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(4) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(8) |
| ASYMM_SATURATING_ROUNDING_MULT_BY_POW2_IMPL(16) |
| |
| ASYMM_ROUNDING_HALF_SUM_IMPL(1) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(2) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(3) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(4) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(8) |
| ASYMM_ROUNDING_HALF_SUM_IMPL(16) |
| |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(1) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(2) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(3) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(4) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(8) |
| ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1_IMPL(16) |
| |
| ASYMM_RESCALE_IMPL(1) |
| ASYMM_RESCALE_IMPL(2) |
| ASYMM_RESCALE_IMPL(3) |
| ASYMM_RESCALE_IMPL(4) |
| ASYMM_RESCALE_IMPL(8) |
| ASYMM_RESCALE_IMPL(16) |
| |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(1) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(2) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(3) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(4) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(8) |
| MULTIPLY_BY_QUANTIZED_MULTIPLIER_IMPL(16) |
| |
| #endif // ARM_COMPUTE_HELPERS_ASYMM_H |
| |
| #ifndef VEC_SIZE |
| #if defined(N0) |
| #define VEC_SIZE N0 |
| #else /* defined(N0) */ |
| #define VEC_SIZE 8 |
| #endif /* defined(N0) */ |
| #endif /* VEC_SIZE */ |
| |
| #if defined(ACTIVATION_TYPE) && defined(CONST_0) |
| #ifndef ARM_COMPUTE_HELPER_H |
| #define ARM_COMPUTE_HELPER_H |
| |
| /** Store the 0th to (n-1)th rows of the given variables |
| * @name STORE_ROW_n |
| * |
| * @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| /** @} */ // end of group STORE_ROW_n |
| |
| /** Convert and store the 0th to (n-1)th rows of the given variables |
| * @name CONVERT_STORE_ROW_n |
| * |
| * @param[in] N0 The size of the vectors |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| /** @} */ // end of group CONVERT_STORE_ROW_n |
| |
| /** Store a block of the given size M0xN0 |
| * @name STORE_BLOCK |
| * |
| * Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16. |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store |
| * @param[in] N0 The size of each vector |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| /** @} */ // end of group STORE_BLOCK |
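| |
| /* Example (illustrative sketch, with hypothetical names): for M0 = 2 rows named c0 and c1 |
|  * and z offsets zin0 and zin1, storing a 2x4 float block would be written as |
|  * |
|  *     STORE_BLOCK(2, 4, float, c, dst_addr, dst_stride_y, zin); |
|  *     // expands to vstore4(c0, ...) and vstore4(c1, ...) at consecutive rows |
|  */ |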
| |
| /** Convert and store a block of the given size M0xN0 |
| * @name CONVERT_STORE_BLOCK |
| * |
| * Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16. |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store |
| * @param[in] N0 The size of each vector |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| /** @} */ // end of group CONVERT_STORE_BLOCK |
| |
| /** Partially store the 0th to (n-1)th rows of the given variables |
|  * @name STORE_ROW_PARTIAL_n |
|  * Within each row, store the lower @p STORE_N0 elements of vectors of width @p N0 |
|  * |
|  * @note In case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty. |
|  * |
|  * @param[in] N0        The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16 |
|  * @param[in] STORE_N0  The **lower** size of the vectors to store. Supported: 1-16 and <= @p N0 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| /** @} */ // end of group STORE_ROW_PARTIAL_n |
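| |
| /* Illustrative expansion (a sketch, not part of the API): with N0 = 4 and STORE_N0 = 3, |
| * STORE_ROW_PARTIAL_2(4, 3, float, c, dst_addr, dst_stride_y, zin) |
| * resolves VSTORE_PARTIAL(4, 3) to vstore_partial_4_3, i.e. vstore_partial_3, and emits: |
| * vstore3(c0.s012, 0, (__global float *)(dst_addr + 0 * dst_stride_y + zin0)); |
| * vstore3(c1.s012, 0, (__global float *)(dst_addr + 1 * dst_stride_y + zin1)); |
| * (dst_addr, dst_stride_y and zin are names assumed only for this example.) |
| */ |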
| |
| /** Partially store a block of the given size STORE_M0xSTORE_N0 |
| * @name STORE_BLOCK_PARTIAL |
| * |
| * @note The vector width @p N0 is also required for correct partial storing behaviour. |
| * @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty. |
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for STORE_M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for STORE_M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] STORE_M0 The number of rows to store. Supported: 1-16 |
| * @param[in] STORE_N0 The lower number of elements of vectors to store. Supported: 1-16 and <= @p N0 |
| * @param[in] N0 The size of each vector. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| /** Store a block that can be partial in both x and y dimensions |
| * |
| * @note in case @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty. |
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0) |
| * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0) |
| * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0. |
| * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0. |
| */ |
| #define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| /** Store a block that can only be partial in x but not y. |
| * |
| * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty. |
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0) |
| * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0. |
| */ |
| #define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| /** Store a block that can only be partial in y but not x. |
| * |
| * @note in case @p N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty. |
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0) |
| * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0. |
| */ |
| #define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ |
| if(!(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| /** @} */ // end of group STORE_BLOCK_PARTIAL |
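| |
| /* Usage sketch (illustrative only; dst_addr, zout, N and the get_global_id() mapping are |
| * assumptions for this example, not requirements of the macros). Following the convention |
| * documented below, partial rows sit at the start in y and partial columns at the end in x: |
| * |
| * const bool cond_y = (get_global_id(1) == 0); |
| * const bool cond_x = ((get_global_id(0) + 1) * N0 >= N); |
| * STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, float, c, dst_addr, dst_stride_y, zout, |
| * PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x); |
| */ |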
| |
| #if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| |
| /** Boundary-aware GEMM block store |
| * @name STORE_BLOCK_BOUNDARY_AWARE |
| * This macro assumes the following schemes to achieve boundary-awareness: |
| * - Overlapping load in Y axis from lhs tensor. This implies lhs has no padding along y dim. |
| * - Non-overlapping (normal) load from rhs tensor. This implies rhs can have paddings. |
| * - Overlapping load in Y axis from bias tensor. This implies bias has no padding along y dim. |
| * The macro then ensures that the dst tensor can be stored without any paddings in both x and y dim. |
| * |
| * In the y dimension, we place the partial blocks **at the beginning**, while in the x dimension, we place the partial |
| * blocks **at the end**. |
| * Say the dst tensor is of shape MxN and we have M0 and N0 as the block size; this is how we define "partial blocks"/ |
| * "boundary blocks" (we use the two terms "partial blocks" and "boundary blocks" interchangeably) and their various parameters: |
| * |
| * *--x--> x == 0 x == 1 |
| * | |<------------------------------N-------------------------->| |
| * y |<--------------N0------------->|<----PARTIAL_STORE_N0----->| |
| * | -------------############################################################# |
| * * | | |...............................|...........................| |
| * y == 0 | PAR_..._M0 |......Boundary block in y......|.Boundary block in x and y.| |
| * | | |...............................|...........................| |
| * M --############################################################# |
| * | | | |...........................| |
| * y == 1 | M0 | Non-boundary block |....Boundary block in x....| |
| * | | | |...........................| |
| * |------------############################################################# |
| * |
| * Then @p PARTIAL_STORE_M0 = M % M0 and @p PARTIAL_STORE_N0 = N % N0 |
| * |
| * @note in case @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty. |
| * |
| * The macro automatically detects whether a given M, N, M0, N0 combination can yield partial blocks in either the X or Y dimension, |
| * and selects the corresponding store method so that the boundary detection logic is only added when needed. |
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0) |
| * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported: [0, @p N0) |
| * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0. |
| * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0. |
| * @{ |
| */ |
| #if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| // Case1: No partial blocks in either x or y |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 |
| // Case2: Partial blocks in y |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) |
| |
| #elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 |
| // Case3: Partial blocks in x |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) |
| |
| #else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| // Case4: Partial blocks in both x and y |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) |
| |
| #endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| |
| #endif // defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| /** @} */ // end of group STORE_BLOCK_BOUNDARY_AWARE |
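| |
| /* Compile-time resolution sketch (assuming the host builds the kernel with, e.g., |
| * -DPARTIAL_STORE_M0=2 -DPARTIAL_STORE_N0=0): Case2 above is selected, so every use of |
| * STORE_BLOCK_BOUNDARY_AWARE collapses to STORE_BLOCK_PARTIAL_IN_Y and the x-boundary |
| * branch is compiled out entirely. */ |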
| |
| #if defined(PARTIAL_STORE_M0) |
| /** Compute the start m0 row (LHS, BIAS and DST) in a boundary-aware way so as to avoid padding |
| * @name COMPUTE_M0_START_ROW |
| * If there are any partial blocks in the y dimension, they are placed at the beginning of the rows. |
| * This shift amount is added to all rows such that the partial block (at the beginning) overlaps with the subsequent |
| * blocks in the y dimension to avoid any padding. |
| * E.g. M0=4, PARTIAL_STORE_M0=1: |
| * | Non-overlapping | +M0_ROW_SHIFT (Overlapping) |
| * block 0 (partial)| start row = 0 | start row = 0 |
| * block 1 (full) | start row = 4 | start row = 1 |
| * block 2 (full) | start row = 8 | start row = 5 |
| * |
| * @param[in] y Global id of current block in y. |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0) |
| * @{ |
| */ |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) |
| #else // defined(PARTIAL_STORE_M0) |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(y * M0)) |
| #endif // defined(PARTIAL_STORE_M0) |
| /** @} */ // end of group COMPUTE_M0_START_ROW |
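| |
| /* Worked example for the table above (M0 = 4, PARTIAL_STORE_M0 = 1): the shift is |
| * (M0 - PARTIAL_STORE_M0) % M0 = 3, so COMPUTE_M0_START_ROW(y, 4, 1) yields |
| * y == 0: max(0, 0 * 4 - 3) = 0 |
| * y == 1: max(0, 1 * 4 - 3) = 1 |
| * y == 2: max(0, 2 * 4 - 3) = 5 |
| * reproducing the "+M0_ROW_SHIFT (Overlapping)" column. */ |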
| |
| /** Store a vector that can only be partial in x. |
| * |
| * @note in case @p vec_size or @p leftover != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty. |
| * |
| * The data to store is expected to end in a 0. |
| * E.g., for basename=c, the expected name is c0. |
| * |
| * @param[in] basename The name of the variable without trailing 0 |
| * @param[in] data_type The data type of the vector |
| * @param[in] ptr The base pointer |
| * @param[in] vec_size The vector size if cond = false. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] leftover The vector size if cond = true. Supported range: [1, @p vec_size) |
| * @param[in] cond Condition to select the store size: false stores @p vec_size elements, true stores @p leftover elements |
| * @{ |
| */ |
| #define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ |
| STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) |
| /** @} */ // end of group STORE_VECTOR_SELECT |
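| |
| /* Usage sketch (illustrative; res0, out_ptr and LEFTOVER are assumed names): store a |
| * float4 result of which only LEFTOVER elements are valid when cond_x is true: |
| * |
| * STORE_VECTOR_SELECT(res, float, out_ptr, 4, LEFTOVER, cond_x) |
| * |
| * With cond_x == false this is a plain vstore4 of res0; with cond_x == true only the |
| * lower LEFTOVER elements are written. */ |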
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #pragma OPENCL EXTENSION cl_khr_fp16 : enable |
| #endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| |
| #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| #pragma OPENCL EXTENSION cl_arm_printf : enable |
| #endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| |
| #define GPU_ARCH_MIDGARD 0x100 |
| #define GPU_ARCH_BIFROST 0x200 |
| |
| /** Concatenate two inputs. |
| * |
| * @param[in] a The first input to be concatenated |
| * @param[in] b The second input to be concatenated |
| * |
| * @return The concatenated output |
| */ |
| #define CONCAT(a, b) a##b |
| |
| /** Expand the given vector |
| * |
| * @param[in] x The vector to be expanded |
| * |
| * @return The expanded output |
| */ |
| #define EXPAND(x) x |
| |
| /** Clamp the given value between an upper and lower bound. |
| * |
| * @param[in] x The value to be clamped |
| * @param[in] min_val The lower bound |
| * @param[in] max_val The upper bound |
| * |
| * @return The clamped value. |
| */ |
| #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) |
| |
| /** REVn reverses the given vector whose size is n. |
| * @name REVn |
| * |
| * @param[in] x The vector to be reversed |
| * |
| * @return The reversed vector |
| * @{ |
| */ |
| #define REV1(x) ((x)) |
| #define REV2(x) ((x).s10) |
| #define REV3(x) ((x).s210) |
| #define REV4(x) ((x).s3210) |
| #define REV8(x) ((x).s76543210) |
| #define REV16(x) ((x).sFEDCBA9876543210) |
| /** @} */ // end of group REVn |
| |
| /** Reverse the given vector. |
| * @name REVERSE |
| * |
| * @param[in] x The vector to be reversed |
| * @param[in] s The size of the vector |
| * |
| * @return The reversed vector |
| * @{ |
| */ |
| #define REVERSE_STR(x, s) REV##s((x)) |
| #define REVERSE(x, s) REVERSE_STR(x, s) |
| /** @} */ // end of group REVERSE |
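| |
| /* Example: with uchar4 v = (uchar4)(1, 2, 3, 4), REVERSE(v, 4) expands to REV4(v), |
| * i.e. v.s3210 == (uchar4)(4, 3, 2, 1). */ |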
| |
| /** Circular-right-shift (rotate-right) the vector of size s by the amount of n. |
| * @name ROTs_n |
| * |
| * @param[in] x The vector to be shifted |
| * |
| * @return The shifted vector |
| * @{ |
| */ |
| #define ROT1_0(x) ((x)) |
| |
| #define ROT2_0(x) ((x)) |
| #define ROT2_1(x) ((x).s10) |
| |
| #define ROT3_0(x) ((x)) |
| #define ROT3_1(x) ((x).s201) |
| #define ROT3_2(x) ((x).s120) |
| |
| #define ROT4_0(x) ((x)) |
| #define ROT4_1(x) ((x).s3012) |
| #define ROT4_2(x) ((x).s2301) |
| #define ROT4_3(x) ((x).s1230) |
| |
| #define ROT8_0(x) ((x)) |
| #define ROT8_1(x) ((x).s70123456) |
| #define ROT8_2(x) ((x).s67012345) |
| #define ROT8_3(x) ((x).s56701234) |
| #define ROT8_4(x) ((x).s45670123) |
| #define ROT8_5(x) ((x).s34567012) |
| #define ROT8_6(x) ((x).s23456701) |
| #define ROT8_7(x) ((x).s12345670) |
| |
| #define ROT16_0(x) ((x)) |
| #define ROT16_1(x) ((x).sF0123456789ABCDE) |
| #define ROT16_2(x) ((x).sEF0123456789ABCD) |
| #define ROT16_3(x) ((x).sDEF0123456789ABC) |
| #define ROT16_4(x) ((x).sCDEF0123456789AB) |
| #define ROT16_5(x) ((x).sBCDEF0123456789A) |
| #define ROT16_6(x) ((x).sABCDEF0123456789) |
| #define ROT16_7(x) ((x).s9ABCDEF012345678) |
| #define ROT16_8(x) ((x).s89ABCDEF01234567) |
| #define ROT16_9(x) ((x).s789ABCDEF0123456) |
| #define ROT16_10(x) ((x).s6789ABCDEF012345) |
| #define ROT16_11(x) ((x).s56789ABCDEF01234) |
| #define ROT16_12(x) ((x).s456789ABCDEF0123) |
| #define ROT16_13(x) ((x).s3456789ABCDEF012) |
| #define ROT16_14(x) ((x).s23456789ABCDEF01) |
| #define ROT16_15(x) ((x).s123456789ABCDEF0) |
| /** @} */ // end of group ROTs_n |
| |
| /** Circular-right-shift (rotate-right) the given vector by the given amount. |
| * @name ROTATE |
| * |
| * @param[in] x The vector to be shifted |
| * @param[in] s The size of the vector |
| * @param[in] n The amount to be shifted |
| * |
| * @return The shifted vector |
| * @{ |
| */ |
| #define ROTATE_STR(x, s, n) ROT##s##_##n(x) |
| #define ROTATE(x, s, n) ROTATE_STR(x, s, n) |
| /** @} */ // end of group ROTATE |
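| |
| /* Example: with int4 v = (int4)(1, 2, 3, 4), ROTATE(v, 4, 1) expands to ROT4_1(v), |
| * i.e. v.s3012 == (int4)(4, 1, 2, 3): every element moves one lane to the right, |
| * wrapping around. */ |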
| |
| /** Creates a vector of size n filled with offset values corresponding to the location of each element. |
| * @name V_OFFSn |
| * |
| * @param[in] dt The data type of the output vector |
| * |
| * @return The vector filled with offset values |
| * @{ |
| */ |
| #define V_OFFS1(dt) (dt##1)(0) |
| #define V_OFFS2(dt) (dt##2)(0, 1) |
| #define V_OFFS3(dt) (dt##3)(0, 1, 2) |
| #define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) |
| #define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) |
| #define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) |
| /** @} */ // end of group V_OFFSn |
| |
| /** Create a vector filled with offset values corresponding to the location of each element. |
| * @name VEC_OFFS |
| * |
| * @param[in] dt The data type of the output vector |
| * @param[in] s The size of the output vector |
| * |
| * @return The vector filled with offset values |
| * @{ |
| */ |
| #define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) |
| #define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) |
| /** @} */ // end of group VEC_OFFS |
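| |
| /* Example: VEC_OFFS(int, 4) expands to V_OFFS4(int) == (int4)(0, 1, 2, 3), which is |
| * useful for building per-lane indices or boundary masks. */ |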
| |
| #define VLOAD_STR(size) vload##size |
| #define VLOAD(size) VLOAD_STR(size) |
| |
| #define PIXEL_UNIT4 1 |
| #define PIXEL_UNIT8 2 |
| #define PIXEL_UNIT16 4 |
| |
| /** Utility macro to convert a vector size into a number of pixel units. |
| * |
| * @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT |
| * |
| * @param[in] vec_size Vector size. Only 4, 8 and 16 are supported |
| * |
| * @return The pixel unit (number of pixels) |
| * @{ |
| */ |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) |
| /** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT |
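| |
| /* Example: each texel holds 4 channel values (e.g. one float4), so a 16-wide vector |
| * spans CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(16) == PIXEL_UNIT16 == 4 pixels. */ |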
| |
| #define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); |
| #endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| |
| /** Utility macro to read a 2D OpenCL image object. |
| * |
| * @note Coordinates are not normalized |
| * |
| * @param[in] data_type Data type |
| * @param[in] n0 Number of pixels to read. Only 1, 2 and 4 are supported |
| * @param[in] img OpenCL image object |
| * @param[in] x_coord The x coordinate for the top-left pixel |
| * @param[in] y_coord The y coordinate for the top-left pixel |
| * |
| * @return Pixels from the 2D OpenCL image object |
| * @{ |
| */ |
| #define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) |
| #define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) |
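| |
| /* Usage sketch (illustrative; img is assumed to be a read_only image2d_t with float4 |
| * texels): read 2 texels, i.e. 8 floats, starting at (x, y): |
| * |
| * float8 vals = READ_IMAGE2D(float, 2, img, x, y) |
| * |
| * (the read_image2d_* macros above already end in a semicolon). */ |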
| |
| #define VSTORE_STR(size) vstore##size |
| #define VSTORE(size) VSTORE_STR(size) |
| |
| #define float1 float |
| #define half1 half |
| #define char1 char |
| #define uchar1 uchar |
| #define short1 short |
| #define ushort1 ushort |
| #define int1 int |
| #define uint1 uint |
| #define long1 long |
| #define ulong1 ulong |
| #define double1 double |
| |
| #define vload1(OFFSET, PTR) *(OFFSET + PTR) |
| #define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA |
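| |
| /* Example: vstore1(3.0f, 2, ptr) expands to *(2 + ptr) = 3.0f, so scalars get the same |
| * vloadN/vstoreN interface as real vectors. */ |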
| |
| /** Extended partial vstore that correctly handles scalar values as well. |
| * Store the **lower** 0 to (n-1)th elements of the given vector while minimising the number of vstore ops |
| * @name VSTORE_PARTIAL |
| * |
| * @note With this macro, the passed data can be both a vector and a scalar |
| * @note @p store_size needs to be <= @p size |
| * eg 1: Valid |
| * VSTORE_PARTIAL(16, 15) ...; |
| * eg 2: Invalid |
| * VSTORE_PARTIAL(4, 7) ...; |
| * |
| * @param[in] size The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16 |
| * @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size |
| * @{ |
| */ |
| #define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size |
| #define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) |
| |
| #define NO_STORE(data, offs, ptr) \ |
| { \ |
| } |
| |
| // Size == 1 (scalar) |
| #define vstore_partial_1_0 NO_STORE |
| #define vstore_partial_1_1 vstore1 |
| #define vstore_partial_1_2 NO_STORE |
| #define vstore_partial_1_3 NO_STORE |
| #define vstore_partial_1_4 NO_STORE |
| #define vstore_partial_1_5 NO_STORE |
| #define vstore_partial_1_6 NO_STORE |
| #define vstore_partial_1_7 NO_STORE |
| #define vstore_partial_1_8 NO_STORE |
| #define vstore_partial_1_9 NO_STORE |
| #define vstore_partial_1_10 NO_STORE |
| #define vstore_partial_1_11 NO_STORE |
| #define vstore_partial_1_12 NO_STORE |
| #define vstore_partial_1_13 NO_STORE |
| #define vstore_partial_1_14 NO_STORE |
| #define vstore_partial_1_15 NO_STORE |
| #define vstore_partial_1_16 NO_STORE |
| // Size == 2 |
| #define vstore_partial_2_0 NO_STORE |
| #define vstore_partial_2_1 vstore_partial_1 |
| #define vstore_partial_2_2 vstore_partial_2 |
| #define vstore_partial_2_3 NO_STORE |
| #define vstore_partial_2_4 NO_STORE |
| #define vstore_partial_2_5 NO_STORE |
| #define vstore_partial_2_6 NO_STORE |
| #define vstore_partial_2_7 NO_STORE |
| #define vstore_partial_2_8 NO_STORE |
| #define vstore_partial_2_9 NO_STORE |
| #define vstore_partial_2_10 NO_STORE |
| #define vstore_partial_2_11 NO_STORE |
| #define vstore_partial_2_12 NO_STORE |
| #define vstore_partial_2_13 NO_STORE |
| #define vstore_partial_2_14 NO_STORE |
| #define vstore_partial_2_15 NO_STORE |
| #define vstore_partial_2_16 NO_STORE |
| // Size == 3 |
| #define vstore_partial_3_0 NO_STORE |
| #define vstore_partial_3_1 vstore_partial_1 |
| #define vstore_partial_3_2 vstore_partial_2 |
| #define vstore_partial_3_3 vstore_partial_3 |
| #define vstore_partial_3_4 NO_STORE |
| #define vstore_partial_3_5 NO_STORE |
| #define vstore_partial_3_6 NO_STORE |
| #define vstore_partial_3_7 NO_STORE |
| #define vstore_partial_3_8 NO_STORE |
| #define vstore_partial_3_9 NO_STORE |
| #define vstore_partial_3_10 NO_STORE |
| #define vstore_partial_3_11 NO_STORE |
| #define vstore_partial_3_12 NO_STORE |
| #define vstore_partial_3_13 NO_STORE |
| #define vstore_partial_3_14 NO_STORE |
| #define vstore_partial_3_15 NO_STORE |
| #define vstore_partial_3_16 NO_STORE |
| // Size == 4 |
| #define vstore_partial_4_0 NO_STORE |
| #define vstore_partial_4_1 vstore_partial_1 |
| #define vstore_partial_4_2 vstore_partial_2 |
| #define vstore_partial_4_3 vstore_partial_3 |
| #define vstore_partial_4_4 vstore_partial_4 |
| #define vstore_partial_4_5 NO_STORE |
| #define vstore_partial_4_6 NO_STORE |
| #define vstore_partial_4_7 NO_STORE |
| #define vstore_partial_4_8 NO_STORE |
| #define vstore_partial_4_9 NO_STORE |
| #define vstore_partial_4_10 NO_STORE |
| #define vstore_partial_4_11 NO_STORE |
| #define vstore_partial_4_12 NO_STORE |
| #define vstore_partial_4_13 NO_STORE |
| #define vstore_partial_4_14 NO_STORE |
| #define vstore_partial_4_15 NO_STORE |
| #define vstore_partial_4_16 NO_STORE |
| // Size == 8 |
| #define vstore_partial_8_0 NO_STORE |
| #define vstore_partial_8_1 vstore_partial_1 |
| #define vstore_partial_8_2 vstore_partial_2 |
| #define vstore_partial_8_3 vstore_partial_3 |
| #define vstore_partial_8_4 vstore_partial_4 |
| #define vstore_partial_8_5 vstore_partial_5 |
| #define vstore_partial_8_6 vstore_partial_6 |
| #define vstore_partial_8_7 vstore_partial_7 |
| #define vstore_partial_8_8 vstore_partial_8 |
| #define vstore_partial_8_9 NO_STORE |
| #define vstore_partial_8_10 NO_STORE |
| #define vstore_partial_8_11 NO_STORE |
| #define vstore_partial_8_12 NO_STORE |
| #define vstore_partial_8_13 NO_STORE |
| #define vstore_partial_8_14 NO_STORE |
| #define vstore_partial_8_15 NO_STORE |
| #define vstore_partial_8_16 NO_STORE |
| // Size == 16 |
| #define vstore_partial_16_0 NO_STORE |
| #define vstore_partial_16_1 vstore_partial_1 |
| #define vstore_partial_16_2 vstore_partial_2 |
| #define vstore_partial_16_3 vstore_partial_3 |
| #define vstore_partial_16_4 vstore_partial_4 |
| #define vstore_partial_16_5 vstore_partial_5 |
| #define vstore_partial_16_6 vstore_partial_6 |
| #define vstore_partial_16_7 vstore_partial_7 |
| #define vstore_partial_16_8 vstore_partial_8 |
| #define vstore_partial_16_9 vstore_partial_9 |
| #define vstore_partial_16_10 vstore_partial_10 |
| #define vstore_partial_16_11 vstore_partial_11 |
| #define vstore_partial_16_12 vstore_partial_12 |
| #define vstore_partial_16_13 vstore_partial_13 |
| #define vstore_partial_16_14 vstore_partial_14 |
| #define vstore_partial_16_15 vstore_partial_15 |
| #define vstore_partial_16_16 vstore_partial_16 |
| |
| /** Partial vstore. Store the **lower** 0 to (n-1)th elements of the given vector while minimising the number of vstore ops |
| * @name vstore_partial_n |
| * |
| * @note @p DATA needs to be a vector not a scalar |
| * @note n needs to be <= the vector width of the input variable @p DATA |
| * eg 1: Valid |
| * vstore_partial_15(var:float16, 0, 0xabcd); |
| * eg 2: Invalid |
| * vstore_partial_7(var:float4, 0, 0xabcd); |
| * |
| * @note in case n == 1, 2, 3, 4, 8 or 16, no extra vstore is invoked, so there is no performance penalty. |
| * |
| * @param[in] DATA The name of the variable |
| * @param[in] OFFSET Offset in n |
| * @param[in] PTR The base pointer |
| * @{ |
| */ |
| #define vstore_partial_1(DATA, OFFSET, PTR) \ |
| vstore1(DATA.s0, OFFSET, PTR); |
| |
| #define vstore_partial_2(DATA, OFFSET, PTR) \ |
| vstore2(DATA.s01, OFFSET, PTR); |
| |
| #define vstore_partial_3(DATA, OFFSET, PTR) \ |
| vstore3(DATA.s012, OFFSET, PTR); |
| |
| #define vstore_partial_4(DATA, OFFSET, PTR) \ |
| vstore4(DATA.s0123, OFFSET, PTR); |
| |
| #define vstore_partial_5(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore1(DATA.s4, OFFSET, PTR + 4); |
| |
| #define vstore_partial_6(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vstore_partial_7(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vstore_partial_8(DATA, OFFSET, PTR) \ |
| vstore8(DATA.s01234567, OFFSET, PTR); |
| |
| #define vstore_partial_9(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore1(DATA.s8, OFFSET, PTR + 8); |
| |
| #define vstore_partial_10(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vstore_partial_11(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s89a, OFFSET, PTR + 8); |
| |
| #define vstore_partial_12(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8); |
| |
| #define vstore_partial_13(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_14(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_15(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_16(DATA, OFFSET, PTR) \ |
| vstore16(DATA, OFFSET, PTR); |
| /** @} */ // end of group vstore_partial_n |
| /** @} */ // end of group VSTORE_PARTIAL |
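| |
| /* Decomposition example: VSTORE_PARTIAL(8, 7) resolves to vstore_partial_8_7, i.e. |
| * vstore_partial_7, which emits a vstore4 of elements .s0123 followed by a vstore3 of |
| * elements .s456: two vector stores instead of seven scalar ones. */ |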
| |
| // Convert built-in functions with _sat modifier are not supported in floating point so we create defines |
| // without _sat to overcome this issue |
| #define convert_float_sat convert_float |
| #define convert_float1_sat convert_float |
| #define convert_float2_sat convert_float2 |
| #define convert_float3_sat convert_float3 |
| #define convert_float4_sat convert_float4 |
| #define convert_float8_sat convert_float8 |
| #define convert_float16_sat convert_float16 |
| #define convert_half_sat convert_half |
| #define convert_half1_sat convert_half |
| #define convert_half2_sat convert_half2 |
| #define convert_half3_sat convert_half3 |
| #define convert_half4_sat convert_half4 |
| #define convert_half8_sat convert_half8 |
| #define convert_half16_sat convert_half16 |
| |
| #define convert_float1 convert_float |
| #define convert_half1 convert_half |
| #define convert_char1 convert_char |
| #define convert_uchar1 convert_uchar |
| #define convert_short1 convert_short |
| #define convert_ushort1 convert_ushort |
| #define convert_int1 convert_int |
| #define convert_uint1 convert_uint |
| #define convert_long1 convert_long |
| #define convert_ulong1 convert_ulong |
| #define convert_double1 convert_double |
| |
| #define convert_char1_sat convert_char_sat |
| #define convert_uchar1_sat convert_uchar_sat |
| #define convert_short1_sat convert_short_sat |
| #define convert_ushort1_sat convert_ushort_sat |
| #define convert_int1_sat convert_int_sat |
| #define convert_uint1_sat convert_uint_sat |
| #define convert_long1_sat convert_long_sat |
| #define convert_ulong1_sat convert_ulong_sat |
| #define convert_double1_sat convert_double_sat |
| |
| #define VEC_DATA_TYPE_STR(type, size) type##size |
| #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) |
| |
| #define CONVERT_STR(x, type) (convert_##type((x))) |
| #define CONVERT(x, type) CONVERT_STR(x, type) |
| |
| #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) |
| #define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) |
| |
| #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) |
| #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) |
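| |
| /* Example: CONVERT_SAT((float4)(300.0f, -7.3f, 0.5f, 12.0f), uchar4) resolves to |
| * convert_uchar4_sat(...) and yields (uchar4)(255, 0, 0, 12): out-of-range values are |
| * clamped and the float-to-integer conversion truncates toward zero by default. */ |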
| |
| #define select_vec_dt_uchar(size) uchar##size |
| #define select_vec_dt_char(size) char##size |
| #define select_vec_dt_ushort(size) ushort##size |
| #define select_vec_dt_short(size) short##size |
| #define select_vec_dt_half(size) short##size |
| #define select_vec_dt_uint(size) uint##size |
| #define select_vec_dt_int(size) int##size |
| #define select_vec_dt_float(size) int##size |
| #define select_vec_dt_ulong(size) ulong##size |
| #define select_vec_dt_long(size) long##size |
| |
| #define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) |
| #define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) |
| #define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define sum_reduce_1(x) (x) |
| #define sum_reduce_2(x) ((x).s0) + ((x).s1) |
| #define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) |
| #define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) |
| #define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) |
| #define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) |
| |
| #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) |
| #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) |
| |
| #define max_reduce_1(x) (x) |
| #define max_reduce_2(x) max(((x).s0), ((x).s1)) |
| #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) |
| #define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) |
| #define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) |
| #define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) |
| |
| #define MAX_REDUCE_STR(x, size) max_reduce_##size(x) |
| #define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) |
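| |
| /* Example: with float4 v = (float4)(1.0f, 5.0f, 3.0f, 2.0f), |
| * SUM_REDUCE(v, 4) == (1 + 5) + (3 + 2) == 11.0f and MAX_REDUCE(v, 4) == 5.0f; |
| * both reduce pairwise in log2(size) levels. */ |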
| |
| #define VECTOR_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define IMAGE_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define CONVERT_TO_VECTOR_STRUCT(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x) |
| |
| #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0) |
| |
| #define CONVERT_TO_IMAGE_STRUCT(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y) |
| |
| #define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \ |
| tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
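| |
| /* Usage sketch (an illustrative kernel, not part of this header): a *_DECLARATION macro |
| * expands to the flattened argument list the host passes per tensor, and the matching |
| * CONVERT_TO_*_STRUCT macro rebuilds a per-workitem view of it: |
| * |
| * __kernel void example_copy_byte(TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst)) |
| * { |
| * Tensor3D s = CONVERT_TO_TENSOR3D_STRUCT(src); |
| * Tensor3D d = CONVERT_TO_TENSOR3D_STRUCT(dst); |
| * *d.ptr = *s.ptr; // copies one byte per workitem |
| * } |
| */ |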
| |
| /** Structure to hold Vector information */ |
| typedef struct Vector |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source vector */ |
| int stride_x; /**< Stride of the vector in X dimension (in bytes) */ |
| } Vector; |
| |
| /** Structure to hold Image information */ |
| typedef struct Image |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source image */ |
| int stride_x; /**< Stride of the image in X dimension (in bytes) */ |
| int stride_y; /**< Stride of the image in Y dimension (in bytes) */ |
| } Image; |
| |
| /** Structure to hold 3D tensor information */ |
| typedef struct Tensor3D |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */ |
| int stride_x; /**< Stride of the tensor in X dimension (in bytes) */ |
| int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */ |
| int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */ |
| } Tensor3D; |
| |
| /** Structure to hold 4D tensor information */ |
| typedef struct Tensor4D |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */ |
| int stride_x; /**< Stride of the tensor in X dimension (in bytes) */ |
| int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */ |
| int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */ |
| int stride_w; /**< Stride of the tensor in W dimension (in bytes) */ |
| } Tensor4D; |
| |
| /** Wrap vector information into a Vector structure, and make the pointer point at this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source vector |
| * @param[in] stride_x Stride of the vector in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes) |
| * |
| * @return A vector object |
| */ |
| inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x) |
| { |
| Vector vector = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| }; |
| vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x; |
| return vector; |
| } |
| |
| /** Wrap image information into an Image structure, and make the pointer point at this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source image |
| * @param[in] stride_x Stride of the image in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] stride_y Stride of the image in Y dimension (in bytes) |
| * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes) |
| * |
| * @return An image object |
| */ |
| inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y; |
| return img; |
| } |
| |
| /** Wrap 3D tensor information into an image structure, and make the pointer point at this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source image |
| * @param[in] stride_x Stride of the image in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] stride_y Stride of the image in Y dimension (in bytes) |
| * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] stride_z Stride of the image in Z dimension (in bytes) |
| * @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes) |
| * |
| * @return An image object |
| */ |
| inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return img; |
| } |
| |
| /** Wrap 3D tensor information into a tensor structure, and make the pointer point at this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor |
| * @param[in] stride_x Stride of the tensor in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes) |
| * @param[in] stride_y Stride of the tensor in Y dimension (in bytes) |
| * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes) |
| * @param[in] stride_z Stride of the tensor in Z dimension (in bytes) |
| * @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes) |
| * |
| * @return A 3D tensor object |
| */ |
| inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return tensor; |
| } |
| |
| /** Wrap 3D tensor information into a tensor structure. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor |
| * @param[in] stride_x Stride of the tensor in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes) |
| * @param[in] stride_y Stride of the tensor in Y dimension (in bytes) |
| * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes) |
| * @param[in] stride_z Stride of the tensor in Z dimension (in bytes) |
| * @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes) |
| * |
| * @return A 3D tensor object |
| */ |
| inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| return tensor; |
| } |
| |
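| /** Wrap 4D tensor information into a tensor structure, and make the pointer point at this workitem's data. |
| * |
| * @note get_global_id(2) is folded over both z and w: (get_global_id(2) % mod_size) indexes z and |
| * (get_global_id(2) / mod_size) indexes w, so @p mod_size is expected to be the depth of the tensor. |
| * |
| * @return A 4D tensor object |
| */ |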
| inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w, |
| uint step_w, |
| uint mod_size) |
| { |
| Tensor4D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z, |
| .stride_w = stride_w |
| }; |
| |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w; |
| return tensor; |
| } |
| |
| /** Get the pointer position of a Vector |
| * |
| * @param[in] vec Pointer to the Vector structure |
| * @param[in] x Relative X position |
| */ |
| inline __global const uchar *vector_offset(const Vector *vec, int x) |
| { |
| return vec->ptr + x * vec->stride_x; |
| } |
| |
| /** Get the pointer position of an Image |
| * |
| * @param[in] img Pointer to the Image structure |
| * @param[in] x Relative X position |
| * @param[in] y Relative Y position |
| */ |
| inline __global uchar *offset(const Image *img, int x, int y) |
| { |
| return img->ptr + x * img->stride_x + y * img->stride_y; |
| } |
| |
| /** Get the pointer position of a Tensor3D |
| * |
| * @param[in] tensor Pointer to the Tensor3D structure |
| * @param[in] x Relative X position |
| * @param[in] y Relative Y position |
| * @param[in] z Relative Z position |
| */ |
| inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z; |
| } |
| |
| /** Get the pointer position of a Tensor4D |
| * |
| * @param[in] tensor Pointer to the Tensor4D structure |
| * @param[in] x Relative X position |
| * @param[in] y Relative Y position |
| * @param[in] z Relative Z position |
| * @param[in] w Relative W position |
| */ |
| inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w; |
| } |
| |
| /** Get the offset for a given linear index of a Tensor3D |
| * |
| * @param[in] tensor Pointer to the Tensor3D structure |
| * @param[in] width Width of the input tensor |
| * @param[in] height Height of the input tensor |
| * @param[in] depth Depth of the input tensor |
| * @param[in] index Linear index |
| */ |
| inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index) |
| { |
| uint num_elements = width * height; |
| |
| const uint z = index / num_elements; |
| |
| index %= num_elements; |
| |
| const uint y = index / width; |
| |
| index %= width; |
| |
| const uint x = index; |
| |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes; |
| } |
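| |
| /* Worked example: with width = 4, height = 3 and index = 29, num_elements = 12, so |
| * z = 29 / 12 = 2 (remainder 5), y = 5 / 4 = 1 and x = 1: the returned pointer |
| * addresses element (x, y, z) = (1, 1, 2). */ |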
| |
| #endif // ARM_COMPUTE_HELPER_H |
| |
| #define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE) |
| |
| #if defined(S1_VAL) && !defined(S2_VAL) |
| #define S2_VAL S1_VAL |
| #endif // defined(S1_VAL) && !defined(S2_VAL) |
| #if defined(O1_VAL) && !defined(O2_VAL) |
| #define O2_VAL O1_VAL |
| #endif // defined(O1_VAL) && !defined(O2_VAL) |
| |
| // RELU Activation |
| inline TYPE relu_op(TYPE x) |
| { |
| return max((TYPE)CONST_0, x); |
| } |
| // Bounded RELU Activation |
| inline TYPE brelu_op(TYPE x) |
| { |
| return min((TYPE)A_VAL, max((TYPE)CONST_0, x)); |
| } |
| // Lower Upper Bounded RELU Activation |
| inline TYPE lu_brelu_op(TYPE x) |
| { |
| return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL); |
| } |
| // Hard Swish Activation: x * relu6(x + 3) / 6, with 0.166666667f approximating 1/6 |
| inline TYPE hard_swish_op(TYPE x) |
| { |
| return (x * ((min(max((TYPE)(x + (TYPE)3.f), (TYPE)0.f), (TYPE)6.f)) * (TYPE)0.166666667f)); |
| } |
| |
| #define ACTIVATION_OP2(op, x) op##_op(x) |
| #define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x) |
| |
| #if defined(S1_VAL) && defined(S2_VAL) |
| #if defined(O1_VAL) && defined(O2_VAL) |
| #define PERFORM_ACTIVATION_QUANT(act, data) \ |
| ({ \ |
| data = ACTIVATION_OP(act, data); \ |
| \ |
| VEC_DATA_TYPE(float, VEC_SIZE) \ |
| fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)); \ |
| \ |
| fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \ |
| data = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \ |
| }) |
| #else // defined(O1_VAL) && defined(O2_VAL) |
| #define PERFORM_ACTIVATION_QUANT(act, data) \ |
| ({ \ |
| data = ACTIVATION_OP(act, data); \ |
| \ |
| VEC_DATA_TYPE(float, VEC_SIZE) \ |
| fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE)); \ |
| \ |
| fdata = round((fdata) * ((float)S1_VAL / (float)S2_VAL)); \ |
| data = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \ |
| }) |
| #endif /* defined(O1_VAL) && defined(O2_VAL) */ |
| #else /* defined(S1_VAL) && defined(S2_VAL) */ |
| #define PERFORM_ACTIVATION_QUANT(act, data) \ |
| ({ \ |
| data = ACTIVATION_OP(act, data); \ |
| }) |
| #endif /* defined(S1_VAL) && defined(S2_VAL) */ |
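| |
| /* Requantization sketch: treating S1_VAL/O1_VAL as the input scale/offset and |
| * S2_VAL/O2_VAL as the output ones, the first variant above computes |
| * q_out = round((q_in - O1_VAL) * (S1_VAL / S2_VAL) + O2_VAL) |
| * and then saturate-casts the result back to DATA_TYPE. */ |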
| |
| #define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE) |
| |
| #if defined(FLOAT_DOMAIN) |
| // Activations performed in the float domain |
| |
| /* |
| * Copyright (c) 2019-2020 Arm Limited. |
| * |
| * SPDX-License-Identifier: MIT |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a copy |
| * of this software and associated documentation files (the "Software"), to |
| * deal in the Software without restriction, including without limitation the |
| * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| * sell copies of the Software, and to permit persons to whom the Software is |
| * furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in all |
| * copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| |
| /* |
| * Copyright (c) 2016-2020 Arm Limited. |
| * |
| * SPDX-License-Identifier: MIT |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a copy |
| * of this software and associated documentation files (the "Software"), to |
| * deal in the Software without restriction, including without limitation the |
| * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| * sell copies of the Software, and to permit persons to whom the Software is |
| * furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in all |
| * copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| #ifndef ARM_COMPUTE_HELPER_H |
| #define ARM_COMPUTE_HELPER_H |
| |
| /* |
| * Copyright (c) 2020 Arm Limited. |
| * |
| * SPDX-License-Identifier: MIT |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a copy |
| * of this software and associated documentation files (the "Software"), to |
| * deal in the Software without restriction, including without limitation the |
| * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| * sell copies of the Software, and to permit persons to whom the Software is |
| * furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in all |
| * copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| |
/** Store the 0th to (n-1)th rows of the given variables
| * @name STORE_ROW_n |
| * |
| * @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
/** @} */ // end of group STORE_ROW_n
| |
| /** Convert and store the 0th to (n-1)th rows of the given variables |
| * @name CONVERT_STORE_ROW_n |
| * |
| * @param[in] N0 The size of the vectors |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
| CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
/** @} */ // end of group CONVERT_STORE_ROW_n
| |
| /** Store a block of the given size M0xN0 |
| * @name STORE_BLOCK |
| * |
| * Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16. |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store |
| * @param[in] N0 The size of each vector |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| /** @} */ // end of group STORE_BLOCK |
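
// Example (illustrative sketch, not part of the library API; the names c0/c1,
// dst_addr and zout are hypothetical): with M0=2, N0=4 and DATA_TYPE=float,
//   STORE_BLOCK(2, 4, float, c, dst_addr, dst_stride_y, zout);
// expands to two vstore4 calls:
//   vstore4(c0, 0, (__global float *)(dst_addr + 0 * dst_stride_y + zout0));
//   vstore4(c1, 0, (__global float *)(dst_addr + 1 * dst_stride_y + zout1));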
| |
| /** Convert and store a block of the given size M0xN0 |
| * @name CONVERT_STORE_BLOCK |
| * |
| * Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16. |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store |
| * @param[in] N0 The size of each vector |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| /** @} */ // end of group CONVERT_STORE_BLOCK |
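
// Example (illustrative sketch; acc, dst_addr and zout are hypothetical names):
// storing a float accumulator row acc0 as saturated uchar,
//   CONVERT_STORE_BLOCK(1, 4, uchar, acc, dst_addr, dst_stride_y, zout);
// expands to:
//   vstore4(convert_uchar4_sat((acc0)), 0, (__global uchar *)(dst_addr + 0 * dst_stride_y + zout0));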
| |
/** Partially store the 0th to (n-1)th rows of the given variables
| * @name STORE_ROW_PARTIAL_n |
| * Within each row, store the lower @p STORE_N0 elements of vectors of width @p N0 |
| * |
 * @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16 |
 * @param[in] STORE_N0  The **lower** size of the vectors to store. Supported: 1-16 and <= @p N0
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
/** @} */ // end of group STORE_ROW_PARTIAL_n
| |
| /** Partially store a block of the given size STORE_M0xSTORE_N0 |
| * @name STORE_BLOCK_PARTIAL |
| * |
| * @note The vector width @p N0 is also required for correct partial storing behaviour. |
 * @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for STORE_M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for STORE_M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] STORE_M0 The number of rows to store. Supported: 1-16 |
| * @param[in] STORE_N0 The lower number of elements of vectors to store. Supported: 1-16 and <= @p N0 |
| * @param[in] N0 The size of each vector. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @{ |
| */ |
| #define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| /** Store a block that can be partial in both x and y dimensions |
| * |
 * @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0) |
| * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0) |
| * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0. |
| * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0. |
| */ |
| #define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| /** Store a block that can only be partial in x but not y. |
| * |
 * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0) |
| * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0. |
| */ |
| #define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| /** Store a block that can only be partial in y but not x. |
| * |
 * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0) |
| * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0. |
| */ |
| #define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ |
| if(!(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| /** @} */ // end of group STORE_BLOCK_PARTIAL |
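
// Example (illustrative sketch; c, dst_addr and zout are hypothetical names):
// storing only the lower 3 elements of two 4-wide rows,
//   STORE_BLOCK_PARTIAL(2, 3, 4, float, c, dst_addr, dst_stride_y, zout);
// dispatches to VSTORE_PARTIAL(4, 3), i.e. one vstore3 per row:
//   vstore3(c0.s012, 0, (__global float *)(dst_addr + 0 * dst_stride_y + zout0));
//   vstore3(c1.s012, 0, (__global float *)(dst_addr + 1 * dst_stride_y + zout1));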
| |
| #if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| |
| /** Boundary-aware GEMM block store |
| * @name STORE_BLOCK_BOUNDARY_AWARE |
| * This macro assumes the following schemes to achieve boundary-awareness: |
| * - Overlapping load in Y axis from lhs tensor. This implies lhs has no padding along y dim. |
 * - Non-overlapping (normal) load from rhs tensor. This implies rhs can have padding.
 * - Overlapping load in Y axis from bias tensor. This implies bias has no padding along y dim.
 * The macro then ensures that the dst tensor can be stored without any padding in both x and y dims.
| * |
| * In the y dimension, we place the partial blocks **at the beginning** while in the x dimension, we place the partial |
| * blocks **at the end**. |
 * Say the dst tensor is of shape MxN and we have M0 and N0 as the block sizes. This is how we define a "partial block"/
 * "boundary block" (we use the 2 terms "partial block" and "boundary block" interchangeably) and its various parameters:
| * |
| * *--x--> x == 0 x == 1 |
| * | |<------------------------------N-------------------------->| |
| * y |<--------------N0------------->|<----PARTIAL_STORE_N0----->| |
| * | -------------############################################################# |
| * * | | |...............................|...........................| |
| * y == 0 | PAR_..._M0 |......Boundary block in y......|.Boundary block in x and y.| |
| * | | |...............................|...........................| |
| * M --############################################################# |
| * | | | |...........................| |
| * y == 1 | M0 | Non-boundary block |....Boundary block in x....| |
| * | | | |...........................| |
| * |------------############################################################# |
| * |
| * Then @p PARTIAL_STORE_M0 = M % M0 and @p PARTIAL_STORE_N0 = N % N0 |
| * |
 * @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
 * It automatically detects if a given M,N,M0,N0 combination can yield partial blocks in either the X or Y dimension,
 * and selects the corresponding store methods such that the boundary detection logic is only added when needed.
| * |
| * The data to store is expected to have consecutive names for each row. |
| * E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2. |
| * The Z offset is expected to have consecutive names. |
| * E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2. |
| * |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16 |
| * @param[in] DATA_TYPE The data type of the vectors |
| * @param[in] BASENAME The basename of the variables |
| * @param[in] PTR The base pointer |
| * @param[in] STRIDE_Y The stride value in y-axis direction |
| * @param[in] Z The offset in z-axis direction |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0) |
| * @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported: [0, @p N0) |
| * @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0. |
| * @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0. |
| * @{ |
| */ |
| #if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| // Case1: No partial blocks in either x or y |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 |
| // Case2: Partial blocks in y |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) |
| |
| #elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 |
| // Case3: Partial blocks in x |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) |
| |
| #else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| // Case4: Partial blocks in both x and y |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) |
| |
| #endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| |
| #endif // defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| /** @} */ // end of group STORE_BLOCK_BOUNDARY_AWARE |
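
// Worked example (illustrative, with hypothetical sizes): for a dst tensor of
// shape M=64, N=33 and block sizes M0=4, N0=16, the compiler sees
// PARTIAL_STORE_M0 = 64 % 4 = 0 and PARTIAL_STORE_N0 = 33 % 16 = 1, so Case3 is
// selected at compile time and only the x-boundary check is emitted; work-items
// with PARTIAL_COND_X true store a single element per row instead of 16.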
| |
| #if defined(PARTIAL_STORE_M0) |
| /** Compute the start m0 row (LHS, BIAS and DST) in a boundary-aware way so as to avoid padding |
| * @name COMPUTE_M0_START_ROW |
 * If there are any partial blocks in the y dimension, they are placed at the beginning of the rows.
| * This shift amount is added to all rows such that the partial block (at the beginning) overlaps with the subsequent |
| * blocks in the y dimension to avoid any padding. |
| * EG: M0=4, PARTIAL_STORE_M0=1: |
| * | Non-overlapping | +M0_ROW_SHIFT (Overlapping) |
| * block 0 (partial)| start row = 0 | start row = 0 |
| * block 1 (full) | start row = 4 | start row = 1 |
| * block 2 (full) | start row = 8 | start row = 5 |
| * |
| * @param[in] y Global id of current block in y. |
| * @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16 |
| * @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0) |
| * @{ |
| */ |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) |
| #else // defined(PARTIAL_STORE_M0) |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(y * M0)) |
| #endif // defined(PARTIAL_STORE_M0) |
| /** @} */ // end of group COMPUTE_M0_START_ROW |
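
// Worked example (illustrative): with M0=4 and PARTIAL_STORE_M0=1 the shift is
// (M0 - PARTIAL_STORE_M0) % M0 = 3, so:
//   COMPUTE_M0_START_ROW(0, 4, 1) = max(0, 0 - 3) = 0
//   COMPUTE_M0_START_ROW(1, 4, 1) = max(0, 4 - 3) = 1
//   COMPUTE_M0_START_ROW(2, 4, 1) = max(0, 8 - 3) = 5
// matching the "Overlapping" column in the table above.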
| |
| /** Store a vector that can only be partial in x. |
| * |
 * @note in case @p vec_size or @p leftover != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
| * |
| * The data to store is expected to end in a 0. |
| * E.g., for basename=c, the expected name is c0. |
| * |
| * @param[in] basename The name of the variable without trailing 0 |
| * @param[in] data_type The data type of the vector |
| * @param[in] ptr The base pointer |
| * @param[in] vec_size The vector size if cond = false. Supported: 1, 2, 3, 4, 8, 16 |
 * @param[in] leftover  The vector size if cond = true. Supported range: [1, @p vec_size)
 * @param[in] cond      Condition to select either @p vec_size or @p leftover
| * @{ |
| */ |
| #define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ |
| STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) |
| /** @} */ // end of group STORE_VECTOR_SELECT |
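
// Example (illustrative sketch; res, dst_addr and x_cond are hypothetical names):
//   STORE_VECTOR_SELECT(res, uchar, dst_addr, 16, 5, x_cond);
// stores the full uchar16 vector res0 when x_cond is false, and only its lower
// 5 elements (one vstore4 plus one scalar store) when x_cond is true.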
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #pragma OPENCL EXTENSION cl_khr_fp16 : enable |
| #endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| |
| #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| #pragma OPENCL EXTENSION cl_arm_printf : enable |
| #endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| |
| #define GPU_ARCH_MIDGARD 0x100 |
| #define GPU_ARCH_BIFROST 0x200 |
| |
| /** Concatenate two inputs. |
| * |
| * @param[in] a The first input to be concatenated |
| * @param[in] b The second input to be concatenated |
| * |
| * @return The concatenated output |
| */ |
| #define CONCAT(a, b) a##b |
| |
| /** Expand the given vector |
| * |
| * @param[in] x The vector to be expanded |
| * |
| * @return The expanded output |
| */ |
| #define EXPAND(x) x |
| |
| /** Clamp the given value between an upper and lower bound. |
| * |
| * @param[in] x The value to be clamped |
| * @param[in] min_val The lower bound |
| * @param[in] max_val The upper bound |
| * |
| * @return The clamped value. |
| */ |
| #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) |
| |
| /** REVn reverses the given vector whose size is n. |
| * @name REVn |
| * |
| * @param[in] x The vector to be reversed |
| * |
| * @return The reversed vector |
| * @{ |
| */ |
| #define REV1(x) ((x)) |
| #define REV2(x) ((x).s10) |
| #define REV3(x) ((x).s210) |
| #define REV4(x) ((x).s3210) |
| #define REV8(x) ((x).s76543210) |
| #define REV16(x) ((x).sFEDCBA9876543210) |
| /** @} */ // end of group REVn |
| |
| /** Reverse the given vector. |
| * @name REVERSE |
| * |
| * @param[in] x The vector to be reversed |
| * @param[in] s The size of the vector |
| * |
| * @return The reversed vector |
| * @{ |
| */ |
| #define REVERSE_STR(x, s) REV##s((x)) |
| #define REVERSE(x, s) REVERSE_STR(x, s) |
| /** @} */ // end of group REVERSE |
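
// Example (illustrative): for a hypothetical uchar4 v = (uchar4)(1, 2, 3, 4),
//   REVERSE(v, 4)
// expands to REV4(v), i.e. ((v).s3210) == (uchar4)(4, 3, 2, 1).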
| |
| /** Circular-right-shift (rotate-right) the vector of size s by the amount of n. |
| * @name ROTs_n |
| * |
| * @param[in] x The vector to be shifted |
| * |
| * @return The shifted vector |
| * @{ |
| */ |
| #define ROT1_0(x) ((x)) |
| |
| #define ROT2_0(x) ((x)) |
| #define ROT2_1(x) ((x).s10) |
| |
| #define ROT3_0(x) ((x)) |
| #define ROT3_1(x) ((x).s201) |
| #define ROT3_2(x) ((x).s120) |
| |
| #define ROT4_0(x) ((x)) |
| #define ROT4_1(x) ((x).s3012) |
| #define ROT4_2(x) ((x).s2301) |
| #define ROT4_3(x) ((x).s1230) |
| |
| #define ROT8_0(x) ((x)) |
| #define ROT8_1(x) ((x).s70123456) |
| #define ROT8_2(x) ((x).s67012345) |
| #define ROT8_3(x) ((x).s56701234) |
| #define ROT8_4(x) ((x).s45670123) |
| #define ROT8_5(x) ((x).s34567012) |
| #define ROT8_6(x) ((x).s23456701) |
| #define ROT8_7(x) ((x).s12345670) |
| |
| #define ROT16_0(x) ((x)) |
| #define ROT16_1(x) ((x).sF0123456789ABCDE) |
| #define ROT16_2(x) ((x).sEF0123456789ABCD) |
| #define ROT16_3(x) ((x).sDEF0123456789ABC) |
| #define ROT16_4(x) ((x).sCDEF0123456789AB) |
| #define ROT16_5(x) ((x).sBCDEF0123456789A) |
| #define ROT16_6(x) ((x).sABCDEF0123456789) |
| #define ROT16_7(x) ((x).s9ABCDEF012345678) |
| #define ROT16_8(x) ((x).s89ABCDEF01234567) |
| #define ROT16_9(x) ((x).s789ABCDEF0123456) |
| #define ROT16_10(x) ((x).s6789ABCDEF012345) |
| #define ROT16_11(x) ((x).s56789ABCDEF01234) |
| #define ROT16_12(x) ((x).s456789ABCDEF0123) |
| #define ROT16_13(x) ((x).s3456789ABCDEF012) |
| #define ROT16_14(x) ((x).s23456789ABCDEF01) |
| #define ROT16_15(x) ((x).s123456789ABCDEF0) |
| /** @} */ // end of group ROTs_n |
| |
| /** Circular-right-shift (rotate-right) the given vector by the given amount. |
| * @name ROTATE |
| * |
| * @param[in] x The vector to be shifted |
| * @param[in] s The size of the vector |
| * @param[in] n The amount to be shifted |
| * |
| * @return The shifted vector |
| * @{ |
| */ |
| #define ROTATE_STR(x, s, n) ROT##s##_##n(x) |
| #define ROTATE(x, s, n) ROTATE_STR(x, s, n) |
| /** @} */ // end of group ROTATE |
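
// Example (illustrative): for a hypothetical int4 v = (int4)(1, 2, 3, 4),
//   ROTATE(v, 4, 1)
// expands to ROT4_1(v), i.e. ((v).s3012) == (int4)(4, 1, 2, 3): the last
// element rotates to the front.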
| |
| /** Creates a vector of size n filled with offset values corresponding to the location of each element. |
| * @name V_OFFSn |
| * |
| * @param[in] dt The data type of the output vector |
| * |
| * @return The vector filled with offset values |
| * @{ |
| */ |
| #define V_OFFS1(dt) (dt##1)(0) |
| #define V_OFFS2(dt) (dt##2)(0, 1) |
| #define V_OFFS3(dt) (dt##3)(0, 1, 2) |
| #define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) |
| #define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) |
| #define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) |
| /** @} */ // end of group V_OFFSn |
| |
| /** Create a vector filled with offset values corresponding to the location of each element. |
| * @name VEC_OFFS |
| * |
| * @param[in] dt The data type of the output vector |
| * @param[in] s The size of the output vector |
| * |
| * @return The vector filled with offset values |
| * @{ |
| */ |
| #define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) |
| #define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) |
| /** @} */ // end of group VEC_OFFS |
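
// Example (illustrative):
//   VEC_OFFS(int, 4)
// expands to V_OFFS4(int), i.e. (int4)(0, 1, 2, 3); useful e.g. for computing
// per-lane offsets or masks from a scalar base index.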
| |
| #define VLOAD_STR(size) vload##size |
| #define VLOAD(size) VLOAD_STR(size) |
| |
| #define PIXEL_UNIT4 1 |
| #define PIXEL_UNIT8 2 |
| #define PIXEL_UNIT16 4 |
| |
/** Utility macro to convert a vector size to pixel units.
| * |
| * @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT |
| * |
 * @param[in] vec_size Vector size. Only 4, 8 and 16 are supported
| * |
| * @return The pixel unit (number of pixels) |
| * @{ |
| */ |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) |
| /** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT |
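
// Example (illustrative): since each image pixel packs 4 channel values,
//   CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(16)
// expands to PIXEL_UNIT16, i.e. 4 pixels for a 16-element vector.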
| |
| #define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); |
| #endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| |
| /** Utility macro to read a 2D OpenCL image object. |
| * |
| * @note Coordinates are not normalized |
| * |
| * @param[in] data_type Data type |
 * @param[in] n0        Number of pixels to read. Only 1, 2 and 4 are supported
| * @param[in] img OpenCL image object |
| * @param[in] x_coord The x coordinate for the top-left pixel |
| * @param[in] y_coord The y coordinate for the top-left pixel |
| * |
| * @return Pixels from the 2D OpenCL image object |
| * @{ |
| */ |
| #define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) |
| #define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) |
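
// Example (illustrative sketch; img, x and y are hypothetical names):
//   READ_IMAGE2D(float, 4, img, x, y)
// expands to read_image2d_floatx4(img, x, y), i.e. a float16 built from four
// consecutive read_imagef calls at (x, y) through (x + 3, y).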
| |
| #define VSTORE_STR(size) vstore##size |
| #define VSTORE(size) VSTORE_STR(size) |
| |
| #define float1 float |
| #define half1 half |
| #define char1 char |
| #define uchar1 uchar |
| #define short1 short |
| #define ushort1 ushort |
| #define int1 int |
| #define uint1 uint |
| #define long1 long |
| #define ulong1 ulong |
| #define double1 double |
| |
| #define vload1(OFFSET, PTR) *(OFFSET + PTR) |
| #define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA |
| |
| /** Extended partial vstore that correctly handles scalar values as well. |
| * Store the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vstore ops |
| * @name VSTORE_PARTIAL |
| * |
| * @note With this macro, the passed data can be both a vector and a scalar |
| * @note @p store_size needs to be <= @p size |
| * eg 1: Valid |
| * VSTORE_PARTIAL(16, 15) ...; |
| * eg 2: Invalid |
| * VSTORE_PARTIAL(4, 7) ...; |
| * |
| * @param[in] size The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16 |
| * @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size |
| * @{ |
| */ |
| #define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size |
| #define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) |
| |
| #define NO_STORE(data, offs, ptr) \ |
| { \ |
| } |
| |
| // Size == 1 (scalar) |
| #define vstore_partial_1_0 NO_STORE |
| #define vstore_partial_1_1 vstore1 |
| #define vstore_partial_1_2 NO_STORE |
| #define vstore_partial_1_3 NO_STORE |
| #define vstore_partial_1_4 NO_STORE |
| #define vstore_partial_1_5 NO_STORE |
| #define vstore_partial_1_6 NO_STORE |
| #define vstore_partial_1_7 NO_STORE |
| #define vstore_partial_1_8 NO_STORE |
| #define vstore_partial_1_9 NO_STORE |
| #define vstore_partial_1_10 NO_STORE |
| #define vstore_partial_1_11 NO_STORE |
| #define vstore_partial_1_12 NO_STORE |
| #define vstore_partial_1_13 NO_STORE |
| #define vstore_partial_1_14 NO_STORE |
| #define vstore_partial_1_15 NO_STORE |
| #define vstore_partial_1_16 NO_STORE |
| // Size == 2 |
| #define vstore_partial_2_0 NO_STORE |
| #define vstore_partial_2_1 vstore_partial_1 |
| #define vstore_partial_2_2 vstore_partial_2 |
| #define vstore_partial_2_3 NO_STORE |
| #define vstore_partial_2_4 NO_STORE |
| #define vstore_partial_2_5 NO_STORE |
| #define vstore_partial_2_6 NO_STORE |
| #define vstore_partial_2_7 NO_STORE |
| #define vstore_partial_2_8 NO_STORE |
| #define vstore_partial_2_9 NO_STORE |
| #define vstore_partial_2_10 NO_STORE |
| #define vstore_partial_2_11 NO_STORE |
| #define vstore_partial_2_12 NO_STORE |
| #define vstore_partial_2_13 NO_STORE |
| #define vstore_partial_2_14 NO_STORE |
| #define vstore_partial_2_15 NO_STORE |
| #define vstore_partial_2_16 NO_STORE |
| // Size == 3 |
| #define vstore_partial_3_0 NO_STORE |
| #define vstore_partial_3_1 vstore_partial_1 |
| #define vstore_partial_3_2 vstore_partial_2 |
| #define vstore_partial_3_3 vstore_partial_3 |
| #define vstore_partial_3_4 NO_STORE |
| #define vstore_partial_3_5 NO_STORE |
| #define vstore_partial_3_6 NO_STORE |
| #define vstore_partial_3_7 NO_STORE |
| #define vstore_partial_3_8 NO_STORE |
| #define vstore_partial_3_9 NO_STORE |
| #define vstore_partial_3_10 NO_STORE |
| #define vstore_partial_3_11 NO_STORE |
| #define vstore_partial_3_12 NO_STORE |
| #define vstore_partial_3_13 NO_STORE |
| #define vstore_partial_3_14 NO_STORE |
| #define vstore_partial_3_15 NO_STORE |
| #define vstore_partial_3_16 NO_STORE |
| // Size == 4 |
| #define vstore_partial_4_0 NO_STORE |
| #define vstore_partial_4_1 vstore_partial_1 |
| #define vstore_partial_4_2 vstore_partial_2 |
| #define vstore_partial_4_3 vstore_partial_3 |
| #define vstore_partial_4_4 vstore_partial_4 |
| #define vstore_partial_4_5 NO_STORE |
| #define vstore_partial_4_6 NO_STORE |
| #define vstore_partial_4_7 NO_STORE |
| #define vstore_partial_4_8 NO_STORE |
| #define vstore_partial_4_9 NO_STORE |
| #define vstore_partial_4_10 NO_STORE |
| #define vstore_partial_4_11 NO_STORE |
| #define vstore_partial_4_12 NO_STORE |
| #define vstore_partial_4_13 NO_STORE |
| #define vstore_partial_4_14 NO_STORE |
| #define vstore_partial_4_15 NO_STORE |
| #define vstore_partial_4_16 NO_STORE |
| // Size == 8 |
| #define vstore_partial_8_0 NO_STORE |
| #define vstore_partial_8_1 vstore_partial_1 |
| #define vstore_partial_8_2 vstore_partial_2 |
| #define vstore_partial_8_3 vstore_partial_3 |
| #define vstore_partial_8_4 vstore_partial_4 |
| #define vstore_partial_8_5 vstore_partial_5 |
| #define vstore_partial_8_6 vstore_partial_6 |
| #define vstore_partial_8_7 vstore_partial_7 |
| #define vstore_partial_8_8 vstore_partial_8 |
| #define vstore_partial_8_9 NO_STORE |
| #define vstore_partial_8_10 NO_STORE |
| #define vstore_partial_8_11 NO_STORE |
| #define vstore_partial_8_12 NO_STORE |
| #define vstore_partial_8_13 NO_STORE |
| #define vstore_partial_8_14 NO_STORE |
| #define vstore_partial_8_15 NO_STORE |
| #define vstore_partial_8_16 NO_STORE |
| // Size == 16 |
| #define vstore_partial_16_0 NO_STORE |
| #define vstore_partial_16_1 vstore_partial_1 |
| #define vstore_partial_16_2 vstore_partial_2 |
| #define vstore_partial_16_3 vstore_partial_3 |
| #define vstore_partial_16_4 vstore_partial_4 |
| #define vstore_partial_16_5 vstore_partial_5 |
| #define vstore_partial_16_6 vstore_partial_6 |
| #define vstore_partial_16_7 vstore_partial_7 |
| #define vstore_partial_16_8 vstore_partial_8 |
| #define vstore_partial_16_9 vstore_partial_9 |
| #define vstore_partial_16_10 vstore_partial_10 |
| #define vstore_partial_16_11 vstore_partial_11 |
| #define vstore_partial_16_12 vstore_partial_12 |
| #define vstore_partial_16_13 vstore_partial_13 |
| #define vstore_partial_16_14 vstore_partial_14 |
| #define vstore_partial_16_15 vstore_partial_15 |
| #define vstore_partial_16_16 vstore_partial_16 |
| |
| /** Partial vstore. Store the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vstore ops |
| * @name vstore_partial_n |
| * |
 * @note @p DATA needs to be a vector, not a scalar
| * @note n needs to be <= the vector width of the input variable @p DATA |
| * eg 1: Valid |
| * vstore_partial_15(var:float16, 0, 0xabcd); |
| * eg 2: Invalid |
| * vstore_partial_7(var:float4, 0, 0xabcd); |
| * |
| * @note in cases n == 1, 2, 3, 4, 8, 16, no extra vstore is invoked, thus there's no performance penalty. |
| * |
| * @param[in] DATA The name of the variable |
| * @param[in] OFFSET Offset in n |
| * @param[in] PTR The base pointer |
| * @{ |
| */ |
| #define vstore_partial_1(DATA, OFFSET, PTR) \ |
| vstore1(DATA.s0, OFFSET, PTR); |
| |
| #define vstore_partial_2(DATA, OFFSET, PTR) \ |
| vstore2(DATA.s01, OFFSET, PTR); |
| |
| #define vstore_partial_3(DATA, OFFSET, PTR) \ |
| vstore3(DATA.s012, OFFSET, PTR); |
| |
| #define vstore_partial_4(DATA, OFFSET, PTR) \ |
| vstore4(DATA.s0123, OFFSET, PTR); |
| |
| #define vstore_partial_5(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore1(DATA.s4, OFFSET, PTR + 4); |
| |
| #define vstore_partial_6(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vstore_partial_7(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vstore_partial_8(DATA, OFFSET, PTR) \ |
| vstore8(DATA.s01234567, OFFSET, PTR); |
| |
| #define vstore_partial_9(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore1(DATA.s8, OFFSET, PTR + 8); |
| |
| #define vstore_partial_10(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vstore_partial_11(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s89a, OFFSET, PTR + 8); |
| |
| #define vstore_partial_12(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8); |
| |
| #define vstore_partial_13(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_14(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_15(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8); |
| |
| #define vstore_partial_16(DATA, OFFSET, PTR) \ |
| vstore16(DATA, OFFSET, PTR); |
/** @} */ // end of group vstore_partial_n
/** @} */ // end of group VSTORE_PARTIAL
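
// Example (illustrative sketch; v and p are hypothetical names): for a float8 v,
//   vstore_partial_5(v, 0, p);
// issues one vstore4 for the lower four elements followed by a scalar store of
// v.s4 at p + 4, i.e. exactly 5 elements written with the minimum number of
// vstore ops.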
| |
// The convert_* built-in functions with the _sat modifier are not supported for floating-point
// destination types, so we create defines without _sat to overcome this issue
| #define convert_float_sat convert_float |
| #define convert_float1_sat convert_float |
| #define convert_float2_sat convert_float2 |
| #define convert_float3_sat convert_float3 |
| #define convert_float4_sat convert_float4 |
| #define convert_float8_sat convert_float8 |
| #define convert_float16_sat convert_float16 |
#define convert_half_sat convert_half
| #define convert_half1_sat convert_half |
| #define convert_half2_sat convert_half2 |
| #define convert_half3_sat convert_half3 |
| #define convert_half4_sat convert_half4 |
| #define convert_half8_sat convert_half8 |
| #define convert_half16_sat convert_half16 |
| |
| #define convert_float1 convert_float |
| #define convert_half1 convert_half |
| #define convert_char1 convert_char |
| #define convert_uchar1 convert_uchar |
| #define convert_short1 convert_short |
| #define convert_ushort1 convert_ushort |
| #define convert_int1 convert_int |
| #define convert_uint1 convert_uint |
| #define convert_long1 convert_long |
| #define convert_ulong1 convert_ulong |
| #define convert_double1 convert_double |
| |
| #define convert_char1_sat convert_char_sat |
| #define convert_uchar1_sat convert_uchar_sat |
| #define convert_short1_sat convert_short_sat |
| #define convert_ushort1_sat convert_ushort_sat |
| #define convert_int1_sat convert_int_sat |
| #define convert_uint1_sat convert_uint_sat |
| #define convert_long1_sat convert_long_sat |
| #define convert_ulong1_sat convert_ulong_sat |
| #define convert_double1_sat convert_double_sat |
| |
| #define VEC_DATA_TYPE_STR(type, size) type##size |
| #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) |
| |
| #define CONVERT_STR(x, type) (convert_##type((x))) |
| #define CONVERT(x, type) CONVERT_STR(x, type) |
| |
| #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) |
| #define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) |
| |
| #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) |
| #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) |
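
// Example (illustrative sketch; acc is a hypothetical int4 accumulator):
//   CONVERT_SAT(acc, VEC_DATA_TYPE(uchar, 4))
// expands to convert_uchar4_sat((acc)); for floating-point destinations the
// defines above silently drop _sat, e.g. CONVERT_SAT(acc, float4) becomes
// convert_float4((acc)).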
| |
| #define select_vec_dt_uchar(size) uchar##size |
| #define select_vec_dt_char(size) char##size |
| #define select_vec_dt_ushort(size) ushort##size |
| #define select_vec_dt_short(size) short##size |
| #define select_vec_dt_half(size) short##size |
| #define select_vec_dt_uint(size) uint##size |
| #define select_vec_dt_int(size) int##size |
| #define select_vec_dt_float(size) int##size |
| #define select_vec_dt_ulong(size) ulong##size |
| #define select_vec_dt_long(size) long##size |
| |
| #define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) |
| #define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) |
| #define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) |
| |
| #define sum_reduce_1(x) (x) |
| #define sum_reduce_2(x) ((x).s0) + ((x).s1) |
| #define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) |
| #define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) |
| #define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) |
| #define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) |
| |
| #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) |
| #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) |
| |
| #define max_reduce_1(x) (x) |
| #define max_reduce_2(x) max(((x).s0), ((x).s1)) |
| #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) |
| #define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) |
| #define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) |
| #define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) |
| |
| #define MAX_REDUCE_STR(x, size) max_reduce_##size(x) |
| #define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) |
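
// Example (illustrative): for a hypothetical float4 v,
//   SUM_REDUCE(v, 4)  // == v.s0 + v.s1 + v.s2 + v.s3
//   MAX_REDUCE(v, 4)  // == max(max(v.s0, v.s1), max(v.s2, v.s3))
// both reduce via a binary tree of swizzled sub-vectors.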
| |
| #define VECTOR_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define IMAGE_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define CONVERT_TO_VECTOR_STRUCT(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x) |
| |
| #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0) |
| |
| #define CONVERT_TO_IMAGE_STRUCT(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y) |
| |
| #define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \ |
| tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
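
// Example (illustrative sketch; the kernel and tensor names are hypothetical):
//   __kernel void copy_pixel(IMAGE_DECLARATION(src), IMAGE_DECLARATION(dst))
//   {
//       Image src_img = CONVERT_TO_IMAGE_STRUCT(src);
//       Image dst_img = CONVERT_TO_IMAGE_STRUCT(dst);
//       *((__global uchar *)dst_img.ptr) = *((__global uchar *)src_img.ptr);
//   }
// IMAGE_DECLARATION(src) expands to the six src_* parameters declared above, and
// CONVERT_TO_IMAGE_STRUCT(src) offsets the pointer to this work-item's element.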
| |
| /** Structure to hold Vector information */ |
| typedef struct Vector |
| { |
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
| int offset_first_element_in_bytes; /**< The offset of the first element in the source image */ |
| int stride_x; /**< Stride of the image in X dimension (in bytes) */ |
| } Vector; |
| |
| /** Structure to hold Image information */ |
| typedef struct Image |
| { |
    __global uchar *ptr;                           /**< Pointer to the starting position of the buffer */
| int offset_first_element_in_bytes; /**< The offset of the first element in the source image */ |
| int stride_x; /**< Stride of the image in X dimension (in bytes) */ |
| int stride_y; /**< Stride of the image in Y dimension (in bytes) */ |
| } Image; |
| |
| /** Structure to hold 3D tensor information */ |
| typedef struct Tensor3D |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source image */ |
| int stride_x; /**< Stride of the image in X dimension (in bytes) */ |
| int stride_y; /**< Stride of the image in Y dimension (in bytes) */ |
| int stride_z; /**< Stride of the image in Z dimension (in bytes) */ |
| } Tensor3D; |
| |
| /** Structure to hold 4D tensor information */ |
| typedef struct Tensor4D |
| { |
| __global uchar *ptr; /**< Pointer to the starting position of the buffer */ |
| int offset_first_element_in_bytes; /**< The offset of the first element in the source image */ |
| int stride_x; /**< Stride of the image in X dimension (in bytes) */ |
| int stride_y; /**< Stride of the image in Y dimension (in bytes) */ |
| int stride_z; /**< Stride of the image in Z dimension (in bytes) */ |
| int stride_w; /**< Stride of the image in W dimension (in bytes) */ |
| } Tensor4D; |
| |
| /** Wrap vector information into a Vector structure, and make the pointer point at this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source vector |
| * @param[in] stride_x Stride of the vector in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes) |
| * |
| * @return A vector object |
| */ |
| inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x) |
| { |
| Vector vector = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| }; |
| vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x; |
| return vector; |
| } |
| |
| /** Wrap image information into an Image structure, and make the pointer point at this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source image |
| * @param[in] stride_x Stride of the image in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] stride_y Stride of the image in Y dimension (in bytes) |
| * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes) |
| * |
| * @return An image object |
| */ |
| inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y; |
| return img; |
| } |
| |
| /** Wrap 3D tensor information into an image structure, and make the pointer point at this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source image |
| * @param[in] stride_x Stride of the image in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] stride_y Stride of the image in Y dimension (in bytes) |
| * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] stride_z Stride of the image in Z dimension (in bytes) |
| * @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes) |
| * |
| * @return An image object |
| */ |
| inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return img; |
| } |
| |
| /** Wrap 3D tensor information into a tensor structure, and make the pointer point at this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source image |
| * @param[in] stride_x Stride of the image in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] stride_y Stride of the image in Y dimension (in bytes) |
| * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] stride_z Stride of the image in Z dimension (in bytes) |
| * @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes) |
| * |
| * @return A 3D tensor object |
| */ |
| inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return tensor; |
| } |
| |
| /** Wrap 3D tensor information into a tensor structure without updating the pointer to this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source image |
| * @param[in] stride_x Stride of the image in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] stride_y Stride of the image in Y dimension (in bytes) |
| * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] stride_z Stride of the image in Z dimension (in bytes) |
| * @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes) |
| * |
| * @return A 3D tensor object |
| */ |
| inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| return tensor; |
| } |
| |
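| /** Wrap 4D tensor information into a tensor structure, and make the pointer point at this workitem's data. |
| * |
| * @param[in] ptr Pointer to the starting position of the buffer |
| * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor |
| * @param[in] stride_x Stride of the tensor in X dimension (in bytes) |
| * @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] stride_y Stride of the tensor in Y dimension (in bytes) |
| * @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] stride_z Stride of the tensor in Z dimension (in bytes) |
| * @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] stride_w Stride of the tensor in W dimension (in bytes) |
| * @param[in] step_w stride_w * number of elements along W processed per workitem(in bytes) |
| * @param[in] mod_size Size of the Z dimension, used to split get_global_id(2) into the z index (gid2 % mod_size) and the batch index w (gid2 / mod_size) |
| * |
| * @return A 4D tensor object |
| */ |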
| inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w, |
| uint step_w, |
| uint mod_size) |
| { |
| Tensor4D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z, |
| .stride_w = stride_w |
| }; |
| |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w; |
| return tensor; |
| } |
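| |
| /* Worked example (illustrative): with mod_size = 4 (the Z extent), a workitem with |
| * get_global_id(2) == 9 lands on z = 9 % 4 = 1 and batch w = 9 / 4 = 2, so the Z and |
| * batch dimensions share one NDRange axis instead of needing a fourth one. */ |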
| |
| /** Get the pointer position of a Vector |
| * |
| * @param[in] vec Pointer to the Vector structure |
| * @param[in] x Relative X position |
| */ |
| inline __global const uchar *vector_offset(const Vector *vec, int x) |
| { |
| return vec->ptr + x * vec->stride_x; |
| } |
| |
| /** Get the pointer position of an Image |
| * |
| * @param[in] img Pointer to the Image structure |
| * @param[in] x Relative X position |
| * @param[in] y Relative Y position |
| */ |
| inline __global uchar *offset(const Image *img, int x, int y) |
| { |
| return img->ptr + x * img->stride_x + y * img->stride_y; |
| } |
| |
| /** Get the pointer position of a Tensor3D |
| * |
| * @param[in] tensor Pointer to the Tensor3D structure |
| * @param[in] x Relative X position |
| * @param[in] y Relative Y position |
| * @param[in] z Relative Z position |
| */ |
| inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z; |
| } |
| |
| /** Get the pointer position of a Tensor4D |
| * |
| * @param[in] tensor Pointer to the Tensor4D structure |
| * @param[in] x Relative X position |
| * @param[in] y Relative Y position |
| * @param[in] z Relative Z position |
| * @param[in] w Relative W position |
| */ |
| inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w; |
| } |
| |
| /** Get the pointer for a given linear index of a Tensor3D |
| * |
| * @param[in] tensor Pointer to the Tensor3D structure |
| * @param[in] width Width of the input tensor |
| * @param[in] height Height of the input tensor |
| * @param[in] depth Depth of the input tensor |
| * @param[in] index Linear index |
| */ |
| inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index) |
| { |
| uint num_elements = width * height; |
| |
| const uint z = index / num_elements; |
| |
| index %= num_elements; |
| |
| const uint y = index / width; |
| |
| index %= width; |
| |
| const uint x = index; |
| |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes; |
| } |
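| |
| /* Worked example (illustrative): for width = 4, height = 3, depth = 2, a linear index of 17 |
| * decomposes as z = 17 / 12 = 1, then 17 % 12 = 5 gives y = 5 / 4 = 1 and x = 5 % 4 = 1, |
| * i.e. the pointer for element (x, y, z) = (1, 1, 1). */ |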
| |
| #endif // _HELPER_H |
| |
| #if GPU_ARCH == GPU_ARCH_BIFROST |
| #define MLA(a, b, c) (fma(c, b, a)) |
| #else // GPU_ARCH == GPU_ARCH_BIFROST |
| #define MLA(a, b, c) ((b) * (c) + (a)) |
| #endif // GPU_ARCH == GPU_ARCH_BIFROST |
| |
| // Hard-Swish |
| #define hard_swish_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667)) |
| |
| // Logistic Activation |
| #define logistic_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x))) |
| |
| // Hyperbolic Tangent Activation |
| #define tanh_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x)) |
| |
| // RELU Activation |
| #define relu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (max((DATA_TYPE)0.0, x)) |
| |
| // Bounded RELU Activation |
| #define brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x))) |
| |
| // Lower Upper Bounded RELU Activation |
| #define lu_brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL)) |
| |
| // Leaky RELU Activation |
| #define lrelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0)) |
| |
| // Soft RELU Activation |
| #define srelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (log((DATA_TYPE)1.0 + exp(x))) |
| |
| // ELU Activation |
| #define elu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))isgreaterequal(x, (DATA_TYPE)0.0))) |
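| /* Note: on vector types isgreaterequal() returns -1 (all bits set) per true lane, so select() |
| * keeps x where x >= 0 and takes the A_VAL * (exp(x) - 1) branch elsewhere; the |
| * SELECT_VEC_DATA_TYPE cast gives the mask the element width select() expects. */ |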
| |
| // Absolute Activation |
| #define abs_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (fabs(x)) |
| |
| // Square Activation |
| #define square_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * x) |
| |
| // Square-root Activation |
| #define sqrt_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (sqrt(x)) |
| |
| // Linear Activation |
| #define linear_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x)) |
| |
| // Identity Activation |
| #define identity_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x) |
| |
| #define ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) op##_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) |
| |
| #define ACTIVATION(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) |
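| |
| /* Illustrative expansion: ACTIVATION(lu_brelu, float, 4, x, 6.0f, -2.0f) token-pastes to |
| * lu_brelu_op(float, 4, x, 6.0f, -2.0f), i.e. (min(max(x, (float)-2.0f), (float)6.0f)). |
| * The extra ACT_OP level forces the op argument to be macro-expanded before ## pasting. */ |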
| |
| /** This kernel performs an activation function on quantized inputs by processing them in the float domain. |
| * |
| * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time |
| * |
| * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short |
| * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 |
| * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE |
| * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively. |
| * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively. |
| * @note Quantization offsets of the input/output tensors are passed in only if asymmetric with -DO1_VAL= and -DO2_VAL= respectively. |
| * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128. |
| * |
| * @param[in] input_ptr Pointer to the source image. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM16 |
| * @param[in] input_stride_x Stride of the source image in X dimension (in bytes) |
| * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes) |
| * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source image |
| * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr |
| * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes) |
| * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes) |
| * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] output_stride_z (Optional) Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image |
| */ |
| __kernel void activation_layer_quant_f32( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif /* not IN_PLACE */ |
| ) |
| { |
| uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0); |
| |
| // Get pixels pointer |
| __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z; |
| #ifdef IN_PLACE |
| __global uchar *output_addr = input_addr; |
| #else /* IN_PLACE */ |
| __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z; |
| #endif /* IN_PLACE */ |
| |
| // Load data |
| TYPE data0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr); |
| |
| VEC_FLOAT data_flt = CONVERT(data0, VEC_FLOAT); |
| #if defined(O1_VAL) |
| data_flt = round(data_flt - (float)O1_VAL) * ((float)S1_VAL); |
| #else // defined(O1_VAL) |
| data_flt = round(data_flt) * ((float)S1_VAL); |
| #endif // defined(O1_VAL) |
| data_flt = ACTIVATION(ACT, float, VEC_SIZE, data_flt, A_VAL, B_VAL); |
| |
| #if defined(O2_VAL) |
| data0 = CONVERT_SAT(round(data_flt / ((float)S2_VAL)) + (float)O2_VAL, TYPE); |
| #else // defined(O2_VAL) |
| data0 = CONVERT_SAT(round(data_flt / ((float)S2_VAL)), TYPE); |
| #endif // defined(O2_VAL) |
| |
| // Store result |
| STORE_VECTOR_SELECT(data, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
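| |
| /* Illustrative host-side build options for the kernel above (all values hypothetical), |
| * passed to clBuildProgram(): |
| * |
| * "-DFLOAT_DOMAIN -DACT=lu_brelu -DDATA_TYPE=char -DVEC_SIZE=16 -DVEC_SIZE_LEFTOVER=3 " |
| * "-DA_VAL=6.0f -DB_VAL=-2.0f -DS1_VAL=0.0457f -DS2_VAL=0.0392f -DO1_VAL=-128 -DO2_VAL=-128 " |
| * "-DCONST_0=-128" |
| * |
| * TYPE and VEC_FLOAT are derived from DATA_TYPE and VEC_SIZE elsewhere in this file. */ |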
| |
| #else // defined(FLOAT_DOMAIN) |
| // Activations performed in the quantized domain |
| |
| #if defined(ACT) |
| /** This kernel performs an activation function directly in the quantized domain. |
| * |
| * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time |
| * |
| * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short |
| * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16 |
| * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER. e.g. -DVEC_SIZE_LEFTOVER=3. It is defined as the remainder of the input's first dimension divided by VEC_SIZE |
| * @note Activation function should be given as a preprocessor argument using -DACT=name. e.g. -DACT=TANH |
| * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively. |
| * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively. |
| * @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively. |
| * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128. |
| * |
| * @param[in] input_ptr Pointer to the source image. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM16 |
| * @param[in] input_stride_x Stride of the source image in X dimension (in bytes) |
| * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] input_stride_y Stride of the source image in Y dimension (in bytes) |
| * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source image |
| * @param[out] output_ptr (Optional) Pointer to the destination image. Supported data types: same as @p input_ptr |
| * @param[in] output_stride_x (Optional) Stride of the destination image in X dimension (in bytes) |
| * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_stride_y (Optional) Stride of the destination image in Y dimension (in bytes) |
| * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] output_stride_z (Optional) Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image |
| */ |
| __kernel void activation_layer_quant( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif /* not IN_PLACE */ |
| ) |
| { |
| uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0); |
| |
| // Get pixels pointer |
| __global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z; |
| #ifdef IN_PLACE |
| __global uchar *output_addr = input_addr; |
| #else /* IN_PLACE */ |
| __global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z; |
| #endif /* IN_PLACE */ |
| |
| // Load data |
| TYPE data0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr); |
| |
| data0 = PERFORM_ACTIVATION_QUANT(ACT, data0); |
| |
| // Store result |
| STORE_VECTOR_SELECT(data, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| } |
| #endif // defined(ACT) |
| #endif // defined(FLOAT_DOMAIN) |
| #define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QUANT(ACTIVATION_TYPE, x) |
| #else /* defined(ACTIVATION_TYPE) && defined(CONST_0) */ |
| #define ACTIVATION_FUNC(x) (x) |
| #endif /* defined(ACTIVATION_TYPE) && defined(CONST_0) */ |
| |
| #define VEC_INT VEC_DATA_TYPE(int, VEC_SIZE) |
| #define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE) |
| #define VEC_SHORT VEC_DATA_TYPE(short, VEC_SIZE) |
| |
| #if defined(DATA_TYPE) && defined(WEIGHTS_TYPE) |
| |
| #define VEC_TYPE(size) VEC_DATA_TYPE(DATA_TYPE, size) |
| |
| #if defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER)) |
| |
| #if defined(WEIGHTS_PROMOTED_TYPE) |
| #define VEC_WEIGHTS_PROMOTED_TYPE(size) VEC_DATA_TYPE(WEIGHTS_PROMOTED_TYPE, size) |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #define ARM_DOT(x, y, val) val = arm_dot_acc((x), (y), val); |
| #else // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #define ARM_DOT(x, y, val) val += arm_dot((x), (y)); |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
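| |
| /* Both variants give ARM_DOT(x, y, val) the same meaning: val += dot(x, y) for two 4-element |
| * integer vectors. arm_dot_acc fuses the accumulation into a single instruction when the |
| * cl_arm_integer_dot_product_accumulate_int8 extension is present; otherwise the plain |
| * arm_dot result is added to val separately. */ |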
| |
| #if defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) |
| |
| #if CONV_STRIDE_X > 3 |
| #error "Stride X not supported" |
| #endif /* CONV_STRIDE_X > 3 */ |
| |
| #if !defined(IS_DOT8) |
| |
| #if DILATION_X == 1 |
| |
| #if CONV_STRIDE_X == 1 |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| int8 temp0 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value)), int8); \ |
| int2 temp1 = CONVERT(vload2(0, (__global DATA_TYPE *)(first_value + 8 * sizeof(DATA_TYPE))), int2); \ |
| \ |
| left = CONVERT(temp0.s01234567, int8); \ |
| middle = CONVERT((int8)(temp0.s1234, temp0.s567, temp1.s0), int8); \ |
| right = CONVERT((int8)(temp0.s2345, temp0.s67, temp1.s01), int8); \ |
| }) |
| #elif CONV_STRIDE_X == 2 |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \ |
| int temp1 = CONVERT(*((__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))), int); \ |
| \ |
| left = CONVERT(temp0.s02468ace, int8); \ |
| middle = CONVERT(temp0.s13579bdf, int8); \ |
| right = CONVERT((int8)(temp0.s2468, temp0.sace, temp1), int8); \ |
| }) |
| #else /* CONV_STRIDE_X */ |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \ |
| int8 temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))), int8); \ |
| \ |
| left = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \ |
| middle = CONVERT((int8)(temp0.s147a, temp0.sd, temp1.s036), int8); \ |
| right = CONVERT((int8)(temp0.s258b, temp0.se, temp1.s147), int8); \ |
| }) |
| #endif /* CONV_STRIDE_X */ |
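| |
| /* Illustrative reading of the macros above: the 8 outputs of one row need input columns |
| * x .. x+9 when CONV_STRIDE_X == 1, so a vload8 plus a vload2 cover them and the three taps |
| * are the shifted windows left = in[0..7], middle = in[1..8], right = in[2..9]. For stride 2 |
| * the even lanes s02468ace form left and the odd lanes s13579bdf form middle; for stride 3 |
| * every third lane is gathered from a vload16 plus a vload8. */ |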
| |
| #else /* DILATION_X == 1 */ |
| |
| #if CONV_STRIDE_X == 1 |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| left = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value)), int8); \ |
| middle = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))), int8); \ |
| right = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))), int8); \ |
| }) |
| #elif CONV_STRIDE_X == 2 |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \ |
| left = CONVERT(temp0.s02468ace, int8); \ |
| \ |
| temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))), int16); \ |
| middle = CONVERT(temp0.s02468ace, int8); \ |
| \ |
| temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))), int16); \ |
| right = CONVERT(temp0.s02468ace, int8); \ |
| }) |
| #else /* CONV_STRIDE_X */ |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| int16 temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value)), int16); \ |
| int8 temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))), int8); \ |
| left = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \ |
| \ |
| temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))), int16); \ |
| temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + (16 + DILATION_X) * sizeof(DATA_TYPE))), int8); \ |
| middle = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \ |
| \ |
| temp0 = CONVERT(vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))), int16); \ |
| temp1 = CONVERT(vload8(0, (__global DATA_TYPE *)(first_value + (16 + 2 * DILATION_X) * sizeof(DATA_TYPE))), int8); \ |
| right = CONVERT((int8)(temp0.s0369, temp0.scf, temp1.s25), int8); \ |
| }) |
| |
| #endif /* CONV_STRIDE_X */ |
| #endif /* DILATION_X==1 */ |
| |
| /** This function computes a quantized 3x3 depthwise convolution for the NCHW data layout. |
| * |
| * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED |
| * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) |
| * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) |
| * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor |
| * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr |
| * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) |
| * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) |
| * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) |
| * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor |
| * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL |
| * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) |
| * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) |
| * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes) |
| * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor |
| * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32 |
| * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes) |
| * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector |
| * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32 |
| * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes) |
| * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector |
| * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32 |
| * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) |
| * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector |
| */ |
| |
| __kernel void dwc_3x3_native_quantized8_nchw( |
| TENSOR3D_DECLARATION(src), |
| TENSOR3D_DECLARATION(dst), |
| TENSOR3D_DECLARATION(weights), |
| VECTOR_DECLARATION(output_multipliers), |
| VECTOR_DECLARATION(output_shifts) |
| #if defined(HAS_BIAS) |
| , |
| VECTOR_DECLARATION(biases) |
| #endif //defined(HAS_BIAS) |
| ) |
| { |
| Image src = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(src); |
| Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst); |
| Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights); |
| Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_multipliers); |
| Vector output_shifts = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_shifts); |
| |
| // Extract channel and linearized batch indices |
| const int channel = get_global_id(2) % DST_CHANNELS; |
| const int batch = get_global_id(2) / DST_CHANNELS; |
| |
| #if defined(HAS_BIAS) |
| Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases); |
| |
| int bias_value = *((__global int *)(vector_offset(&biases, channel))); |
| #endif //defined(HAS_BIAS) |
| |
| // Load relevant input and weights data (accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER) |
| src.ptr -= batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z + (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z; |
| __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z; |
| |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 3) |
| w0 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 0 * weights_stride_y)); |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 3) |
| w1 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 1 * weights_stride_y)); |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 3) |
| w2 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * weights_stride_y)); |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| const int output_multiplier = *((__global int *)vector_offset(&output_multipliers, channel)); |
| const int output_shift = *((__global int *)vector_offset(&output_shifts, channel)); |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| |
| int8 values0 = 0; |
| int8 sum0 = 0; |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| int8 values1 = 0; |
| int8 sum1 = 0; |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */ |
| |
| // Row0 |
| int8 left, middle, right; |
| GET_VALUES(src.ptr + 0 * src_stride_y, left, middle, right); |
| values0 += left * (int8)(w0.s0); |
| values0 += middle * (int8)(w0.s1); |
| values0 += right * (int8)(w0.s2); |
| |
| #if WEIGHTS_OFFSET != 0 |
| sum0 += left + middle + right; |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| |
| // Row1 |
| GET_VALUES(src.ptr + DILATION_Y * src_stride_y, left, middle, right); |
| values0 += left * (int8)(w1.s0); |
| values0 += middle * (int8)(w1.s1); |
| values0 += right * (int8)(w1.s2); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += left * (int8)(w0.s0); |
| values1 += middle * (int8)(w0.s1); |
| values1 += right * (int8)(w0.s2); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */ |
| |
| #if WEIGHTS_OFFSET != 0 |
| int8 tmp = left + middle + right; |
| sum0 += tmp; |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| sum1 += tmp; |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */ |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| |
| // Row2 |
| GET_VALUES(src.ptr + 2 * DILATION_Y * src_stride_y, left, middle, right); |
| values0 += left * (int8)(w2.s0); |
| values0 += middle * (int8)(w2.s1); |
| values0 += right * (int8)(w2.s2); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += left * (int8)(w1.s0); |
| values1 += middle * (int8)(w1.s1); |
| values1 += right * (int8)(w1.s2); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */ |
| |
| #if WEIGHTS_OFFSET != 0 |
| tmp = left + middle + right; |
| sum0 += tmp; |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| sum1 += tmp; |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| // Row3 |
| GET_VALUES(src.ptr + 3 * src_stride_y, left, middle, right); |
| values1 += left * (int8)(w2.s0); |
| values1 += middle * (int8)(w2.s1); |
| values1 += right * (int8)(w2.s2); |
| |
| #if WEIGHTS_OFFSET != 0 |
| sum1 += left + middle + right; |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */ |
| |
| #if defined(HAS_BIAS) |
| values0 += (int8)(bias_value); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += (int8)(bias_value); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y == 1 */ |
| #endif //defined(HAS_BIAS) |
| |
| #if WEIGHTS_OFFSET != 0 |
| values0 += sum0 * (int8)(WEIGHTS_OFFSET); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += sum1 * (int8)(WEIGHTS_OFFSET); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| |
| #if INPUT_OFFSET != 0 |
| VEC_WEIGHTS_PROMOTED_TYPE(3) |
| tmp_we = CONVERT(w0, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w1, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w2, VEC_WEIGHTS_PROMOTED_TYPE(3)); |
| |
| WEIGHTS_PROMOTED_TYPE sum_weights = tmp_we.s0 + tmp_we.s1 + tmp_we.s2; |
| values0 += sum_weights * (int8)(INPUT_OFFSET); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += sum_weights * (int8)(INPUT_OFFSET); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ |
| #endif /* INPUT_OFFSET != 0 */ |
| |
| #if K_OFFSET != 0 |
| values0 += (int8)(K_OFFSET); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += (int8)(K_OFFSET); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ |
| #endif /* K_OFFSET != 0 */ |
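| |
| /* The corrections above implement the usual expansion of a quantized product (a sketch of |
| * the algebra, not library documentation): |
| * sum((a + INPUT_OFFSET) * (w + WEIGHTS_OFFSET)) |
| * = sum(a * w) + WEIGHTS_OFFSET * sum(a) + INPUT_OFFSET * sum(w) + 9 * INPUT_OFFSET * WEIGHTS_OFFSET |
| * where sum0/sum1 carry sum(a), sum_weights carries sum(w), and K_OFFSET is expected to hold |
| * the constant 9 * INPUT_OFFSET * WEIGHTS_OFFSET term for the 3x3 window. */ |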
| |
| #if defined(REAL_MULTIPLIER) |
| |
| values0 = CONVERT(round(CONVERT(values0, float8) * (float8)REAL_MULTIPLIER), int8); |
| |
| #else // defined(REAL_MULTIPLIER) |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| int8 res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, output_multiplier, output_shift, 8); |
| int8 res0_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, output_multiplier, output_shift, 8); |
| values0 = select(res0_shift_lt0, res0_shift_gt0, (int8)(output_shift) >= 0); |
| #else // defined(PER_CHANNEL_QUANTIZATION) |
| #if OUTPUT_SHIFT < 0 |
| values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); |
| #else // OUTPUT_SHIFT < 0 |
| values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); |
| #endif // OUTPUT_SHIFT < 0 |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| |
| #endif // defined(REAL_MULTIPLIER) |
| |
| values0 += (int8)OUTPUT_OFFSET; |
| VEC_TYPE(8) |
| res0 = CONVERT_SAT(values0, VEC_TYPE(8)); |
| |
| vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| #if defined(REAL_MULTIPLIER) |
| |
| values1 = CONVERT(round(CONVERT(values1, float8) * (float8)REAL_MULTIPLIER), int8); |
| |
| #else // defined(REAL_MULTIPLIER) |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| int8 res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, output_multiplier, output_shift, 8); |
| int8 res1_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, output_multiplier, output_shift, 8); |
| values1 = select(res1_shift_lt0, res1_shift_gt0, (int8)(output_shift) >= 0); |
| #else // defined(PER_CHANNEL_QUANTIZATION) |
| #if OUTPUT_SHIFT < 0 |
| values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); |
| #else // OUTPUT_SHIFT < 0 |
| values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); |
| #endif // OUTPUT_SHIFT < 0 |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| |
| #endif // defined(REAL_MULTIPLIER) |
| |
| values1 += (int8)OUTPUT_OFFSET; |
| VEC_TYPE(8) |
| res1 = CONVERT_SAT(values1, VEC_TYPE(8)); |
| |
| vstore8(ACTIVATION_FUNC(res1), 0, dst.ptr + dst_stride_y); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ |
| } |
| |
| #else // !defined(IS_DOT8) |
| |
| #if DILATION_X == 1 |
| #if CONV_STRIDE_X == 1 |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| VEC_TYPE(8) \ |
| temp0 = vload8(0, (__global DATA_TYPE *)(first_value)); \ |
| VEC_TYPE(2) \ |
| temp1 = vload2(0, (__global DATA_TYPE *)(first_value + 8 * sizeof(DATA_TYPE))); \ |
| \ |
| left = temp0.s01234567; \ |
| middle = (VEC_TYPE(8))(temp0.s1234, temp0.s567, temp1.s0); \ |
| right = (VEC_TYPE(8))(temp0.s2345, temp0.s67, temp1.s01); \ |
| }) |
| #elif CONV_STRIDE_X == 2 |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| VEC_TYPE(16) \ |
| temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \ |
| DATA_TYPE temp1 = *((__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \ |
| \ |
| left = temp0.s02468ace; \ |
| middle = temp0.s13579bdf; \ |
| right = (VEC_TYPE(8))(temp0.s2468, temp0.sace, temp1); \ |
| }) |
| #else /* CONV_STRIDE_X */ |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| VEC_TYPE(16) \ |
| temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \ |
| VEC_TYPE(8) \ |
| temp1 = vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \ |
| \ |
| left = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \ |
| middle = (VEC_TYPE(8))(temp0.s147a, temp0.sd, temp1.s036); \ |
| right = (VEC_TYPE(8))(temp0.s258b, temp0.se, temp1.s147); \ |
| }) |
| #endif /* CONV_STRIDE_X */ |
| #else /*DILATION_X==1*/ |
| |
| #if CONV_STRIDE_X == 1 |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| left = vload8(0, (__global DATA_TYPE *)(first_value)); \ |
| middle = vload8(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \ |
| right = vload8(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))); \ |
| }) |
| #elif CONV_STRIDE_X == 2 |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| VEC_TYPE(16) \ |
| temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \ |
| left = temp0.s02468ace; \ |
| temp0 = vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \ |
| middle = temp0.s02468ace; \ |
| temp0 = vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))); \ |
| right = temp0.s02468ace; \ |
| }) |
| #else /* CONV_STRIDE_X */ |
| #define GET_VALUES(first_value, left, middle, right) \ |
| ({ \ |
| VEC_TYPE(16) \ |
| temp0 = vload16(0, (__global DATA_TYPE *)(first_value)); \ |
| VEC_TYPE(8) \ |
| temp1 = vload8(0, (__global DATA_TYPE *)(first_value + 16 * sizeof(DATA_TYPE))); \ |
| left = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \ |
| \ |
| temp0 = vload16(0, (__global DATA_TYPE *)(first_value + DILATION_X * sizeof(DATA_TYPE))); \ |
| temp1 = vload8(0, (__global DATA_TYPE *)(first_value + (16 + DILATION_X) * sizeof(DATA_TYPE))); \ |
| middle = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \ |
| \ |
| temp0 = vload16(0, (__global DATA_TYPE *)(first_value + 2 * DILATION_X * sizeof(DATA_TYPE))); \ |
| temp1 = vload8(0, (__global DATA_TYPE *)(first_value + (16 + 2 * DILATION_X) * sizeof(DATA_TYPE))); \ |
| right = (VEC_TYPE(8))(temp0.s0369, temp0.scf, temp1.s25); \ |
| }) |
| |
| #endif /* CONV_STRIDE_X */ |
| #endif /*DILATION_X==1*/ |
| /** This function computes a quantized 3x3 depthwise convolution using the dot product instruction when the data layout is NCHW. |
| * |
| * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED |
| * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) |
| * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) |
| * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor |
| * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr |
| * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) |
| * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) |
| * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) |
| * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor |
| * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL |
| * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) |
| * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) |
| * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes) |
| * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor |
| * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32 |
| * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes) |
| * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector |
| * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32 |
| * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes) |
| * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector |
| * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32 |
| * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) |
| * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector |
| */ |
| |
| __kernel void dwc_3x3_native_quantized8_dot8_nchw( |
| TENSOR3D_DECLARATION(src), |
| TENSOR3D_DECLARATION(dst), |
| TENSOR3D_DECLARATION(weights), |
| VECTOR_DECLARATION(output_multipliers), |
| VECTOR_DECLARATION(output_shifts) |
| #if defined(HAS_BIAS) |
| , |
| VECTOR_DECLARATION(biases) |
| #endif //defined(HAS_BIAS) |
| ) |
| { |
| Image src = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(src); |
| Image dst = CONVERT_TENSOR3D_TO_IMAGE_STRUCT(dst); |
| Tensor3D weights = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(weights); |
| Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_multipliers); |
| Vector output_shifts = CONVERT_TO_VECTOR_STRUCT_NO_STEP(output_shifts); |
| |
| // Extract channel and linearized batch indices |
| const int channel = get_global_id(2) % DST_CHANNELS; |
| const int batch = get_global_id(2) / DST_CHANNELS; |
| |
| #if defined(HAS_BIAS) |
| Vector biases = CONVERT_TO_VECTOR_STRUCT_NO_STEP(biases); |
| |
| const int bias_value = *((__global int *)(vector_offset(&biases, channel))); |
| #endif //defined(HAS_BIAS) |
| |
| // Load relevant input and weights data (accounts for the depth multiplier when indexing the input, OFM = IFM * DEPTH_MULTIPLIER) |
| src.ptr -= batch * (DST_CHANNELS / DEPTH_MULTIPLIER) * (DEPTH_MULTIPLIER - 1) * src_step_z + (channel - (channel / DEPTH_MULTIPLIER)) * src_step_z; |
| __global uchar *weights_addr = weights.ptr + get_global_id(0) * weights_step_x + get_global_id(1) * weights_step_y + channel * weights_step_z; |
| |
| VEC_TYPE(3) |
| w0 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 0 * weights_stride_y)); |
| VEC_TYPE(3) |
| w1 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 1 * weights_stride_y)); |
| VEC_TYPE(3) |
| w2 = vload3(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * weights_stride_y)); |
| |
| const int output_multiplier = *((__global int *)vector_offset(&output_multipliers, 0)); |
| const int output_shift = *((__global int *)vector_offset(&output_shifts, 0)); |
| |
| VEC_TYPE(8) |
| left0, middle0, right0; |
| VEC_TYPE(8) |
| left1, middle1, right1; |
| VEC_TYPE(8) |
| left2, middle2, right2; |
| |
| int8 values0 = 0; |
| int8 sum0 = 0; |
| |
| GET_VALUES(src.ptr + 0 * src_stride_y, left0, middle0, right0); |
| GET_VALUES(src.ptr + DILATION_Y * src_stride_y, left1, middle1, right1); |
| GET_VALUES(src.ptr + 2 * DILATION_Y * src_stride_y, left2, middle2, right2); |
| |
| #if WEIGHTS_OFFSET != 0 |
| sum0 += convert_int8(left0) + convert_int8(middle0) + convert_int8(right0); |
| sum0 += convert_int8(left1) + convert_int8(middle1) + convert_int8(right1); |
| sum0 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2); |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| // If conv_stride_y equals 1, we compute two output rows |
| |
| VEC_TYPE(8) |
| left3, middle3, right3; |
| int8 values1 = 0; |
| int8 sum1 = 0; |
| |
| GET_VALUES(src.ptr + 3 * src_stride_y, left3, middle3, right3); |
| |
| #if WEIGHTS_OFFSET != 0 |
| sum1 += convert_int8(left1) + convert_int8(middle1) + convert_int8(right1); |
| sum1 += convert_int8(left2) + convert_int8(middle2) + convert_int8(right2); |
| sum1 += convert_int8(left3) + convert_int8(middle3) + convert_int8(right3); |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| #endif // CONV_STRIDE_Y == 1 && DILATION_Y==1 |
| |
| ARM_DOT((VEC_TYPE(4))(left0.s0, middle0.s0, right0.s0, left1.s0), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s0); |
| ARM_DOT((VEC_TYPE(4))(middle1.s0, right1.s0, left2.s0, middle2.s0), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s0); |
| values0.s0 += right2.s0 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left0.s1, middle0.s1, right0.s1, left1.s1), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s1); |
| ARM_DOT((VEC_TYPE(4))(middle1.s1, right1.s1, left2.s1, middle2.s1), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s1); |
| values0.s1 += right2.s1 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left0.s2, middle0.s2, right0.s2, left1.s2), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s2); |
| ARM_DOT((VEC_TYPE(4))(middle1.s2, right1.s2, left2.s2, middle2.s2), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s2); |
| values0.s2 += right2.s2 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left0.s3, middle0.s3, right0.s3, left1.s3), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s3); |
| ARM_DOT((VEC_TYPE(4))(middle1.s3, right1.s3, left2.s3, middle2.s3), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s3); |
| values0.s3 += right2.s3 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left0.s4, middle0.s4, right0.s4, left1.s4), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s4); |
| ARM_DOT((VEC_TYPE(4))(middle1.s4, right1.s4, left2.s4, middle2.s4), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s4); |
| values0.s4 += right2.s4 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left0.s5, middle0.s5, right0.s5, left1.s5), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s5); |
| ARM_DOT((VEC_TYPE(4))(middle1.s5, right1.s5, left2.s5, middle2.s5), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s5); |
| values0.s5 += right2.s5 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left0.s6, middle0.s6, right0.s6, left1.s6), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s6); |
| ARM_DOT((VEC_TYPE(4))(middle1.s6, right1.s6, left2.s6, middle2.s6), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s6); |
| values0.s6 += right2.s6 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left0.s7, middle0.s7, right0.s7, left1.s7), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values0.s7); |
| ARM_DOT((VEC_TYPE(4))(middle1.s7, right1.s7, left2.s7, middle2.s7), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values0.s7); |
| values0.s7 += right2.s7 * w2.s2; |
| |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| ARM_DOT((VEC_TYPE(4))(left1.s0, middle1.s0, right1.s0, left2.s0), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s0); |
| ARM_DOT((VEC_TYPE(4))(middle2.s0, right2.s0, left3.s0, middle3.s0), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s0); |
| values1.s0 += right3.s0 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left1.s1, middle1.s1, right1.s1, left2.s1), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s1); |
| ARM_DOT((VEC_TYPE(4))(middle2.s1, right2.s1, left3.s1, middle3.s1), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s1); |
| values1.s1 += right3.s1 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left1.s2, middle1.s2, right1.s2, left2.s2), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s2); |
| ARM_DOT((VEC_TYPE(4))(middle2.s2, right2.s2, left3.s2, middle3.s2), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s2); |
| values1.s2 += right3.s2 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left1.s3, middle1.s3, right1.s3, left2.s3), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s3); |
| ARM_DOT((VEC_TYPE(4))(middle2.s3, right2.s3, left3.s3, middle3.s3), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s3); |
| values1.s3 += right3.s3 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left1.s4, middle1.s4, right1.s4, left2.s4), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s4); |
| ARM_DOT((VEC_TYPE(4))(middle2.s4, right2.s4, left3.s4, middle3.s4), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s4); |
| values1.s4 += right3.s4 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left1.s5, middle1.s5, right1.s5, left2.s5), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s5); |
| ARM_DOT((VEC_TYPE(4))(middle2.s5, right2.s5, left3.s5, middle3.s5), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s5); |
| values1.s5 += right3.s5 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left1.s6, middle1.s6, right1.s6, left2.s6), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s6); |
| ARM_DOT((VEC_TYPE(4))(middle2.s6, right2.s6, left3.s6, middle3.s6), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s6); |
| values1.s6 += right3.s6 * w2.s2; |
| |
| ARM_DOT((VEC_TYPE(4))(left1.s7, middle1.s7, right1.s7, left2.s7), (VEC_TYPE(4))(w0.s0, w0.s1, w0.s2, w1.s0), values1.s7); |
| ARM_DOT((VEC_TYPE(4))(middle2.s7, right2.s7, left3.s7, middle3.s7), (VEC_TYPE(4))(w1.s1, w1.s2, w2.s0, w2.s1), values1.s7); |
| values1.s7 += right3.s7 * w2.s2; |
| #endif // CONV_STRIDE_Y == 1 && DILATION_Y==1 |
| |
| #if defined(HAS_BIAS) |
| values0 += (int8)(bias_value); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += (int8)(bias_value); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ |
| #endif //defined(HAS_BIAS) |
| |
| #if WEIGHTS_OFFSET != 0 |
| values0 += sum0 * (int8)(WEIGHTS_OFFSET); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += sum1 * (int8)(WEIGHTS_OFFSET); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1 */ |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| |
| #if INPUT_OFFSET != 0 |
| WEIGHTS_PROMOTED_TYPE sum_weights = 0; |
| VEC_WEIGHTS_PROMOTED_TYPE(3) |
| tmp_we = CONVERT(w0, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w1, VEC_WEIGHTS_PROMOTED_TYPE(3)) + CONVERT(w2, VEC_WEIGHTS_PROMOTED_TYPE(3)); |
| sum_weights += tmp_we.s0 + tmp_we.s1 + tmp_we.s2; |
| values0 += sum_weights * (int8)(INPUT_OFFSET); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += sum_weights * (int8)(INPUT_OFFSET); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ |
| #endif /* INPUT_OFFSET != 0 */ |
| |
| #if K_OFFSET != 0 |
| values0 += (int8)(K_OFFSET); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| values1 += (int8)(K_OFFSET); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ |
| #endif /* K_OFFSET != 0 */ |
| |
| #if defined(REAL_MULTIPLIER) |
| |
| values0 = CONVERT(round(CONVERT(values0, float8) * (float8)REAL_MULTIPLIER), int8); |
| |
| #else // defined(REAL_MULTIPLIER) |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| int8 res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, output_multiplier, output_shift, 8); |
| int8 res0_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, output_multiplier, output_shift, 8); |
| values0 = select(res0_shift_lt0, res0_shift_gt0, (int8)(output_shift) >= 0); |
| #else // defined(PER_CHANNEL_QUANTIZATION) |
| #if OUTPUT_SHIFT < 0 |
| values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); |
| #else // OUTPUT_SHIFT < 0 |
| values0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); |
| #endif // OUTPUT_SHIFT < 0 |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| |
| #endif // defined(REAL_MULTIPLIER) |
| |
| values0 += (int8)OUTPUT_OFFSET; |
| VEC_TYPE(8) |
| res0 = CONVERT_SAT(values0, VEC_TYPE(8)); |
| |
| vstore8(ACTIVATION_FUNC(res0), 0, dst.ptr); |
| #if CONV_STRIDE_Y == 1 && DILATION_Y == 1 |
| |
| #if defined(REAL_MULTIPLIER) |
| |
| values1 = CONVERT(round(CONVERT(values1, float8) * (float8)REAL_MULTIPLIER), int8); |
| |
| #else // defined(REAL_MULTIPLIER) |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| int8 res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, output_multiplier, output_shift, 8); |
| int8 res1_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, output_multiplier, output_shift, 8); |
| values1 = select(res1_shift_lt0, res1_shift_gt0, (int8)(output_shift) >= 0); |
| #else // defined(PER_CHANNEL_QUANTIZATION) |
| #if OUTPUT_SHIFT < 0 |
| values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); |
| #else // OUTPUT_SHIFT < 0 |
| values1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(values1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, 8); |
| #endif // OUTPUT_SHIFT < 0 |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| |
| #endif // defined(REAL_MULTIPLIER) |
| |
| values1 += (int8)OUTPUT_OFFSET; |
| VEC_TYPE(8) |
| res1 = CONVERT_SAT(values1, VEC_TYPE(8)); |
| |
| vstore8(ACTIVATION_FUNC(res1), 0, (__global DATA_TYPE *)(dst.ptr + dst_stride_y)); |
| #endif /* CONV_STRIDE_Y == 1 && DILATION_Y==1*/ |
| } |
| |
| #endif // !defined(IS_DOT8) |
| |
| #endif /* defined(CONV_STRIDE_Y) && defined(CONV_STRIDE_X) && defined(DEPTH_MULTIPLIER) && defined(DST_CHANNELS) */ |
| |
| #if defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) |
| |
| #define asymm_mult_by_quant_multiplier_less_than_one(x, y, z) ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(x, y, z, VEC_SIZE) |
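| // Informative sketch of the fixed-point requantization (assuming the usual |
| // gemmlowp convention where a positive shift is a right shift): both wrappers |
| // approximate |
| //   out ~= acc * multiplier * 2^(-shift) / 2^31 |
| // ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE multiplies first and then does a |
| // rounding right shift; the GREATER_THAN_ONE variant left-shifts by -shift first. |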
| |
| #define MULTIPLY_ADD(x, y, acc) acc += CONVERT(CONVERT(x, VEC_WEIGHTS_PROMOTED_TYPE(VEC_SIZE)) * CONVERT(y, VEC_WEIGHTS_PROMOTED_TYPE(VEC_SIZE)), VEC_INT) |
| |
| #if WEIGHTS_OFFSET != 0 |
| #define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) \ |
| ({ \ |
| sum += CONVERT(x, VEC_INT); \ |
| MULTIPLY_ADD(x, y, acc); \ |
| }) |
| #else /* WEIGHTS_OFFSET != 0 */ |
| #define MULTIPLY_ADD_ACCUMULATE(x, y, acc, sum) MULTIPLY_ADD(x, y, acc) |
| #endif /* WEIGHTS_OFFSET != 0 */ |
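| // Informative: MULTIPLY_ADD widens both operands to the promoted weights type so |
| // the 8-bit products cannot overflow before the int accumulation. When |
| // WEIGHTS_OFFSET != 0, MULTIPLY_ADD_ACCUMULATE also accumulates the raw input |
| // values into "sum" so the WEIGHTS_OFFSET * sum correction can be applied later. |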
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #define DOT_PRODUCT(acc, val0, val1, val2, val3, val4, val5, val6, val7, val8, w0, w1) \ |
| ({ \ |
| ARM_DOT((VEC_TYPE(4))(val0, val1, val2, val3), w0.s0123, acc); \ |
| ARM_DOT((VEC_TYPE(4))(val4, val5, val6, val7), w0.s4567, acc); \ |
| acc += val8 * w1; \ |
| }) |
| |
| #define DOT_PRODUCT_REDUCTION(sum, val0, val1, val2, val3, val4, val5, val6, val7, val8) \ |
| ({ \ |
| sum = val0; \ |
| ARM_DOT((VEC_TYPE(4))(val1, val2, val3, val4), (VEC_TYPE(4))1, sum); \ |
| ARM_DOT((VEC_TYPE(4))(val5, val6, val7, val8), (VEC_TYPE(4))1, sum); \ |
| }) |
| |
| #define DOT_PRODUCT_REDUCTION_WEIGHTS(sum, w0, w1) \ |
| ({ \ |
| sum = w1; \ |
| ARM_DOT(w0.s0123, (VEC_TYPE(4))1, sum); \ |
| ARM_DOT(w0.s4567, (VEC_TYPE(4))1, sum); \ |
| }) |
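| // Informative: a 3x3 filter has 9 taps, which does not fit a single 4-wide |
| // ARM_DOT, so DOT_PRODUCT issues two 4-element dot products plus one scalar |
| // multiply-accumulate. The REDUCTION variants reuse ARM_DOT with a vector of |
| // ones to sum 9 values (inputs or weights) for the offset correction terms. |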
| |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| |
| #if defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && VEC_SIZE == 4 |
| /** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width or height is not 1. |
| * |
| * @note This kernel assumes VEC_SIZE is 4. |
| * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel. |
| * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=4) |
| * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112) |
| * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1) |
| * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1) |
| * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1) |
| * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1) |
| * |
| * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED |
| * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) |
| * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) |
| * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) |
| * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) |
| * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor |
| * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr |
| * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) |
| * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) |
| * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) |
| * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) |
| * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) |
| * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor |
| * @param[in] weights_ptr Pointer to the weights tensor reshaped. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL |
| * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) |
| * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) |
| * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor |
| * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32 |
| * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes) |
| * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector |
| * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32 |
| * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes) |
| * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector |
| * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32 |
| * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) |
| * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector |
| * @param[in] max_offset Max offset for the input tensor |
| */ |
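| // Illustrative build options only (a hypothetical QASYMM8 configuration; the |
| // real values are chosen by the host at kernel creation time): |
| //   -DDATA_TYPE=uchar -DWEIGHTS_TYPE=uchar -DVEC_SIZE=4 -DSRC_DIM_1=56 -DSRC_DIM_2=56 |
| //   -DCONV_PAD_TOP=1 -DCONV_PAD_LEFT=1 -DCONV_STRIDE_X=2 -DCONV_STRIDE_Y=2 |
| //   -DDILATION_X=1 -DDILATION_Y=1 -DINPUT_OFFSET=-128 -DWEIGHTS_OFFSET=-128 |
| //   -DK_OFFSET=147456 -DOUTPUT_OFFSET=0 -DOUTPUT_MULTIPLIER=1073741824 -DOUTPUT_SHIFT=-1 |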
| __kernel void dwc_3x3_reshaped_quantized8_nhwc( |
| TENSOR4D_DECLARATION(src), |
| TENSOR4D_DECLARATION(dst), |
| IMAGE_DECLARATION(weights), |
| VECTOR_DECLARATION(output_multipliers), |
| VECTOR_DECLARATION(output_shifts), |
| #if defined(HAS_BIAS) |
| VECTOR_DECLARATION(biases), |
| #endif /* defined(HAS_BIAS) */ |
| int max_offset) |
| { |
| const int x = get_global_id(0); // channels |
| const int y = get_global_id(1); // spatial coordinate x |
| #if defined(DST_DEPTH) |
| int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y |
| int b = get_global_id(2) / (int)DST_DEPTH; // batch |
| #else // defined(DST_DEPTH) |
| int z = get_global_id(2); // spatial coordinate y |
| #endif // defined(DST_DEPTH) |
| |
| __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y; |
| |
| #if defined(DST_DEPTH) |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w; |
| #else /* defined(DST_DEPTH) */ |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE; |
| #endif /* defined(DST_DEPTH) */ |
| |
| int z_coord = 0; |
| int4 offset = 0; |
| int4 y_coord = ((int4)(y * CONV_STRIDE_X) + (int4)(0, DILATION_X * 1, DILATION_X * 2, DILATION_X * 3)) - (int)CONV_PAD_LEFT; |
| |
| // Only for y = 0 can the coordinate be negative. If so, the unsigned cast plus min() clamps it to SRC_DIM_1 |
| y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1); |
| y_coord.s1 = min((uint)y_coord.s1, (uint)SRC_DIM_1); |
| y_coord.s2 = min((uint)y_coord.s2, (uint)SRC_DIM_1); |
| y_coord.s3 = min((uint)y_coord.s3, (uint)SRC_DIM_1); |
| |
| int4 y_offset = convert_int4(y_coord * (int)src_stride_y); |
| |
| // We compute VEC_SIZEx1x1 [C,W,H] elements |
| VEC_INT acc = 0, sum = 0; |
| |
| // Load weights |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 16) |
| w0_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr)); |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 16) |
| w1_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr + 16)); |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w8 = VLOAD(4)(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * 16)); |
| |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w0 = w0_tmp.s0123; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w1 = w0_tmp.s4567; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w2 = w0_tmp.s89AB; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w3 = w0_tmp.sCDEF; |
| |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w4 = w1_tmp.s0123; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w5 = w1_tmp.s4567; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w6 = w1_tmp.s89AB; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w7 = w1_tmp.sCDEF; |
| |
| #if INPUT_OFFSET != 0 |
| VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT) |
| + CONVERT(w3, VEC_INT) + CONVERT(w4, VEC_INT) + CONVERT(w5, VEC_INT) |
| + CONVERT(w6, VEC_INT) + CONVERT(w7, VEC_INT) + CONVERT(w8, VEC_INT); |
| #endif /* INPUT_OFFSET != 0 */ |
| |
| // Load input values |
| // z == 0 |
| // Clamp z_coord, since for z = 0 it can be negative |
| // z_coord is cast to unsigned int in order to use just a min() operation |
| // A "-1" 32 bit signed variable converted to unsigned gives 4294967295 |
| z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP; |
| z_coord = min((uint)z_coord, (uint)SRC_DIM_2); |
| offset = y_offset + (int4)(z_coord * src_stride_z); |
| offset = min(offset, (int4)max_offset); |
| |
| VEC_TYPE(VEC_SIZE) |
| values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| |
| // z == 1 |
| // z_coord can only be negative for z = 0, so we do not need to clamp it |
| // Moreover z_coord cannot be out-of-bound for z = 1 so we do not need to clamp the offset |
| z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y; |
| offset = y_offset + (int4)(z_coord * src_stride_z); |
| VEC_TYPE(VEC_SIZE) |
| values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| |
| // z == 2 |
| // Offset can be out-of-bound so we need to check if it is greater than max_offset |
| z_coord = z * (int)CONV_STRIDE_Y - (int)CONV_PAD_TOP + DILATION_Y * 2; |
| offset = y_offset + (int4)(z_coord * src_stride_z); |
| offset = min(offset, (int4)max_offset); |
| VEC_TYPE(VEC_SIZE) |
| values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| |
| MULTIPLY_ADD_ACCUMULATE(values0, w0, acc, sum); |
| MULTIPLY_ADD_ACCUMULATE(values1, w1, acc, sum); |
| MULTIPLY_ADD_ACCUMULATE(values2, w2, acc, sum); |
| |
| MULTIPLY_ADD_ACCUMULATE(values3, w3, acc, sum); |
| MULTIPLY_ADD_ACCUMULATE(values4, w4, acc, sum); |
| MULTIPLY_ADD_ACCUMULATE(values5, w5, acc, sum); |
| |
| MULTIPLY_ADD_ACCUMULATE(values6, w6, acc, sum); |
| MULTIPLY_ADD_ACCUMULATE(values7, w7, acc, sum); |
| MULTIPLY_ADD_ACCUMULATE(values8, w8, acc, sum); |
| |
| #if defined(HAS_BIAS) |
| Vector biases = CONVERT_TO_VECTOR_STRUCT(biases); |
| VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr); |
| acc += bias_values; |
| #endif // defined(HAS_BIAS) |
| |
| #if WEIGHTS_OFFSET != 0 |
| acc += WEIGHTS_OFFSET * sum; |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| |
| #if INPUT_OFFSET != 0 |
| acc += INPUT_OFFSET * sum_we; |
| #endif /* INPUT_OFFSET != 0 */ |
| |
| #if K_OFFSET != 0 |
| acc += (VEC_INT)K_OFFSET; |
| #endif /* K_OFFSET != 0 */ |
| |
| #if defined(REAL_MULTIPLIER) |
| |
| acc = CONVERT(round(CONVERT(acc, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); |
| |
| #else // defined(REAL_MULTIPLIER) |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT(output_multipliers); |
| Vector output_shifts = CONVERT_TO_VECTOR_STRUCT(output_shifts); |
| VEC_INT output_multiplier = VLOAD(VEC_SIZE)(0, (__global int *)output_multipliers.ptr); |
| VEC_INT output_shift = VLOAD(VEC_SIZE)(0, (__global int *)output_shifts.ptr); |
| |
| VEC_INT res_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc, output_multiplier, output_shift, VEC_SIZE); |
| VEC_INT res_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc, output_multiplier, output_shift); |
| acc = select(res_shift_lt0, res_shift_gt0, output_shift >= 0); |
| #else // defined(PER_CHANNEL_QUANTIZATION) |
| #if OUTPUT_SHIFT < 0 |
| acc = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); |
| #else // OUTPUT_SHIFT < 0 |
| acc = asymm_mult_by_quant_multiplier_less_than_one(acc, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); |
| #endif // OUTPUT_SHIFT < 0 |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| |
| #endif // defined(REAL_MULTIPLIER) |
| |
| acc += (VEC_INT)OUTPUT_OFFSET; |
| |
| VEC_TYPE(VEC_SIZE) |
| res = CONVERT_SAT(acc, VEC_TYPE(VEC_SIZE)); |
| |
| #if defined(DST_DEPTH) |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z + b * dst_stride_w; |
| #else /* defined(DST_DEPTH) */ |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z; |
| #endif /* defined(DST_DEPTH) */ |
| |
| VSTORE(VEC_SIZE) |
| (ACTIVATION_FUNC(res), 0, (__global DATA_TYPE *)(dst_addr)); |
| } |
| #endif // defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && VEC_SIZE == 4 |
| |
| #if defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED) && VEC_SIZE == 4 |
| /** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width and height is 1. |
| * |
| * @note This kernel assumes VEC_SIZE is 4. |
| * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel. |
| * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=4) |
| * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112) |
| * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (e.g. -DNUM_ROWS_PROCESSED=2) |
| * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (e.g. -DNUM_PLANES_PROCESSED=2) |
| * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1) |
| * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1). |
| * |
| * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED |
| * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) |
| * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) |
| * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) |
| * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) |
| * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor |
| * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr |
| * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) |
| * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) |
| * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) |
| * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) |
| * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) |
| * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor |
| * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL |
| * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) |
| * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) |
| * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor |
| * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32 |
| * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes) |
| * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector |
| * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32 |
| * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes) |
| * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector |
| * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32 |
| * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) |
| * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector |
| * @param[in] max_offset Max offset for the input tensor |
| */ |
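| // Illustrative build options only (hypothetical values; the host sets the real |
| // ones): same as the strided variant above, but with -DNUM_ROWS_PROCESSED=2 and |
| // -DNUM_PLANES_PROCESSED=2 instead of the stride definitions. |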
| |
| __kernel void dwc_3x3_reshaped_quantized8_stride1_nhwc( |
| TENSOR4D_DECLARATION(src), |
| TENSOR4D_DECLARATION(dst), |
| IMAGE_DECLARATION(weights), |
| VECTOR_DECLARATION(output_multipliers), |
| VECTOR_DECLARATION(output_shifts), |
| #if defined(HAS_BIAS) |
| VECTOR_DECLARATION(biases), |
| #endif /* defined(HAS_BIAS) */ |
| int max_offset) |
| { |
| int x = get_global_id(0); |
| int y = get_global_id(1); |
| #if defined(DST_DEPTH) |
| int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y |
| int b = get_global_id(2) / (int)DST_DEPTH; // batch |
| #else // defined(DST_DEPTH) |
| int z = get_global_id(2); // spatial coordinate y |
| #endif // defined(DST_DEPTH) |
| |
| __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y; |
| |
| #if defined(DST_DEPTH) |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w; |
| #else /* defined(DST_DEPTH) */ |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE; |
| #endif /* defined(DST_DEPTH) */ |
| |
| int z_coord = 0; |
| int4 offset = 0; |
| int4 y_coord = ((int4)(y * NUM_ROWS_PROCESSED) + (int4)(0, 1, 2, 3)) - (int)CONV_PAD_LEFT; |
| |
| // Only for y = 0 can the coordinate be negative. If so, the unsigned cast plus min() clamps it to SRC_DIM_1 |
| y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1); |
| y_coord.s1 = min((uint)y_coord.s1, (uint)SRC_DIM_1); |
| y_coord.s2 = min((uint)y_coord.s2, (uint)SRC_DIM_1); |
| y_coord.s3 = min((uint)y_coord.s3, (uint)SRC_DIM_1); |
| |
| int4 y_offset = convert_int4(y_coord * (int)src_stride_y); |
| |
| // We compute 4x2x2 [C,W,H] elements |
| VEC_INT acc0 = 0, sum0 = 0; |
| VEC_INT acc1 = 0, sum1 = 0; |
| VEC_INT acc2 = 0, sum2 = 0; |
| VEC_INT acc3 = 0, sum3 = 0; |
| |
| // Load weights |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 16) |
| w0_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr)); |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 16) |
| w1_tmp = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr + 16)); |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w8 = VLOAD(4)(0, (__global WEIGHTS_TYPE *)(weights_addr + 2 * 16)); |
| |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w0 = w0_tmp.s0123; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w1 = w0_tmp.s4567; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w2 = w0_tmp.s89AB; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w3 = w0_tmp.sCDEF; |
| |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w4 = w1_tmp.s0123; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w5 = w1_tmp.s4567; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w6 = w1_tmp.s89AB; |
| VEC_DATA_TYPE(WEIGHTS_TYPE, 4) |
| w7 = w1_tmp.sCDEF; |
| |
| #if INPUT_OFFSET != 0 |
| VEC_INT sum_we = CONVERT(w0, VEC_INT) + CONVERT(w1, VEC_INT) + CONVERT(w2, VEC_INT) |
| + CONVERT(w3, VEC_INT) + CONVERT(w4, VEC_INT) + CONVERT(w5, VEC_INT) |
| + CONVERT(w6, VEC_INT) + CONVERT(w7, VEC_INT) + CONVERT(w8, VEC_INT); |
| #endif /* INPUT_OFFSET != 0 */ |
| |
| // Load input values |
| // z == 0 |
| // Clamp z_coord, since for z = 0 it can be negative |
| // z_coord is cast to unsigned int in order to use just a min() operation |
| // A "-1" 32 bit signed variable converted to unsigned gives 4294967295 |
| z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP; |
| z_coord = min((uint)z_coord, (uint)SRC_DIM_2); |
| offset = y_offset + (int4)(z_coord * src_stride_z); |
| offset = min(offset, (int4)max_offset); |
| |
| VEC_TYPE(VEC_SIZE) |
| values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| VEC_TYPE(VEC_SIZE) |
| values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); |
| |
| // z == 1 |
| // z_coord can only be negative for z = 0, so we do not need to clamp it |
| // Moreover z_coord cannot be out-of-bound for z = 1 so we do not need to clamp the offset |
| z_coord = z * (int)NUM_PLANES_PROCESSED - (int)CONV_PAD_TOP + 1; |
| offset = y_offset + (int4)(z_coord * src_stride_z); |
| VEC_TYPE(VEC_SIZE) |
| values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| VEC_TYPE(VEC_SIZE) |
| values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); |
| |
| // z == 2 |
| // After z = 1 we can simply add src_stride_z to offset without updating z_coord |
| // However offset can be out-of-bound so we need to check if it is greater than max_offset |
| offset += (int4)src_stride_z; |
| offset = min(offset, (int4)max_offset); |
| VEC_TYPE(VEC_SIZE) |
| values8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values9 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values10 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| VEC_TYPE(VEC_SIZE) |
| values11 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); |
| |
| // z == 3 |
| // After z = 1 we can simply add src_stride_z to offset without updating z_coord |
| // However offset can be out-of-bound so we need to check if it is greater than max_offset |
| offset += (int4)(src_stride_z); |
| offset = min(offset, (int4)max_offset); |
| VEC_TYPE(VEC_SIZE) |
| values12 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values13 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values14 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| VEC_TYPE(VEC_SIZE) |
| values15 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); |
| |
| MULTIPLY_ADD_ACCUMULATE(values0, w0, acc0, sum0); |
| MULTIPLY_ADD_ACCUMULATE(values1, w1, acc0, sum0); |
| MULTIPLY_ADD_ACCUMULATE(values2, w2, acc0, sum0); |
| MULTIPLY_ADD_ACCUMULATE(values1, w0, acc1, sum1); |
| MULTIPLY_ADD_ACCUMULATE(values2, w1, acc1, sum1); |
| MULTIPLY_ADD_ACCUMULATE(values3, w2, acc1, sum1); |
| |
| MULTIPLY_ADD_ACCUMULATE(values4, w3, acc0, sum0); |
| MULTIPLY_ADD_ACCUMULATE(values5, w4, acc0, sum0); |
| MULTIPLY_ADD_ACCUMULATE(values6, w5, acc0, sum0); |
| MULTIPLY_ADD_ACCUMULATE(values5, w3, acc1, sum1); |
| MULTIPLY_ADD_ACCUMULATE(values6, w4, acc1, sum1); |
| MULTIPLY_ADD_ACCUMULATE(values7, w5, acc1, sum1); |
| |
| MULTIPLY_ADD_ACCUMULATE(values8, w6, acc0, sum0); |
| MULTIPLY_ADD_ACCUMULATE(values9, w7, acc0, sum0); |
| MULTIPLY_ADD_ACCUMULATE(values10, w8, acc0, sum0); |
| MULTIPLY_ADD_ACCUMULATE(values9, w6, acc1, sum1); |
| MULTIPLY_ADD_ACCUMULATE(values10, w7, acc1, sum1); |
| MULTIPLY_ADD_ACCUMULATE(values11, w8, acc1, sum1); |
| |
| MULTIPLY_ADD_ACCUMULATE(values4, w0, acc2, sum2); |
| MULTIPLY_ADD_ACCUMULATE(values5, w1, acc2, sum2); |
| MULTIPLY_ADD_ACCUMULATE(values6, w2, acc2, sum2); |
| MULTIPLY_ADD_ACCUMULATE(values5, w0, acc3, sum3); |
| MULTIPLY_ADD_ACCUMULATE(values6, w1, acc3, sum3); |
| MULTIPLY_ADD_ACCUMULATE(values7, w2, acc3, sum3); |
| |
| MULTIPLY_ADD_ACCUMULATE(values8, w3, acc2, sum2); |
| MULTIPLY_ADD_ACCUMULATE(values9, w4, acc2, sum2); |
| MULTIPLY_ADD_ACCUMULATE(values10, w5, acc2, sum2); |
| MULTIPLY_ADD_ACCUMULATE(values9, w3, acc3, sum3); |
| MULTIPLY_ADD_ACCUMULATE(values10, w4, acc3, sum3); |
| MULTIPLY_ADD_ACCUMULATE(values11, w5, acc3, sum3); |
| |
| MULTIPLY_ADD_ACCUMULATE(values12, w6, acc2, sum2); |
| MULTIPLY_ADD_ACCUMULATE(values13, w7, acc2, sum2); |
| MULTIPLY_ADD_ACCUMULATE(values14, w8, acc2, sum2); |
| MULTIPLY_ADD_ACCUMULATE(values13, w6, acc3, sum3); |
| MULTIPLY_ADD_ACCUMULATE(values14, w7, acc3, sum3); |
| MULTIPLY_ADD_ACCUMULATE(values15, w8, acc3, sum3); |
| |
| #if defined(HAS_BIAS) |
| Vector biases = CONVERT_TO_VECTOR_STRUCT(biases); |
| |
| VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr); |
| |
| acc0 += bias_values; |
| acc1 += bias_values; |
| acc2 += bias_values; |
| acc3 += bias_values; |
| #endif /* defined(HAS_BIAS) */ |
| |
| #if WEIGHTS_OFFSET != 0 |
| acc0 += WEIGHTS_OFFSET * sum0; |
| acc1 += WEIGHTS_OFFSET * sum1; |
| acc2 += WEIGHTS_OFFSET * sum2; |
| acc3 += WEIGHTS_OFFSET * sum3; |
| #endif /* WEIGHTS_OFFSET != 0 */ |
| |
| #if INPUT_OFFSET != 0 |
| VEC_INT offs = INPUT_OFFSET * sum_we; |
| |
| acc0 += offs; |
| acc1 += offs; |
| acc2 += offs; |
| acc3 += offs; |
| #endif /* INPUT_OFFSET != 0 */ |
| |
| #if K_OFFSET != 0 |
| acc0 += (VEC_INT)K_OFFSET; |
| acc1 += (VEC_INT)K_OFFSET; |
| acc2 += (VEC_INT)K_OFFSET; |
| acc3 += (VEC_INT)K_OFFSET; |
| #endif /* K_OFFSET != 0 */ |
| |
| #if defined(REAL_MULTIPLIER) |
| |
| acc0 = CONVERT(round(CONVERT(acc0, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); |
| acc1 = CONVERT(round(CONVERT(acc1, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); |
| acc2 = CONVERT(round(CONVERT(acc2, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); |
| acc3 = CONVERT(round(CONVERT(acc3, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); |
| |
| #else // defined(REAL_MULTIPLIER) |
| |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| Vector output_multipliers = CONVERT_TO_VECTOR_STRUCT(output_multipliers); |
| Vector output_shifts = CONVERT_TO_VECTOR_STRUCT(output_shifts); |
| VEC_INT output_multiplier = VLOAD(VEC_SIZE)(0, (__global int *)output_multipliers.ptr); |
| VEC_INT output_shift = VLOAD(VEC_SIZE)(0, (__global int *)output_shifts.ptr); |
| |
| VEC_INT res0_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc0, output_multiplier, output_shift, VEC_SIZE); |
| VEC_INT res1_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc1, output_multiplier, output_shift, VEC_SIZE); |
| VEC_INT res2_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc2, output_multiplier, output_shift, VEC_SIZE); |
| VEC_INT res3_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc3, output_multiplier, output_shift, VEC_SIZE); |
| VEC_INT res0_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, output_multiplier, output_shift); |
| VEC_INT res1_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc1, output_multiplier, output_shift); |
| VEC_INT res2_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc2, output_multiplier, output_shift); |
| VEC_INT res3_shift_gt0 = asymm_mult_by_quant_multiplier_less_than_one(acc3, output_multiplier, output_shift); |
| acc0 = select(res0_shift_lt0, res0_shift_gt0, output_shift >= 0); |
| acc1 = select(res1_shift_lt0, res1_shift_gt0, output_shift >= 0); |
| acc2 = select(res2_shift_lt0, res2_shift_gt0, output_shift >= 0); |
| acc3 = select(res3_shift_lt0, res3_shift_gt0, output_shift >= 0); |
| #else // defined(PER_CHANNEL_QUANTIZATION) |
| #if OUTPUT_SHIFT < 0 |
| acc0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); |
| acc1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); |
| acc2 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc2, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); |
| acc3 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc3, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); |
| #else // OUTPUT_SHIFT < 0 |
| acc0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); |
| acc1 = asymm_mult_by_quant_multiplier_less_than_one(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); |
| acc2 = asymm_mult_by_quant_multiplier_less_than_one(acc2, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); |
| acc3 = asymm_mult_by_quant_multiplier_less_than_one(acc3, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); |
| #endif // OUTPUT_SHIFT < 0 |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| |
| #endif // defined(REAL_MULTIPLIER) |
| |
| acc0 += (VEC_INT)OUTPUT_OFFSET; |
| acc1 += (VEC_INT)OUTPUT_OFFSET; |
| acc2 += (VEC_INT)OUTPUT_OFFSET; |
| acc3 += (VEC_INT)OUTPUT_OFFSET; |
| |
| VEC_TYPE(VEC_SIZE) |
| res0 = CONVERT_SAT(acc0, VEC_TYPE(VEC_SIZE)); |
| VEC_TYPE(VEC_SIZE) |
| res1 = CONVERT_SAT(acc1, VEC_TYPE(VEC_SIZE)); |
| VEC_TYPE(VEC_SIZE) |
| res2 = CONVERT_SAT(acc2, VEC_TYPE(VEC_SIZE)); |
| VEC_TYPE(VEC_SIZE) |
| res3 = CONVERT_SAT(acc3, VEC_TYPE(VEC_SIZE)); |
| |
| #if defined(DST_DEPTH) |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + (z * NUM_PLANES_PROCESSED) * dst_step_z + b * dst_stride_w; |
| #else /* defined(DST_DEPTH) */ |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + (z * NUM_PLANES_PROCESSED) * dst_step_z; |
| #endif /* defined(DST_DEPTH) */ |
| |
| VSTORE(VEC_SIZE) |
| (ACTIVATION_FUNC(res0), 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)); |
| VSTORE(VEC_SIZE) |
| (ACTIVATION_FUNC(res1), 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)); |
| |
| #if((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0) |
| if((z * NUM_PLANES_PROCESSED + 1) < DST_DIM_2) |
| #endif // ((DST_DIM_2 % NUM_PLANES_PROCESSED) != 0) |
| { |
| VSTORE(VEC_SIZE) |
| (ACTIVATION_FUNC(res2), 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y + 1 * dst_stride_z)); |
| VSTORE(VEC_SIZE) |
| (ACTIVATION_FUNC(res3), 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y + 1 * dst_stride_z)); |
| } |
| } |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && VEC_SIZE == 4 |
| /** This function computes the depthwise convolution quantized for NHWC data layout when the stride along the width and height is 1 using dot product. |
| * |
| * @note Per-channel quantization is not supported by this kernel. |
| * @note This kernel assumes VEC_SIZE is 4. |
| * @note The weights tensor is expected to be reshaped using @ref CLDepthwiseConvolutionLayerReshapeWeightsKernel. |
| * @note The number of elements read per thread must be passed at compile time using -DVEC_SIZE (e.g. -DVEC_SIZE=4) |
| * @note Dimension two of the input tensor (height for NHWC data layout) must be passed at compile time using -DSRC_DIM_2 (e.g. -DSRC_DIM_2=112) |
| * @note The number of rows processed per thread must be passed at compile time using -DNUM_ROWS_PROCESSED (e.g. -DNUM_ROWS_PROCESSED=2) |
| * @note The number of planes processed per thread must be passed at compile time using -DNUM_PLANES_PROCESSED (e.g. -DNUM_PLANES_PROCESSED=2) |
| * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1) |
| * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1). |
| * @note If REAL_MULTIPLIER is passed at compile time (i.e. -DREAL_MULTIPLIER=1.355f), the final quantization is performed using a floating point multiplication. |
| * If not, the quantization will be performed using a fixed point multiplication |
| * |
| * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED |
| * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) |
| * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) |
| * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) |
| * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) |
| * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor |
| * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr |
| * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) |
| * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) |
| * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) |
| * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) |
| * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) |
| * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor |
| * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: same as @p src_ptr |
| * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) |
| * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) |
| * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor |
| * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32 |
| * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes) |
| * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector |
| * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32 |
| * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes) |
| * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector |
| * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32 |
| * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) |
| * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector |
| * @param[in] max_offset The maximum allowed offset for the input tensor |
| */ |
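| // Illustrative build options only (hypothetical): as for the non-dot8 stride-1 |
| // variant above, compiled with -DARM_COMPUTE_OPENCL_DOT8_ENABLED on a device |
| // exposing cl_arm_integer_dot_product_int8, and with either the fixed-point pair |
| // (-DOUTPUT_MULTIPLIER, -DOUTPUT_SHIFT) or a float -DREAL_MULTIPLIER=0.0235f; |
| // per-channel quantization is not supported here, so both must be single |
| // compile-time constants. |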
| __kernel void dwc_3x3_reshaped_quantized8_dot8_stride1_nhwc( |
| TENSOR4D_DECLARATION(src), |
| TENSOR4D_DECLARATION(dst), |
| IMAGE_DECLARATION(weights), |
| VECTOR_DECLARATION(output_multipliers), |
| VECTOR_DECLARATION(output_shifts), |
| #if defined(HAS_BIAS) |
| VECTOR_DECLARATION(biases), |
| #endif // defined(HAS_BIAS) |
| int max_offset) |
| { |
| int x = get_global_id(0); |
| int y = get_global_id(1); |
| #if defined(DST_DEPTH) |
| int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y |
| int b = get_global_id(2) / (int)DST_DEPTH; // batch |
| #else // defined(DST_DEPTH) |
| int z = get_global_id(2); // spatial coordinate y |
| #endif // defined(DST_DEPTH) |
| |
| __global uchar *weights_addr = weights_ptr + weights_offset_first_element_in_bytes + x * weights_stride_y; |
| |
| #if defined(DST_DEPTH) |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE + b * src_stride_w; |
| #else /* defined(DST_DEPTH) */ |
| __global uchar *src_addr = src_ptr + src_offset_first_element_in_bytes + x * VEC_SIZE; |
| #endif /* defined(DST_DEPTH) */ |
| |
| int z_coord = 0; |
| int4 offset = 0; |
| int4 y_coord = ((int4)(y * NUM_ROWS_PROCESSED) + (int4)(0, 1, 2, 3)) - (int)CONV_PAD_LEFT; |
| |
| // Only for y = 0 can the coordinate be negative. If so, the unsigned cast plus min() clamps it to SRC_DIM_1 |
| y_coord.s0 = min((uint)y_coord.s0, (uint)SRC_DIM_1); |
| y_coord.s1 = min((uint)y_coord.s1, (uint)SRC_DIM_1); |
| y_coord.s2 = min((uint)y_coord.s2, (uint)SRC_DIM_1); |
| y_coord.s3 = min((uint)y_coord.s3, (uint)SRC_DIM_1); |
| |
| int4 y_offset = convert_int4(y_coord * (int)src_stride_y); |
| |
| // We compute 4x2x1 [C,W,H] elements |
| VEC_INT acc0 = 0; |
| VEC_INT acc1 = 0; |
| VEC_INT sum0 = 0; |
| VEC_INT sum1 = 0; |
| |
| // Load weights |
| VEC_TYPE(16) |
| w0 = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr)); |
| VEC_TYPE(16) |
| w1 = VLOAD(16)(0, (__global WEIGHTS_TYPE *)(weights_addr + 16)); |
| VEC_TYPE(4) |
| w2 = VLOAD(4)(0, (__global WEIGHTS_TYPE *)(weights_addr + 32)); |
| |
| #if INPUT_OFFSET != 0 |
| // Initialize the final result with the weights reduction, which is multiplied by INPUT_OFFSET below |
| DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s0, w0.s01234567, w0.s8); |
| DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s1, (VEC_TYPE(8))((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1); |
| DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s2, w1.s23456789, w1.sA); |
| DOT_PRODUCT_REDUCTION_WEIGHTS(acc0.s3, (VEC_TYPE(8))((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3); |
| |
| // Multiply the weights reduction by INPUT_OFFSET |
| acc0 = INPUT_OFFSET * acc0; |
| |
| acc1 = acc0; |
| #endif // INPUT_OFFSET != 0 |
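| // Informative: expanding (input + INPUT_OFFSET) * (weight + WEIGHTS_OFFSET) over |
| // the 9 taps gives sum(i*w) + INPUT_OFFSET*sum(w) + WEIGHTS_OFFSET*sum(i) + |
| // 9*INPUT_OFFSET*WEIGHTS_OFFSET. The INPUT_OFFSET*sum(w) term is what was folded |
| // into acc0/acc1 above; WEIGHTS_OFFSET*sum(i) and the constant K_OFFSET are added |
| // after the dot products below. |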
| |
| // Load input values |
| // z == 0 |
| // Clamp z_coord, since for z = 0 it can be negative |
| // z_coord is cast to unsigned int in order to use just a min() operation |
| // A "-1" 32 bit signed variable converted to unsigned gives 4294967295 |
| z_coord = z - (int)CONV_PAD_TOP; |
| z_coord = min((uint)z_coord, (uint)SRC_DIM_2); |
| offset = y_offset + (int4)(z_coord * src_stride_z); |
| offset = min(offset, (int4)max_offset); |
| |
| VEC_TYPE(VEC_SIZE) |
| values0 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values1 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values2 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| VEC_TYPE(VEC_SIZE) |
| values3 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); |
| |
| // z == 1 |
| // z_coord can only be negative for z = 0, so we do not need to clamp it |
| // Moreover z_coord cannot be out-of-bound for z = 1 so we do not need to clamp the offset |
| z_coord = z - (int)CONV_PAD_TOP + 1; |
| offset = y_offset + (int4)(z_coord * src_stride_z); |
| VEC_TYPE(VEC_SIZE) |
| values4 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values5 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values6 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| VEC_TYPE(VEC_SIZE) |
| values7 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); |
| |
| // z == 2 |
| // After z = 1 we can simply add src_stride_z to offset without updating z_coord |
| // However offset can be out-of-bound so we need to check if it is greater than max_offset |
| offset += (int4)src_stride_z; |
| offset = min(offset, (int4)max_offset); |
| VEC_TYPE(VEC_SIZE) |
| values8 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s0)); |
| VEC_TYPE(VEC_SIZE) |
| values9 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s1)); |
| VEC_TYPE(VEC_SIZE) |
| values10 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s2)); |
| VEC_TYPE(VEC_SIZE) |
| values11 = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(src_addr + offset.s3)); |
| |
| DOT_PRODUCT_REDUCTION(sum0.s0, values0.s0, values1.s0, values2.s0, values4.s0, values5.s0, values6.s0, values8.s0, values9.s0, values10.s0); |
| DOT_PRODUCT_REDUCTION(sum1.s0, values1.s0, values2.s0, values3.s0, values5.s0, values6.s0, values7.s0, values9.s0, values10.s0, values11.s0); |
| DOT_PRODUCT(acc0.s0, values0.s0, values1.s0, values2.s0, values4.s0, values5.s0, values6.s0, values8.s0, values9.s0, values10.s0, w0.s01234567, w0.s8); |
| DOT_PRODUCT(acc1.s0, values1.s0, values2.s0, values3.s0, values5.s0, values6.s0, values7.s0, values9.s0, values10.s0, values11.s0, w0.s01234567, w0.s8); |
| |
| DOT_PRODUCT_REDUCTION(sum0.s1, values0.s1, values1.s1, values2.s1, values4.s1, values5.s1, values6.s1, values8.s1, values9.s1, values10.s1); |
| DOT_PRODUCT_REDUCTION(sum1.s1, values1.s1, values2.s1, values3.s1, values5.s1, values6.s1, values7.s1, values9.s1, values10.s1, values11.s1); |
| DOT_PRODUCT(acc0.s1, values0.s1, values1.s1, values2.s1, values4.s1, values5.s1, values6.s1, values8.s1, values9.s1, values10.s1, (VEC_TYPE(8))((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1); |
| DOT_PRODUCT(acc1.s1, values1.s1, values2.s1, values3.s1, values5.s1, values6.s1, values7.s1, values9.s1, values10.s1, values11.s1, (VEC_TYPE(8))((w0.s9ABC), (w0.sDEF), w1.s0), w1.s1); |
| |
| DOT_PRODUCT_REDUCTION(sum0.s2, values0.s2, values1.s2, values2.s2, values4.s2, values5.s2, values6.s2, values8.s2, values9.s2, values10.s2); |
| DOT_PRODUCT_REDUCTION(sum1.s2, values1.s2, values2.s2, values3.s2, values5.s2, values6.s2, values7.s2, values9.s2, values10.s2, values11.s2); |
| DOT_PRODUCT(acc0.s2, values0.s2, values1.s2, values2.s2, values4.s2, values5.s2, values6.s2, values8.s2, values9.s2, values10.s2, w1.s23456789, w1.sA); |
| DOT_PRODUCT(acc1.s2, values1.s2, values2.s2, values3.s2, values5.s2, values6.s2, values7.s2, values9.s2, values10.s2, values11.s2, w1.s23456789, w1.sA); |
| |
| DOT_PRODUCT_REDUCTION(sum0.s3, values0.s3, values1.s3, values2.s3, values4.s3, values5.s3, values6.s3, values8.s3, values9.s3, values10.s3); |
| DOT_PRODUCT_REDUCTION(sum1.s3, values1.s3, values2.s3, values3.s3, values5.s3, values6.s3, values7.s3, values9.s3, values10.s3, values11.s3); |
| DOT_PRODUCT(acc0.s3, values0.s3, values1.s3, values2.s3, values4.s3, values5.s3, values6.s3, values8.s3, values9.s3, values10.s3, (VEC_TYPE(8))((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3); |
| DOT_PRODUCT(acc1.s3, values1.s3, values2.s3, values3.s3, values5.s3, values6.s3, values7.s3, values9.s3, values10.s3, values11.s3, (VEC_TYPE(8))((w1.sBCD), (w1.sEF), (w2.s012)), w2.s3); |
| |
| #if defined(HAS_BIAS) |
| Vector biases = CONVERT_TO_VECTOR_STRUCT(biases); |
| |
| VEC_INT bias_values = VLOAD(VEC_SIZE)(0, (__global int *)biases.ptr); |
| |
| acc0 += bias_values; |
| acc1 += bias_values; |
| |
| #endif // defined(HAS_BIAS) |
| |
| #if WEIGHTS_OFFSET != 0 |
| acc0 += WEIGHTS_OFFSET * sum0; |
| acc1 += WEIGHTS_OFFSET * sum1; |
| #endif // WEIGHTS_OFFSET != 0 |
| |
| #if K_OFFSET != 0 |
| acc0 += (VEC_INT)K_OFFSET; |
| acc1 += (VEC_INT)K_OFFSET; |
| |
| #endif // K_OFFSET != 0 |
| |
| #if defined(REAL_MULTIPLIER) |
| |
| acc0 = CONVERT(round(CONVERT(acc0, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); |
| acc1 = CONVERT(round(CONVERT(acc1, VEC_FLOAT) * (VEC_FLOAT)REAL_MULTIPLIER), VEC_INT); |
| |
| #else // defined(REAL_MULTIPLIER) |
| |
| #if OUTPUT_SHIFT < 0 |
| acc0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); |
| acc1 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, VEC_SIZE); |
| #else // OUTPUT_SHIFT < 0 |
| acc0 = asymm_mult_by_quant_multiplier_less_than_one(acc0, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); |
| acc1 = asymm_mult_by_quant_multiplier_less_than_one(acc1, OUTPUT_MULTIPLIER, OUTPUT_SHIFT); |
| #endif // OUTPUT_SHIFT < 0 |
| |
| #endif // defined(REAL_MULTIPLIER) |
| acc0 += (VEC_INT)OUTPUT_OFFSET; |
| acc1 += (VEC_INT)OUTPUT_OFFSET; |
| |
| VEC_TYPE(VEC_SIZE) |
| res0 = CONVERT_SAT(acc0, VEC_TYPE(VEC_SIZE)); |
| VEC_TYPE(VEC_SIZE) |
| res1 = CONVERT_SAT(acc1, VEC_TYPE(VEC_SIZE)); |
| |
| #if defined(DST_DEPTH) |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z + b * dst_stride_w; |
| #else /* defined(DST_DEPTH) */ |
| __global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + x * dst_step_x + y * dst_step_y + z * dst_step_z; |
| #endif /* defined(DST_DEPTH) */ |
| |
| VSTORE(VEC_SIZE) |
| (ACTIVATION_FUNC(res0), 0, (__global DATA_TYPE *)(dst_addr + 0 * dst_stride_y)); |
| VSTORE(VEC_SIZE) |
| (ACTIVATION_FUNC(res1), 0, (__global DATA_TYPE *)(dst_addr + 1 * dst_stride_y)); |
| } |
| #endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) && VEC_SIZE==4 |
| |
| #endif // defined(NUM_ROWS_PROCESSED) && defined(NUM_PLANES_PROCESSED) && VEC_SIZE == 4 |
| |
| #endif // defined(VEC_SIZE) && defined(SRC_DIM_1) && defined(SRC_DIM_2) && defined(CONV_PAD_TOP) && defined(CONV_PAD_LEFT) |
| |
| #endif // defined(WEIGHTS_PROMOTED_TYPE) |
| |
| #endif // defined(WEIGHTS_OFFSET) && defined(INPUT_OFFSET) && defined(K_OFFSET) && ((defined(OUTPUT_OFFSET) && defined(OUTPUT_MULTIPLIER) && defined(OUTPUT_SHIFT)) || defined(REAL_MULTIPLIER)) |
| |
| #if defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_SHIFT) && defined(OUTPUT_MULTIPLIER) && defined(VEC_SIZE_LEFTOVER) |
| /** This function computes the depthwise convolution for NHWC data layout. This kernel assumes that the weights tensor is NOT reshaped |
| * |
| * @note The number of elements processed must be passed at compile time using -DN0 (e.g. -DN0=2) |
| * @note The depth multiplier must be passed at compile time using -DDEPTH_MULTIPLIER (e.g. -DDEPTH_MULTIPLIER=1) |
| * @note The first dimension of the input tensor must be passed at compile time using -DSRC_DIM1 (e.g. -DSRC_DIM1=112) |
| * @note The second dimension of the input tensor must be passed at compile time using -DSRC_DIM2 (e.g. -DSRC_DIM2=80) |
| * @note The kernel width must be passed at compile time using -DKERNEL_WIDTH (e.g. -DKERNEL_WIDTH=5) |
| * @note The kernel height must be passed at compile time using -DKERNEL_HEIGHT (e.g. -DKERNEL_HEIGHT=5) |
| * @note The convolution pad top must be passed at compile time using -DCONV_PAD_TOP (e.g. -DCONV_PAD_TOP=1) |
| * @note The convolution pad left must be passed at compile time using -DCONV_PAD_LEFT (e.g. -DCONV_PAD_LEFT=1) |
| * @note The convolution stride along the width must be passed at compile time using -DCONV_STRIDE_X (e.g. -DCONV_STRIDE_X=1) |
| * @note The convolution stride along the height must be passed at compile time using -DCONV_STRIDE_Y (e.g. -DCONV_STRIDE_Y=1) |
| * @note Leftover vector size has to be passed at compile time using -DVEC_SIZE_LEFTOVER (e.g. -DVEC_SIZE_LEFTOVER=3). It is defined as the remainder of the input's first dimension divided by VEC_SIZE |
| * @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu |
| * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively |
| * |
| * @param[in] src_ptr Pointer to the source tensor. Supported data types: QASYMM8/QASYMM8_SIGNED |
| * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes) |
| * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes) |
| * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes) |
| * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] src_stride_w Stride of the source tensor in W dimension (in bytes) |
| * @param[in] src_step_w src_stride_w * number of elements along W processed per workitem(in bytes) |
| * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor |
| * @param[in] dst_ptr Pointer to the destination tensor. Supported data types: same as @p src_ptr |
| * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes) |
| * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes) |
| * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes) |
| * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] dst_stride_w Stride of the destination tensor in W dimension (in bytes) |
| * @param[in] dst_step_w dst_stride_w * number of elements along W processed per workitem(in bytes) |
| * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor |
| * @param[in] weights_ptr Pointer to the weights tensor. Supported data types: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL |
| * @param[in] weights_stride_x Stride of the weights tensor in X dimension (in bytes) |
| * @param[in] weights_step_x weights_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] weights_stride_y Stride of the weights tensor in Y dimension (in bytes) |
| * @param[in] weights_step_y weights_stride_y * number of elements along Y processed per workitem(in bytes) |
| * @param[in] weights_stride_z Stride of the weights tensor in Z dimension (in bytes) |
| * @param[in] weights_step_z weights_stride_z * number of elements along Z processed per workitem(in bytes) |
| * @param[in] weights_offset_first_element_in_bytes The offset of the first element in the weights tensor |
| * @param[in] output_multipliers_ptr Pointer to the output multipliers vector. Supported data types: S32 |
| * @param[in] output_multipliers_stride_x Stride of the output multipliers vector in X dimension (in bytes) |
| * @param[in] output_multipliers_step_x output_multipliers_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_multipliers_offset_first_element_in_bytes The offset of the first element in the output multipliers vector |
| * @param[in] output_shifts_ptr Pointer to the output shifts vector. Supported data types: S32 |
| * @param[in] output_shifts_stride_x Stride of the output shifts vector in X dimension (in bytes) |
| * @param[in] output_shifts_step_x output_shifts_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] output_shifts_offset_first_element_in_bytes The offset of the first element in the output shifts vector |
| * @param[in] biases_ptr (Optional) Pointer to the biases vector. Supported data types: S32 |
| * @param[in] biases_stride_x (Optional) Stride of the biases vector in X dimension (in bytes) |
| * @param[in] biases_step_x (Optional) biases_stride_x * number of elements along X processed per workitem(in bytes) |
| * @param[in] biases_offset_first_element_in_bytes (Optional) The offset of the first element in the biases vector |
| */ |
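| // Illustrative build options only (a hypothetical 3x3, stride-2, depth-multiplier-1 |
| // QASYMM8 configuration; the host sets the real values): |
| //   -DDATA_TYPE=uchar -DWEIGHTS_TYPE=uchar -DN0=4 -DVEC_SIZE=4 -DVEC_SIZE_LEFTOVER=0 |
| //   -DDEPTH_MULTIPLIER=1 -DKERNEL_WIDTH=3 -DKERNEL_HEIGHT=3 -DSRC_DIM1=112 -DSRC_DIM2=112 |
| //   -DCONV_STRIDE_X=2 -DCONV_STRIDE_Y=2 -DCONV_PAD_LEFT=0 -DCONV_PAD_TOP=0 |
| //   -DDILATION_X=1 -DDILATION_Y=1 -DINPUT_OFFSET=-128 -DWEIGHTS_OFFSET=-128 |
| //   -DOUTPUT_OFFSET=0 -DOUTPUT_MULTIPLIER=1073741824 -DOUTPUT_SHIFT=-1 |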
| __kernel void dwc_MxN_native_quantized8_nhwc( |
| TENSOR4D_DECLARATION(src), |
| TENSOR4D_DECLARATION(dst), |
| TENSOR3D_DECLARATION(weights), |
| VECTOR_DECLARATION(output_multipliers), |
| VECTOR_DECLARATION(output_shifts) |
| #if defined(HAS_BIAS) |
| , |
| VECTOR_DECLARATION(biases) |
| #endif // defined(HAS_BIAS) |
| ) |
| { |
| int x_offs = max((int)(get_global_id(0) * N0 - (N0 - VEC_SIZE_LEFTOVER) % N0), 0); |
| int y = get_global_id(1); // spatial coordinate x |
| #if defined(DST_DEPTH) |
| int z = get_global_id(2) % (int)DST_DEPTH; // spatial coordinate y |
| int b = get_global_id(2) / (int)DST_DEPTH; // batch |
| #else // defined(DST_DEPTH) |
| int z = get_global_id(2); // spatial coordinate y |
| #endif // defined(DST_DEPTH) |
| |
| __global uchar *s_addr = src_ptr + src_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE); |
| |
| __global uchar *d_addr = dst_ptr + dst_offset_first_element_in_bytes + x_offs * sizeof(DATA_TYPE) * (int)DEPTH_MULTIPLIER + y * dst_stride_y + z * dst_stride_z; |
| |
| __global uchar *w_addr = weights_ptr + weights_offset_first_element_in_bytes + x_offs * sizeof(WEIGHTS_TYPE) * (int)DEPTH_MULTIPLIER; |
| |
| #if defined(HAS_BIAS) |
| __global uchar *b_addr = biases_ptr + biases_offset_first_element_in_bytes + x_offs * sizeof(int) * (int)DEPTH_MULTIPLIER; |
| #endif // defined(HAS_BIAS) |
| |
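|     // With per-channel quantization, every output channel carries its own requantization multiplier and shift |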
| #if defined(PER_CHANNEL_QUANTIZATION) |
| __global uchar *out_mul_addr = output_multipliers_ptr + output_multipliers_offset_first_element_in_bytes + x_offs * sizeof(int) * (int)DEPTH_MULTIPLIER; |
| __global uchar *out_shift_addr = output_shifts_ptr + output_shifts_offset_first_element_in_bytes + x_offs * sizeof(int) * (int)DEPTH_MULTIPLIER; |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| |
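|     // Step to the current batch |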
| #if defined(DST_DEPTH) |
| s_addr += b * src_stride_w; |
| d_addr += b * dst_stride_w; |
| #endif // defined(DST_DEPTH) |
| |
| #if DEPTH_MULTIPLIER > 1 |
| for(int d = 0; d < (int)DEPTH_MULTIPLIER; ++d) |
| { |
| #endif // DEPTH_MULTIPLIER > 1 |
| // Each work-item computes N0x1x1 elements |
| VEC_INT res = 0; |
| |
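|         // Top-left corner of the receptive field in the source tensor, after applying stride and padding |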
| int x_coord = y * CONV_STRIDE_X - (int)CONV_PAD_LEFT; |
| int y_coord = z * CONV_STRIDE_Y - (int)CONV_PAD_TOP; |
| |
| for(int yk = 0; yk < KERNEL_HEIGHT; ++yk) |
| { |
| if(y_coord >= 0 && y_coord < SRC_DIM2) |
| { |
| int x_coord_tmp = x_coord; |
| |
| for(int xk = 0; xk < KERNEL_WIDTH; ++xk) |
| { |
| if(x_coord_tmp >= 0 && x_coord_tmp < SRC_DIM1) |
| { |
| int s_offset = x_coord_tmp * (int)src_stride_y + y_coord * (int)src_stride_z; |
| int w_offset = xk * weights_stride_y + yk * weights_stride_z; |
| |
| // Load input and weights values |
| VEC_INT i = CONVERT(VLOAD(N0)(0, (__global DATA_TYPE *)(s_addr + s_offset)), VEC_INT); |
| VEC_INT w = CONVERT(VLOAD(N0)(0, (__global WEIGHTS_TYPE *)(w_addr + w_offset)), VEC_INT); |
| |
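|                         // Multiply-accumulate in S32, folding in the asymmetric zero-points of input and weights |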
| res += (i + (VEC_INT)INPUT_OFFSET) * (w + (VEC_INT)WEIGHTS_OFFSET); |
| } |
| x_coord_tmp += DILATION_X; |
| } |
| } |
| y_coord += DILATION_Y; |
| } |
| |
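|         // The bias is loaded directly in the S32 accumulator domain |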
| #if defined(HAS_BIAS) |
| VEC_INT bias = VLOAD(N0)(0, (__global int *)(b_addr)); |
| res += bias; |
| #endif // defined(HAS_BIAS) |
| |
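|         // Requantize to the output scale: the fixed-point multiplier/shift pair encodes |
|         // (input_scale * weights_scale / output_scale). Per-channel shifts can have either |
|         // sign, so both rescalings are computed and the right one is selected per lane |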
| #if defined(PER_CHANNEL_QUANTIZATION) |
| VEC_INT output_multiplier = VLOAD(N0)(0, (__global int *)(out_mul_addr)); |
| VEC_INT output_shift = VLOAD(N0)(0, (__global int *)(out_shift_addr)); |
| |
| VEC_INT res_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(res, output_multiplier, output_shift, N0); |
| VEC_INT res_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(res, output_multiplier, output_shift, N0); |
| res = select(res_shift_lt0, res_shift_gt0, (VEC_INT)(output_shift) >= 0); |
| #else // defined(PER_CHANNEL_QUANTIZATION) |
| #if OUTPUT_SHIFT < 0 |
| res = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(res, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, N0); |
| #else // OUTPUT_SHIFT < 0 |
| res = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(res, OUTPUT_MULTIPLIER, OUTPUT_SHIFT, N0); |
| #endif // OUTPUT_SHIFT < 0 |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| |
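|         // Add the output zero-point |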
| res += (VEC_INT)OUTPUT_OFFSET; |
| |
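|         // Saturating conversion to the 8-bit output type, followed by the fused activation |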
| VEC_TYPE(VEC_SIZE) |
| res0 = CONVERT_SAT(res, VEC_TYPE(VEC_SIZE)); |
| res0 = ACTIVATION_FUNC(res0); |
| |
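|         // STORE_VECTOR_SELECT stores res0, issuing a partial store for the leftover block handled by the first work-item along x |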
| STORE_VECTOR_SELECT(res, DATA_TYPE, d_addr, N0, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0) |
| |
| #if DEPTH_MULTIPLIER > 1 |
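|         // Advance the per-channel pointers to the next output channel generated from the same input channel |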
| w_addr += sizeof(WEIGHTS_TYPE); |
| d_addr += sizeof(DATA_TYPE); |
| #if defined(PER_CHANNEL_QUANTIZATION) |
| out_mul_addr += sizeof(int); |
| out_shift_addr += sizeof(int); |
| #endif // defined(PER_CHANNEL_QUANTIZATION) |
| #if defined(HAS_BIAS) |
| b_addr += sizeof(int); |
| #endif // defined(HAS_BIAS) |
| } |
| #endif // DEPTH_MULTIPLIER > 1 |
| } |
| #endif // defined(SRC_DIM1) && defined(SRC_DIM2) && defined(KERNEL_WIDTH) && defined(KERNEL_HEIGHT) && defined(N0) && defined(DILATION_X) && defined(DILATION_Y) && defined(CONV_STRIDE_X) && defined(CONV_STRIDE_Y) && defined(CONV_PAD_LEFT) && defined(CONV_PAD_TOP) && defined(INPUT_OFFSET) && defined(WEIGHTS_OFFSET) && defined(OUTPUT_OFFSET) && defined(OUTPUT_SHIFT) && defined(OUTPUT_MULTIPLIER) && defined(VEC_SIZE_LEFTOVER) |
| #endif // defined(DATA_TYPE) && defined(WEIGHTS_TYPE) |
| |
| )" |