R"(
/*
* Copyright (c) 2017-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef ARM_COMPUTE_HELPER_H
#define ARM_COMPUTE_HELPER_H
/** Store the 0 to (n-1)th rows of the given variables
* @name STORE_ROW_n
*
 * @param[in] N0 The width of the passed-in vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_n
/** Convert and store the 0th to (n-1)th rows of the given variables
* @name CONVERT_STORE_ROW_n
*
* @param[in] N0 The size of the vectors
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group CONVERT_STORE_ROW_n
/** Store a block of the given size M0xN0
* @name STORE_BLOCK
*
* Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group STORE_BLOCK
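// Illustrative usage (a sketch, not part of the helpers): assuming float4 accumulators
// c0..c3 and z offsets zout0..zout3 have already been declared, the following stores a
// 4x4 float block, expanding to one vstore4 per row:
//
//   STORE_BLOCK(4, 4, float, c, dst_addr, dst_stride_y, zout);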
/** Convert and store a block of the given size M0xN0
* @name CONVERT_STORE_BLOCK
*
* Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group CONVERT_STORE_BLOCK
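// Illustrative usage (a sketch): assuming int16 accumulators c0 and c1 and z offsets
// zout0 and zout1, the following saturating-converts each row to uchar16 before storing,
// i.e. each row expands to vstore16(convert_uchar16_sat(cN), ...):
//
//   CONVERT_STORE_BLOCK(2, 16, uchar, c, dst_addr, dst_stride_y, zout);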
/** Partially store the 0 to (n-1)th rows of the given variables
* @name STORE_ROW_PARTIAL_n
* Within each row, store the lower @p STORE_N0 elements of vectors of width @p N0
*
 * @note In case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
 * @param[in] N0 The width of the passed-in vector. Supported: 1, 2, 3, 4, 8, 16
 * @param[in] STORE_N0 The **lower** size of the vectors to store. Supported: 1-16 and <= @p N0
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_PARTIAL_n
/** Partially store a block of the given size STORE_M0xSTORE_N0
* @name STORE_BLOCK_PARTIAL
*
* @note The vector width @p N0 is also required for correct partial storing behaviour.
 * @note In case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for STORE_M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for STORE_M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] STORE_M0 The number of rows to store. Supported: 1-16
* @param[in] STORE_N0 The lower number of elements of vectors to store. Supported: 1-16 and <= @p N0
* @param[in] N0 The size of each vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
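// Illustrative usage (a sketch): assuming 8-wide float rows c0..c2, the following stores
// only the lower 5 elements of each of the 3 rows; each row expands to VSTORE_PARTIAL(8, 5),
// i.e. one vstore4 plus one vstore1:
//
//   STORE_BLOCK_PARTIAL(3, 5, 8, float, c, dst_addr, dst_stride_y, zout);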
/** Store a block that can be partial in both x and y dimensions
*
 * @note In case @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in x but not y.
*
 * @note In case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in y but not x.
*
 * @note In case @p N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
*/
#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \
if(!(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** @} */ // end of group STORE_BLOCK_PARTIAL
#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** Boundary-aware GEMM block store
* @name STORE_BLOCK_BOUNDARY_AWARE
* This macro assumes the following schemes to achieve boundary-awareness:
 * - Overlapping load in Y axis from lhs tensor. This implies lhs has no padding along y dim.
 * - Non-overlapping (normal) load from rhs tensor. This implies rhs can have padding.
 * - Overlapping load in Y axis from bias tensor. This implies bias has no padding along y dim.
* The macro then ensures that the dst tensor can be stored without any paddings in both x and y dim.
*
* In the y dimension, we place the partial blocks **at the beginning** while in the x dimension, we place the partial
* blocks **at the end**.
 * Say the dst tensor is of shape MxN and we have M0 and N0 as the block size; this is how we define "partial blocks"/
 * "boundary blocks" (we use the two terms interchangeably) and their various parameters:
*
* *--x--> x == 0 x == 1
* | |<------------------------------N-------------------------->|
* y |<--------------N0------------->|<----PARTIAL_STORE_N0----->|
* | -------------#############################################################
* * | | |...............................|...........................|
* y == 0 | PAR_..._M0 |......Boundary block in y......|.Boundary block in x and y.|
* | | |...............................|...........................|
* M --#############################################################
* | | | |...........................|
* y == 1 | M0 | Non-boundary block |....Boundary block in x....|
* | | | |...........................|
* |------------#############################################################
*
* Then @p PARTIAL_STORE_M0 = M % M0 and @p PARTIAL_STORE_N0 = N % N0
*
 * @note In case @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
 * It automatically detects if a given M,N,M0,N0 combination can yield partial blocks in either the X or Y dimension,
 * and selects the corresponding store methods such that the boundary detection logic is only added when needed.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported: [0, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
* @{
*/
#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case1: No partial blocks in either x or y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0
// Case2: Partial blocks in y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)
#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0
// Case3: Partial blocks in x
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)
#else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case4: Partial blocks in both x and y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)
#endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
#endif // defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** @} */ // end of group STORE_BLOCK_BOUNDARY_AWARE
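// Illustrative usage (a sketch; the exact conditions are kernel-specific): since partial
// blocks sit at the beginning in y and at the end in x, a calling kernel might compute the
// conditions from its block indices like so (N is the assumed dst width in elements):
//
//   const bool cond_y = (get_global_id(1) == 0);            // first block row holds the y-partial block
//   const bool cond_x = ((get_global_id(0) + 1) * N0 >= N); // last block column holds the x-partial block
//   STORE_BLOCK_BOUNDARY_AWARE(M0, N0, float, c, dst_addr, dst_stride_y, zout,
//                              PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);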
#if defined(PARTIAL_STORE_M0)
/** Compute the start m0 row (LHS, BIAS and DST) in a boundary-aware way so as to avoid padding
* @name COMPUTE_M0_START_ROW
 * If there are any partial blocks in the y dimension, they are placed at the beginning of the rows.
* This shift amount is added to all rows such that the partial block (at the beginning) overlaps with the subsequent
* blocks in the y dimension to avoid any padding.
* EG: M0=4, PARTIAL_STORE_M0=1:
* | Non-overlapping | +M0_ROW_SHIFT (Overlapping)
* block 0 (partial)| start row = 0 | start row = 0
* block 1 (full) | start row = 4 | start row = 1
* block 2 (full) | start row = 8 | start row = 5
*
* @param[in] y Global id of current block in y.
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @{
*/
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0))))
#else // defined(PARTIAL_STORE_M0)
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(y * M0))
#endif // defined(PARTIAL_STORE_M0)
/** @} */ // end of group COMPUTE_M0_START_ROW
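// Worked example for the table above (M0=4, PARTIAL_STORE_M0=1): the shift is
// (M0 - PARTIAL_STORE_M0) % M0 = 3, so
//   COMPUTE_M0_START_ROW(0, 4, 1) = max(0, 0 - 3) = 0
//   COMPUTE_M0_START_ROW(1, 4, 1) = max(0, 4 - 3) = 1
//   COMPUTE_M0_START_ROW(2, 4, 1) = max(0, 8 - 3) = 5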
/** Store a vector that can only be partial in x.
*
 * @note In case @p vec_size or @p leftover != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
 * The data to store is expected to have its name end in 0.
* E.g., for basename=c, the expected name is c0.
*
* @param[in] basename The name of the variable without trailing 0
* @param[in] data_type The data type of the vector
* @param[in] ptr The base pointer
* @param[in] vec_size The vector size if cond = false. Supported: 1, 2, 3, 4, 8, 16
 * @param[in] leftover The vector size if cond = true. Supported range: [1, @p vec_size)
 * @param[in] cond Condition to select either @p vec_size or @p leftover as the store size
* @{
*/
#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \
STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond)
/** @} */ // end of group STORE_VECTOR_SELECT
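// Illustrative usage (a sketch): assuming a float16 vector c0, the following stores it in
// full when cond is false and only its lower 3 elements (a single vstore3) when cond is true:
//
//   STORE_VECTOR_SELECT(c, float, dst_addr, 16, 3, cond);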
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#pragma OPENCL EXTENSION cl_arm_printf : enable
#endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
/** Concatenate two inputs.
*
* @param[in] a The first input to be concatenated
* @param[in] b The second input to be concatenated
*
* @return The concatenated output
*/
#define CONCAT(a, b) a##b
/** Expand the given vector
*
* @param[in] x The vector to be expanded
*
* @return The expanded output
*/
#define EXPAND(x) x
/** Clamp the given value between an upper and lower bound.
*
* @param[in] x The value to be clamped
* @param[in] min_val The lower bound
* @param[in] max_val The upper bound
*
* @return The clamped value.
*/
#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
/** REVn reverses the given vector whose size is n.
* @name REVn
*
* @param[in] x The vector to be reversed
*
* @return The reversed vector
* @{
*/
#define REV1(x) ((x))
#define REV2(x) ((x).s10)
#define REV3(x) ((x).s210)
#define REV4(x) ((x).s3210)
#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)
/** @} */ // end of group REVn
/** Reverse the given vector.
* @name REVERSE
*
* @param[in] x The vector to be reversed
* @param[in] s The size of the vector
*
* @return The reversed vector
* @{
*/
#define REVERSE_STR(x, s) REV##s((x))
#define REVERSE(x, s) REVERSE_STR(x, s)
/** @} */ // end of group REVERSE
/** Circular-right-shift (rotate-right) the vector of size s by the amount of n.
* @name ROTs_n
*
* @param[in] x The vector to be shifted
*
* @return The shifted vector
* @{
*/
#define ROT1_0(x) ((x))
#define ROT2_0(x) ((x))
#define ROT2_1(x) ((x).s10)
#define ROT3_0(x) ((x))
#define ROT3_1(x) ((x).s201)
#define ROT3_2(x) ((x).s120)
#define ROT4_0(x) ((x))
#define ROT4_1(x) ((x).s3012)
#define ROT4_2(x) ((x).s2301)
#define ROT4_3(x) ((x).s1230)
#define ROT8_0(x) ((x))
#define ROT8_1(x) ((x).s70123456)
#define ROT8_2(x) ((x).s67012345)
#define ROT8_3(x) ((x).s56701234)
#define ROT8_4(x) ((x).s45670123)
#define ROT8_5(x) ((x).s34567012)
#define ROT8_6(x) ((x).s23456701)
#define ROT8_7(x) ((x).s12345670)
#define ROT16_0(x) ((x))
#define ROT16_1(x) ((x).sF0123456789ABCDE)
#define ROT16_2(x) ((x).sEF0123456789ABCD)
#define ROT16_3(x) ((x).sDEF0123456789ABC)
#define ROT16_4(x) ((x).sCDEF0123456789AB)
#define ROT16_5(x) ((x).sBCDEF0123456789A)
#define ROT16_6(x) ((x).sABCDEF0123456789)
#define ROT16_7(x) ((x).s9ABCDEF012345678)
#define ROT16_8(x) ((x).s89ABCDEF01234567)
#define ROT16_9(x) ((x).s789ABCDEF0123456)
#define ROT16_10(x) ((x).s6789ABCDEF012345)
#define ROT16_11(x) ((x).s56789ABCDEF01234)
#define ROT16_12(x) ((x).s456789ABCDEF0123)
#define ROT16_13(x) ((x).s3456789ABCDEF012)
#define ROT16_14(x) ((x).s23456789ABCDEF01)
#define ROT16_15(x) ((x).s123456789ABCDEF0)
/** @} */ // end of group ROTs_n
/** Circular-right-shift (rotate-right) the given vector by the given amount.
* @name ROTATE
*
* @param[in] x The vector to be shifted
* @param[in] s The size of the vector
* @param[in] n The amount to be shifted
*
* @return The shifted vector
* @{
*/
#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
#define ROTATE(x, s, n) ROTATE_STR(x, s, n)
/** @} */ // end of group ROTATE
/** Creates a vector of size n filled with offset values corresponding to the location of each element.
* @name V_OFFSn
*
* @param[in] dt The data type of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define V_OFFS1(dt) (dt##1)(0)
#define V_OFFS2(dt) (dt##2)(0, 1)
#define V_OFFS3(dt) (dt##3)(0, 1, 2)
#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
/** @} */ // end of group V_OFFSn
/** Create a vector filled with offset values corresponding to the location of each element.
* @name VEC_OFFS
*
* @param[in] dt The data type of the output vector
* @param[in] s The size of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
/** @} */ // end of group VEC_OFFS
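// Illustrative expansion: VEC_OFFS(int, 4) resolves to V_OFFS4(int), i.e. (int4)(0, 1, 2, 3).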
#define VLOAD_STR(size) vload##size
#define VLOAD(size) VLOAD_STR(size)
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4
/** Utility macro to convert a vector size to a pixel unit.
*
* @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
*
 * @param[in] vec_size Vector size. Only 4, 8 and 16 are supported
*
* @return The pixel unit (number of pixels)
* @{
*/
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
/** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
/** Utility macro to read a 2D OpenCL image object.
*
* @note Coordinates are not normalized
*
* @param[in] data_type Data type
 * @param[in] n0 Number of pixels to read. Only 1, 2 and 4 are supported
* @param[in] img OpenCL image object
* @param[in] x_coord The x coordinate for the top-left pixel
* @param[in] y_coord The y coordinate for the top-left pixel
*
* @return Pixels from the 2D OpenCL image object
* @{
*/
#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)
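// Illustrative usage (a sketch): read 4 consecutive float4 pixels starting at
// (x_coord, y_coord), packed into a float16. Note the trailing semicolon is
// supplied by the read_image2d_* macro itself:
//
//   float16 pix = READ_IMAGE2D(float, 4, img, x_coord, y_coord)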
#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)
#define float1 float
#define half1 half
#define char1 char
#define uchar1 uchar
#define short1 short
#define ushort1 ushort
#define int1 int
#define uint1 uint
#define long1 long
#define ulong1 ulong
#define double1 double
#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA
/** Extended partial vstore that correctly handles scalar values as well.
* Store the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vstore ops
* @name VSTORE_PARTIAL
*
* @note With this macro, the passed data can be both a vector and a scalar
* @note @p store_size needs to be <= @p size
* eg 1: Valid
* VSTORE_PARTIAL(16, 15) ...;
* eg 2: Invalid
* VSTORE_PARTIAL(4, 7) ...;
*
 * @param[in] size The width of @p DATA. Supported values: 1 (scalar), 2, 3, 4, 8, 16
* @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size
* @{
*/
#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)
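// Illustrative expansion: VSTORE_PARTIAL(8, 5)(data, 0, ptr) resolves to
// vstore_partial_8_5 and then vstore_partial_5, i.e. a vstore4 of the lower four
// elements of data followed by a vstore1 of data.s4 at ptr + 4.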
#define NO_STORE(data, offs, ptr) \
{ \
}
// Size == 1 (scalar)
#define vstore_partial_1_0 NO_STORE
#define vstore_partial_1_1 vstore1
#define vstore_partial_1_2 NO_STORE
#define vstore_partial_1_3 NO_STORE
#define vstore_partial_1_4 NO_STORE
#define vstore_partial_1_5 NO_STORE
#define vstore_partial_1_6 NO_STORE
#define vstore_partial_1_7 NO_STORE
#define vstore_partial_1_8 NO_STORE
#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
#define vstore_partial_1_13 NO_STORE
#define vstore_partial_1_14 NO_STORE
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE
// Size == 2
#define vstore_partial_2_0 NO_STORE
#define vstore_partial_2_1 vstore_partial_1
#define vstore_partial_2_2 vstore_partial_2
#define vstore_partial_2_3 NO_STORE
#define vstore_partial_2_4 NO_STORE
#define vstore_partial_2_5 NO_STORE
#define vstore_partial_2_6 NO_STORE
#define vstore_partial_2_7 NO_STORE
#define vstore_partial_2_8 NO_STORE
#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
#define vstore_partial_2_13 NO_STORE
#define vstore_partial_2_14 NO_STORE
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE
// Size == 3
#define vstore_partial_3_0 NO_STORE
#define vstore_partial_3_1 vstore_partial_1
#define vstore_partial_3_2 vstore_partial_2
#define vstore_partial_3_3 vstore_partial_3
#define vstore_partial_3_4 NO_STORE
#define vstore_partial_3_5 NO_STORE
#define vstore_partial_3_6 NO_STORE
#define vstore_partial_3_7 NO_STORE
#define vstore_partial_3_8 NO_STORE
#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
#define vstore_partial_3_13 NO_STORE
#define vstore_partial_3_14 NO_STORE
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE
// Size == 4
#define vstore_partial_4_0 NO_STORE
#define vstore_partial_4_1 vstore_partial_1
#define vstore_partial_4_2 vstore_partial_2
#define vstore_partial_4_3 vstore_partial_3
#define vstore_partial_4_4 vstore_partial_4
#define vstore_partial_4_5 NO_STORE
#define vstore_partial_4_6 NO_STORE
#define vstore_partial_4_7 NO_STORE
#define vstore_partial_4_8 NO_STORE
#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
#define vstore_partial_4_13 NO_STORE
#define vstore_partial_4_14 NO_STORE
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE
// Size == 8
#define vstore_partial_8_0 NO_STORE
#define vstore_partial_8_1 vstore_partial_1
#define vstore_partial_8_2 vstore_partial_2
#define vstore_partial_8_3 vstore_partial_3
#define vstore_partial_8_4 vstore_partial_4
#define vstore_partial_8_5 vstore_partial_5
#define vstore_partial_8_6 vstore_partial_6
#define vstore_partial_8_7 vstore_partial_7
#define vstore_partial_8_8 vstore_partial_8
#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
#define vstore_partial_8_13 NO_STORE
#define vstore_partial_8_14 NO_STORE
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE
// Size == 16
#define vstore_partial_16_0 NO_STORE
#define vstore_partial_16_1 vstore_partial_1
#define vstore_partial_16_2 vstore_partial_2
#define vstore_partial_16_3 vstore_partial_3
#define vstore_partial_16_4 vstore_partial_4
#define vstore_partial_16_5 vstore_partial_5
#define vstore_partial_16_6 vstore_partial_6
#define vstore_partial_16_7 vstore_partial_7
#define vstore_partial_16_8 vstore_partial_8
#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
#define vstore_partial_16_13 vstore_partial_13
#define vstore_partial_16_14 vstore_partial_14
#define vstore_partial_16_15 vstore_partial_15
#define vstore_partial_16_16 vstore_partial_16
/** Partial vstore. Store the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vstore ops
* @name vstore_partial_n
*
* @note @p DATA needs to be a vector not a scalar
* @note n needs to be <= the vector width of the input variable @p DATA
* eg 1: Valid
* vstore_partial_15(var:float16, 0, 0xabcd);
* eg 2: Invalid
* vstore_partial_7(var:float4, 0, 0xabcd);
*
* @note in cases n == 1, 2, 3, 4, 8, 16, no extra vstore is invoked, thus there's no performance penalty.
*
* @param[in] DATA The name of the variable
* @param[in] OFFSET Offset in n
* @param[in] PTR The base pointer
* @{
*/
#define vstore_partial_1(DATA, OFFSET, PTR) \
vstore1(DATA.s0, OFFSET, PTR);
#define vstore_partial_2(DATA, OFFSET, PTR) \
vstore2(DATA.s01, OFFSET, PTR);
#define vstore_partial_3(DATA, OFFSET, PTR) \
vstore3(DATA.s012, OFFSET, PTR);
#define vstore_partial_4(DATA, OFFSET, PTR) \
vstore4(DATA.s0123, OFFSET, PTR);
#define vstore_partial_5(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore1(DATA.s4, OFFSET, PTR + 4);
#define vstore_partial_6(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_2(DATA.s45, OFFSET, PTR + 4);
#define vstore_partial_7(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_3(DATA.s456, OFFSET, PTR + 4);
#define vstore_partial_8(DATA, OFFSET, PTR) \
vstore8(DATA.s01234567, OFFSET, PTR);
#define vstore_partial_9(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore1(DATA.s8, OFFSET, PTR + 8);
#define vstore_partial_10(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_2(DATA.s89, OFFSET, PTR + 8);
#define vstore_partial_11(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);
#define vstore_partial_12(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);
#define vstore_partial_13(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_14(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_15(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_16(DATA, OFFSET, PTR) \
vstore16(DATA, OFFSET, PTR);
/** @} */ // end of group vstore_partial_n
/** @} */ // end of group VSTORE_PARTIAL
// The convert built-in functions with the _sat modifier are not supported for floating point types,
// so we create defines without _sat to overcome this issue
#define convert_float_sat convert_float
#define convert_float1_sat convert_float
#define convert_float2_sat convert_float2
#define convert_float3_sat convert_float3
#define convert_float4_sat convert_float4
#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
#define convert_half_sat convert_half
#define convert_half1_sat convert_half
#define convert_half2_sat convert_half2
#define convert_half3_sat convert_half3
#define convert_half4_sat convert_half4
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16
#define convert_float1 convert_float
#define convert_half1 convert_half
#define convert_char1 convert_char
#define convert_uchar1 convert_uchar
#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#define convert_long1 convert_long
#define convert_ulong1 convert_ulong
#define convert_double1 convert_double
#define convert_char1_sat convert_char_sat
#define convert_uchar1_sat convert_uchar_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
#define convert_uint1_sat convert_uint_sat
#define convert_long1_sat convert_long_sat
#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat
#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)
#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
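// Illustrative expansions: CONVERT_SAT(acc, VEC_DATA_TYPE(uchar, 4)) resolves to
// convert_uchar4_sat(acc), and CONVERT(x, float4) resolves to convert_float4(x).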
#define select_vec_dt_uchar(size) uchar##size
#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
#define select_vec_dt_short(size) short##size
#define select_vec_dt_half(size) short##size
#define select_vec_dt_uint(size) uint##size
#define select_vec_dt_int(size) int##size
#define select_vec_dt_float(size) int##size
#define select_vec_dt_ulong(size) ulong##size
#define select_vec_dt_long(size) long##size
#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) ((x).s0) + ((x).s1)
#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)
#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))
#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
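// Illustrative usage (a sketch): horizontally reduce a float8 vector v:
//
//   float s = SUM_REDUCE(v, 8); // pairwise sums: (v.s0 + v.s1) + ... over all 8 lanes
//   float m = MAX_REDUCE(v, 8); // pairwise max over all 8 lanes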
#define VECTOR_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_offset_first_element_in_bytes
#define IMAGE_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_offset_first_element_in_bytes
#define TENSOR3D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_offset_first_element_in_bytes
#define TENSOR4D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_stride_w, \
uint name##_step_w, \
uint name##_offset_first_element_in_bytes
#define CONVERT_TO_VECTOR_STRUCT(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)
#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)
#define CONVERT_TO_IMAGE_STRUCT(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)
#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)
#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)
#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
/** Structure to hold Vector information */
typedef struct Vector
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source vector */
int stride_x; /**< Stride of the vector in X dimension (in bytes) */
} Vector;
/** Structure to hold Image information */
typedef struct Image
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
int stride_x; /**< Stride of the image in X dimension (in bytes) */
int stride_y; /**< Stride of the image in Y dimension (in bytes) */
} Image;
/** Structure to hold 3D tensor information */
typedef struct Tensor3D
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */
int stride_x; /**< Stride of the tensor in X dimension (in bytes) */
int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
} Tensor3D;
/** Structure to hold 4D tensor information */
typedef struct Tensor4D
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */
int stride_x; /**< Stride of the tensor in X dimension (in bytes) */
int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
int stride_w; /**< Stride of the tensor in W dimension (in bytes) */
} Tensor4D;
/** Wrap vector information into a Vector structure, and make the pointer point at this workitem's data.
 *
 * @param[in] ptr Pointer to the starting position of the buffer
 * @param[in] offset_first_element_in_bytes The offset of the first element in the source vector
 * @param[in] stride_x Stride of the vector in X dimension (in bytes)
 * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
 *
 * @return A vector object
*/
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
Vector vector =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
};
vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
return vector;
}
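// Usage sketch (illustrative only; the kernel and argument names are hypothetical,
// and float data is assumed):
//
//   __kernel void set_to_zero(VECTOR_DECLARATION(dst))
//   {
//       Vector dst = CONVERT_TO_VECTOR_STRUCT(dst);
//       *((__global float *)dst.ptr) = 0.0f;
//   }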
/** Wrap image information into an Image structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
*
* @return An image object
*/
inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
return img;
}
/** Wrap 3D tensor information into an image structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the image in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
*
* @return An image object
*/
inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return img;
}
/** Wrap 3D tensor information into a tensor structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the image in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return tensor;
}
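// Sketch (illustrative; 'src' is a hypothetical TENSOR3D_DECLARATION argument):
// after Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src); the wrapped pointer equals
//
//   src_ptr + src_offset_first_element_in_bytes
//           + get_global_id(0) * src_step_x
//           + get_global_id(1) * src_step_y
//           + get_global_id(2) * src_step_z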
/** Wrap 3D tensor information into a tensor structure, without updating the pointer to this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the image in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
return tensor;
}
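/** Wrap 4D tensor information into a tensor structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
* @param[in] stride_x Stride of the tensor in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] stride_w Stride of the tensor in W dimension (in bytes)
* @param[in] step_w stride_w * number of elements along W processed per workitem(in bytes)
* @param[in] mod_size The size of the Z dimension, used to split the third global id into a Z component (id % mod_size) and a W component (id / mod_size)
*
* @return A 4D tensor object
*/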
inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
uint step_w,
uint mod_size)
{
Tensor4D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z,
.stride_w = stride_w
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
return tensor;
}
/** Get a pointer to an element of a Vector at a relative position
*
* @param[in] vec Pointer to the Vector structure
* @param[in] x Relative X position
*/
inline __global const uchar *vector_offset(const Vector *vec, int x)
{
return vec->ptr + x * vec->stride_x;
}
/** Get a pointer to an element of an Image at a relative position
*
* @param[in] img Pointer to the Image structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
*/
inline __global uchar *offset(const Image *img, int x, int y)
{
return img->ptr + x * img->stride_x + y * img->stride_y;
}
/** Get a pointer to an element of a Tensor3D at a relative position
*
* @param[in] tensor Pointer to the Tensor3D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
*/
inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}
/** Get a pointer to an element of a Tensor4D at a relative position
*
* @param[in] tensor Pointer to the Tensor4D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
* @param[in] w Relative W position
*/
inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}
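// Usage sketch (illustrative; 'src' is a hypothetical IMAGE_DECLARATION argument):
//
//   Image img = CONVERT_TO_IMAGE_STRUCT(src);
//   __global uchar *below = offset(&img, 0, 1); // element at (x + 0, y + 1)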
/** Get the offset for a given linear index of a Tensor3D
*
* @param[in] tensor Pointer to the Tensor3D structure
* @param[in] width Width of the input tensor
* @param[in] height Height of the input tensor
* @param[in] depth Depth of the input tensor
* @param[in] index Linear index
*/
inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
uint num_elements = width * height;
const uint z = index / num_elements;
index %= num_elements;
const uint y = index / width;
index %= width;
const uint x = index;
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}
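// Worked example (values chosen for illustration): with width=4, height=3, depth=2,
// linear index 17 decomposes as z = 17 / 12 = 1, remainder 5, then y = 5 / 4 = 1 and
// x = 1, so the returned pointer addresses element (x, y, z) = (1, 1, 1).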
#endif // ARM_COMPUTE_HELPER_H
#define ADD_OP(a, b) ((a) + (b))
#define SUB_OP(a, b) ((a) - (b))
#define MUL_OP(a, b) ((a) * (b))
#define INVSQRT_OP(a) rsqrt((a))
#define SQCVT_SAT(a) (a)
#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(ACTIVATION_TYPE)
#ifndef ARM_COMPUTE_HELPER_H
#define ARM_COMPUTE_HELPER_H
/** Store the 0 to (n-1)th rows of the given variables
* @name STORE_ROW_n
*
* @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_n
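// Expansion sketch (illustrative; dst_addr, dst_stride_y and the zin offsets are
// hypothetical names): STORE_ROW_2(4, float, c, dst_addr, dst_stride_y, zin) becomes
//
//   vstore4(c0, 0, (__global float *)(dst_addr + 0 * dst_stride_y + zin0));
//   vstore4(c1, 0, (__global float *)(dst_addr + 1 * dst_stride_y + zin1));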
/** Convert and store the 0th to (n-1)th rows of the given variables
* @name CONVERT_STORE_ROW_n
*
* @param[in] N0 The size of the vectors
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group CONVERT_STORE_ROW_n
/** Store a block of the given size M0xN0
* @name STORE_BLOCK
*
* Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group STORE_BLOCK
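// Note on the extra _STR level: STORE_BLOCK expands its arguments first, so an M0
// that is itself a macro (e.g. a -DM0=3 build option) is resolved before being
// token-pasted into STORE_ROW_##M0. E.g. STORE_BLOCK(M0, N0, half, c, dst_addr,
// dst_stride_y, zin) with -DM0=3 -DN0=8 stores c0, c1 and c2 with vstore8.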
/** Convert and store a block of the given size M0xN0
* @name CONVERT_STORE_BLOCK
*
* Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group CONVERT_STORE_BLOCK
/** Partially store the 0 to (n-1)th rows of the given variables
* @name STORE_ROW_PARTIAL_n
* Within each row, store the lower @p STORE_N0 elements of vectors of width @p N0
*
* @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] STORE_N0 The **lower** size of the vectors to store. Supported: 1-16 and <= @p N0
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_PARTIAL_n
/** Partially store a block of the given size STORE_M0xSTORE_N0
* @name STORE_BLOCK_PARTIAL
*
* @note The vector width @p N0 is also required for correct partial storing behaviour.
* @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for STORE_M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for STORE_M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] STORE_M0 The number of rows to store. Supported: 1-16
* @param[in] STORE_N0 The lower number of elements of vectors to store. Supported: 1-16 and <= @p N0
* @param[in] N0 The size of each vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** Store a block that can be partial in both x and y dimensions
*
* @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in x but not y.
*
* @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in y but not x.
*
* @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
*/
#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \
if(!(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** @} */ // end of group STORE_BLOCK_PARTIAL
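// Usage sketch (illustrative, hypothetical values): storing a 7x10 region with
// M0=4, N0=8 blocks gives PARTIAL_STORE_M0 = 7 % 4 = 3 and PARTIAL_STORE_N0 =
// 10 % 8 = 2, so a block for which both conditions hold stores a 3x2 corner:
//
//   STORE_BLOCK_PARTIAL_IN_X_AND_Y(4, 8, float, c, dst_addr, dst_stride_y, zin,
//                                  3, 2, PARTIAL_COND_Y, PARTIAL_COND_X);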
#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** Boundary-aware GEMM block store
* @name STORE_BLOCK_BOUNDARY_AWARE
* This macro assumes the following schemes to achieve boundary-awareness:
* - Overlapping load in Y axis from lhs tensor. This implies lhs has no padding along y dim.
* - Non-Overlapping(normal) load from rhs tensor. This implies rhs can have paddings.
* - Overlapping load in Y axis from bias tensor. This implies bias has no padding along y dim.
* The macro then ensures that the dst tensor can be stored without any paddings in both x and y dim.
*
* In the y dimension, we place the partial blocks **at the beginning** while in the x dimension, we place the partial
* blocks **at the end**.
* Say the dst tensor is of shape MxN and we have M0 and N0 as the block sizes. This is how we define "partial blocks" /
* "boundary blocks" (the two terms are used interchangeably) and their various parameters:
*
* *--x--> x == 0 x == 1
* | |<------------------------------N-------------------------->|
* y |<--------------N0------------->|<----PARTIAL_STORE_N0----->|
* | -------------#############################################################
* * | | |...............................|...........................|
* y == 0 | PAR_..._M0 |......Boundary block in y......|.Boundary block in x and y.|
* | | |...............................|...........................|
* M --#############################################################
* | | | |...........................|
* y == 1 | M0 | Non-boundary block |....Boundary block in x....|
* | | | |...........................|
* |------------#############################################################
*
* Then @p PARTIAL_STORE_M0 = M % M0 and @p PARTIAL_STORE_N0 = N % N0
*
* @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* It automatically detects if a given M,N,M0,N0 combination can yield partial blocks in either the X or Y dimension,
* and selects the corresponding store methods such that the boundary detection logic is only added when needed.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported: [0, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
* @{
*/
#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case1: No partial blocks in either x or y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0
// Case2: Partial blocks in y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)
#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0
// Case3: Partial blocks in x
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)
#else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case4: Partial blocks in both x and y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)
#endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
#endif // defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** @} */ // end of group STORE_BLOCK_BOUNDARY_AWARE
#if defined(PARTIAL_STORE_M0)
/** Compute the start m0 row (LHS, BIAS and DST) in a boundary-aware way so as to avoid padding
* @name COMPUTE_M0_START_ROW
* If there are any partial blocks in y dimension, they are placed at the beginning of the rows.
* This shift amount is added to all rows such that the partial block (at the beginning) overlaps with the subsequent
* blocks in the y dimension to avoid any padding.
* EG: M0=4, PARTIAL_STORE_M0=1:
* | Non-overlapping | +M0_ROW_SHIFT (Overlapping)
* block 0 (partial)| start row = 0 | start row = 0
* block 1 (full) | start row = 4 | start row = 1
* block 2 (full) | start row = 8 | start row = 5
*
* @param[in] y Global id of current block in y.
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @{
*/
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0))))
#else // defined(PARTIAL_STORE_M0)
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(y * M0))
#endif // defined(PARTIAL_STORE_M0)
/** @} */ // end of group COMPUTE_M0_START_ROW
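// Worked example: with M0=4 and PARTIAL_STORE_M0=1 the shift is (4 - 1) % 4 = 3, so
// COMPUTE_M0_START_ROW(y, 4, 1) yields max(0, 0 - 3) = 0 for y=0, max(0, 4 - 3) = 1
// for y=1 and max(0, 8 - 3) = 5 for y=2, matching the overlapping column above.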
/** Store a vector that can only be partial in x.
*
* @note in case @p vec_size or @p leftover != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to end in a 0.
* E.g., for basename=c, the expected name is c0.
*
* @param[in] basename The name of the variable without trailing 0
* @param[in] data_type The data type of the vector
* @param[in] ptr The base pointer
* @param[in] vec_size The vector size if cond = false. Supported: 1, 2, 3, 4, 8, 16
* @param[in] leftover The vector size if cond = true. Supported range: [1, @p vec_size)
* @param[in] cond Condition to select either @p vec_size or @p leftover
* @{
*/
#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \
STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond)
/** @} */ // end of group STORE_VECTOR_SELECT
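// Usage sketch (illustrative; res0, DATA_TYPE, dst_addr, VEC_SIZE and
// VEC_SIZE_LEFTOVER are hypothetical names): store a result vector res0, writing
// only the leftover elements when the condition holds. STORE_VECTOR_SELECT passes
// 0 for both STRIDE_Y and Z, so Z##0 token-pastes to 00, i.e. a zero offset.
//
//   STORE_VECTOR_SELECT(res, DATA_TYPE, dst_addr, VEC_SIZE, VEC_SIZE_LEFTOVER,
//                       VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0);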
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#pragma OPENCL EXTENSION cl_arm_printf : enable
#endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
/** Concatenate two inputs.
*
* @param[in] a The first input to be concatenated
* @param[in] b The second input to be concatenated
*
* @return The concatenated output
*/
#define CONCAT(a, b) a##b
/** Expand the given vector
*
* @param[in] x The vector to be expanded
*
* @return The expanded output
*/
#define EXPAND(x) x
/** Clamp the given value between an upper and lower bound.
*
* @param[in] x The value to be clamped
* @param[in] min_val The lower bound
* @param[in] max_val The upper bound
*
* @return The clamped value.
*/
#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
/** REVn reverses the given vector whose size is n.
* @name REVn
*
* @param[in] x The vector to be reversed
*
* @return The reversed vector
* @{
*/
#define REV1(x) ((x))
#define REV2(x) ((x).s10)
#define REV3(x) ((x).s210)
#define REV4(x) ((x).s3210)
#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)
/** @} */ // end of group REVn
/** Reverse the given vector.
* @name REVERSE
*
* @param[in] x The vector to be reversed
* @param[in] s The size of the vector
*
* @return The reversed vector
* @{
*/
#define REVERSE_STR(x, s) REV##s((x))
#define REVERSE(x, s) REVERSE_STR(x, s)
/** @} */ // end of group REVERSE
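// Example: REVERSE(x, 4) resolves to REV4(x), i.e. the swizzle (x).s3210, so a
// float4 holding (1, 2, 3, 4) becomes (4, 3, 2, 1).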
/** Circular-right-shift (rotate-right) the vector of size s by n elements.
* @name ROTs_n
*
* @param[in] x The vector to be shifted
*
* @return The shifted vector
* @{
*/
#define ROT1_0(x) ((x))
#define ROT2_0(x) ((x))
#define ROT2_1(x) ((x).s10)
#define ROT3_0(x) ((x))
#define ROT3_1(x) ((x).s201)
#define ROT3_2(x) ((x).s120)
#define ROT4_0(x) ((x))
#define ROT4_1(x) ((x).s3012)
#define ROT4_2(x) ((x).s2301)
#define ROT4_3(x) ((x).s1230)
#define ROT8_0(x) ((x))
#define ROT8_1(x) ((x).s70123456)
#define ROT8_2(x) ((x).s67012345)
#define ROT8_3(x) ((x).s56701234)
#define ROT8_4(x) ((x).s45670123)
#define ROT8_5(x) ((x).s34567012)
#define ROT8_6(x) ((x).s23456701)
#define ROT8_7(x) ((x).s12345670)
#define ROT16_0(x) ((x))
#define ROT16_1(x) ((x).sF0123456789ABCDE)
#define ROT16_2(x) ((x).sEF0123456789ABCD)
#define ROT16_3(x) ((x).sDEF0123456789ABC)
#define ROT16_4(x) ((x).sCDEF0123456789AB)
#define ROT16_5(x) ((x).sBCDEF0123456789A)
#define ROT16_6(x) ((x).sABCDEF0123456789)
#define ROT16_7(x) ((x).s9ABCDEF012345678)
#define ROT16_8(x) ((x).s89ABCDEF01234567)
#define ROT16_9(x) ((x).s789ABCDEF0123456)
#define ROT16_10(x) ((x).s6789ABCDEF012345)
#define ROT16_11(x) ((x).s56789ABCDEF01234)
#define ROT16_12(x) ((x).s456789ABCDEF0123)
#define ROT16_13(x) ((x).s3456789ABCDEF012)
#define ROT16_14(x) ((x).s23456789ABCDEF01)
#define ROT16_15(x) ((x).s123456789ABCDEF0)
/** @} */ // end of group ROTs_n
/** Circular-right-shift (rotate-right) the given vector by the given amount.
* @name ROTATE
*
* @param[in] x The vector to be shifted
* @param[in] s The size of the vector
* @param[in] n The amount to be shifted
*
* @return The shifted vector
* @{
*/
#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
#define ROTATE(x, s, n) ROTATE_STR(x, s, n)
/** @} */ // end of group ROTATE
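// Example: ROTATE(x, 4, 1) resolves to ROT4_1(x) = (x).s3012, so a float4 holding
// (1, 2, 3, 4) rotated right by one lane becomes (4, 1, 2, 3).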
/** Creates a vector of size n filled with offset values corresponding to the location of each element.
* @name V_OFFSn
*
* @param[in] dt The data type of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define V_OFFS1(dt) (dt##1)(0)
#define V_OFFS2(dt) (dt##2)(0, 1)
#define V_OFFS3(dt) (dt##3)(0, 1, 2)
#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
/** @} */ // end of group V_OFFSn
/** Create a vector filled with offset values corresponding to the location of each element.
* @name VEC_OFFS
*
* @param[in] dt The data type of the output vector
* @param[in] s The size of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
/** @} */ // end of group VEC_OFFS
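// Example: VEC_OFFS(int, 4) expands to (int4)(0, 1, 2, 3), a convenient base for
// per-lane offset or coordinate computations.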
#define VLOAD_STR(size) vload##size
#define VLOAD(size) VLOAD_STR(size)
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4
/** Utility macro to convert a vector size to a pixel unit.
 *
 * @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
 *
 * @param[in] vec_size Vector size. Only 4, 8 and 16 are supported
*
* @return The pixel unit (number of pixels)
* @{
*/
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
/** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
/** Utility macro to read a 2D OpenCL image object.
*
* @note Coordinates are not normalized
*
* @param[in] data_type Data type
* @param[in] n0 Number of pixels to read. Only 1, 2 and 4 are supported
* @param[in] img OpenCL image object
* @param[in] x_coord The x coordinate for the top-left pixel
* @param[in] y_coord The y coordinate for the top-left pixel
*
* @return Pixels from the 2D OpenCL image object
* @{
*/
#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)
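// Usage sketch (illustrative; 'lhs_img' and the coordinates are hypothetical):
//
//   float8 p = READ_IMAGE2D(float, 2, lhs_img, x, y); // pixels (x, y) and (x + 1, y)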
#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)
#define float1 float
#define half1 half
#define char1 char
#define uchar1 uchar
#define short1 short
#define ushort1 ushort
#define int1 int
#define uint1 uint
#define long1 long
#define ulong1 ulong
#define double1 double
#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA
/** Extended partial vstore that correctly handles scalar values as well.
* Store the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vstore ops
* @name VSTORE_PARTIAL
*
* @note With this macro, the passed data can be either a vector or a scalar
* @note @p store_size needs to be <= @p size
* eg 1: Valid
* VSTORE_PARTIAL(16, 15) ...;
* eg 2: Invalid
* VSTORE_PARTIAL(4, 7) ...;
*
* @param[in] size The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16
* @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size
* @{
*/
#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)
#define NO_STORE(data, offs, ptr) \
{ \
}
// Size == 1 (scalar)
#define vstore_partial_1_0 NO_STORE
#define vstore_partial_1_1 vstore1
#define vstore_partial_1_2 NO_STORE
#define vstore_partial_1_3 NO_STORE
#define vstore_partial_1_4 NO_STORE
#define vstore_partial_1_5 NO_STORE
#define vstore_partial_1_6 NO_STORE
#define vstore_partial_1_7 NO_STORE
#define vstore_partial_1_8 NO_STORE
#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
#define vstore_partial_1_13 NO_STORE
#define vstore_partial_1_14 NO_STORE
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE
// Size == 2
#define vstore_partial_2_0 NO_STORE
#define vstore_partial_2_1 vstore_partial_1
#define vstore_partial_2_2 vstore_partial_2
#define vstore_partial_2_3 NO_STORE
#define vstore_partial_2_4 NO_STORE
#define vstore_partial_2_5 NO_STORE
#define vstore_partial_2_6 NO_STORE
#define vstore_partial_2_7 NO_STORE
#define vstore_partial_2_8 NO_STORE
#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
#define vstore_partial_2_13 NO_STORE
#define vstore_partial_2_14 NO_STORE
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE
// Size == 3
#define vstore_partial_3_0 NO_STORE
#define vstore_partial_3_1 vstore_partial_1
#define vstore_partial_3_2 vstore_partial_2
#define vstore_partial_3_3 vstore_partial_3
#define vstore_partial_3_4 NO_STORE
#define vstore_partial_3_5 NO_STORE
#define vstore_partial_3_6 NO_STORE
#define vstore_partial_3_7 NO_STORE
#define vstore_partial_3_8 NO_STORE
#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
#define vstore_partial_3_13 NO_STORE
#define vstore_partial_3_14 NO_STORE
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE
// Size == 4
#define vstore_partial_4_0 NO_STORE
#define vstore_partial_4_1 vstore_partial_1
#define vstore_partial_4_2 vstore_partial_2
#define vstore_partial_4_3 vstore_partial_3
#define vstore_partial_4_4 vstore_partial_4
#define vstore_partial_4_5 NO_STORE
#define vstore_partial_4_6 NO_STORE
#define vstore_partial_4_7 NO_STORE
#define vstore_partial_4_8 NO_STORE
#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
#define vstore_partial_4_13 NO_STORE
#define vstore_partial_4_14 NO_STORE
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE
// Size == 8
#define vstore_partial_8_0 NO_STORE
#define vstore_partial_8_1 vstore_partial_1
#define vstore_partial_8_2 vstore_partial_2
#define vstore_partial_8_3 vstore_partial_3
#define vstore_partial_8_4 vstore_partial_4
#define vstore_partial_8_5 vstore_partial_5
#define vstore_partial_8_6 vstore_partial_6
#define vstore_partial_8_7 vstore_partial_7
#define vstore_partial_8_8 vstore_partial_8
#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
#define vstore_partial_8_13 NO_STORE
#define vstore_partial_8_14 NO_STORE
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE
// Size == 16
#define vstore_partial_16_0 NO_STORE
#define vstore_partial_16_1 vstore_partial_1
#define vstore_partial_16_2 vstore_partial_2
#define vstore_partial_16_3 vstore_partial_3
#define vstore_partial_16_4 vstore_partial_4
#define vstore_partial_16_5 vstore_partial_5
#define vstore_partial_16_6 vstore_partial_6
#define vstore_partial_16_7 vstore_partial_7
#define vstore_partial_16_8 vstore_partial_8
#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
#define vstore_partial_16_13 vstore_partial_13
#define vstore_partial_16_14 vstore_partial_14
#define vstore_partial_16_15 vstore_partial_15
#define vstore_partial_16_16 vstore_partial_16
/** Partial vstore. Store the **lower** 0 to (n-1)th elements of the given vector while minimising the amount of vstore ops
* @name vstore_partial_n
*
* @note @p DATA needs to be a vector not a scalar
* @note n needs to be <= the vector width of the input variable @p DATA
* eg 1: Valid
* vstore_partial_15(var:float16, 0, 0xabcd);
* eg 2: Invalid
* vstore_partial_7(var:float4, 0, 0xabcd);
*
* @note in cases n == 1, 2, 3, 4, 8, 16, no extra vstore is invoked, thus there's no performance penalty.
*
* @param[in] DATA The name of the variable
* @param[in] OFFSET Offset in n
* @param[in] PTR The base pointer
* @{
*/
#define vstore_partial_1(DATA, OFFSET, PTR) \
vstore1(DATA.s0, OFFSET, PTR);
#define vstore_partial_2(DATA, OFFSET, PTR) \
vstore2(DATA.s01, OFFSET, PTR);
#define vstore_partial_3(DATA, OFFSET, PTR) \
vstore3(DATA.s012, OFFSET, PTR);
#define vstore_partial_4(DATA, OFFSET, PTR) \
vstore4(DATA.s0123, OFFSET, PTR);
#define vstore_partial_5(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore1(DATA.s4, OFFSET, PTR + 4);
#define vstore_partial_6(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_2(DATA.s45, OFFSET, PTR + 4);
#define vstore_partial_7(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_3(DATA.s456, OFFSET, PTR + 4);
#define vstore_partial_8(DATA, OFFSET, PTR) \
vstore8(DATA.s01234567, OFFSET, PTR);
#define vstore_partial_9(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore1(DATA.s8, OFFSET, PTR + 8);
#define vstore_partial_10(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_2(DATA.s89, OFFSET, PTR + 8);
#define vstore_partial_11(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);
#define vstore_partial_12(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);
#define vstore_partial_13(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_14(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_15(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_16(DATA, OFFSET, PTR) \
vstore16(DATA, OFFSET, PTR);
/** @} */ // end of group vstore_partial_n
/** @} */ // end of group VSTORE_PARTIAL
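// Expansion sketch (illustrative): VSTORE_PARTIAL(8, 5)(data, 0, p) resolves to
// vstore_partial_8_5 -> vstore_partial_5, which issues vstore4(data.s0123, 0, p)
// followed by vstore1(data.s4, 0, p + 4): five elements stored with two vstores.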
// The convert built-in functions do not support the _sat modifier for floating-point types,
// so we create defines without _sat to overcome this issue
#define convert_float_sat convert_float
#define convert_float1_sat convert_float
#define convert_float2_sat convert_float2
#define convert_float3_sat convert_float3
#define convert_float4_sat convert_float4
#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
#define convert_half_sat convert_half
#define convert_half1_sat convert_half
#define convert_half2_sat convert_half2
#define convert_half3_sat convert_half3
#define convert_half4_sat convert_half4
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16
#define convert_float1 convert_float
#define convert_half1 convert_half
#define convert_char1 convert_char
#define convert_uchar1 convert_uchar
#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#define convert_long1 convert_long
#define convert_ulong1 convert_ulong
#define convert_double1 convert_double
#define convert_char1_sat convert_char_sat
#define convert_uchar1_sat convert_uchar_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
#define convert_uint1_sat convert_uint_sat
#define convert_long1_sat convert_long_sat
#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat
#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)
#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
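// The *_STR/wrapper pairs above use two levels of macros so that arguments are fully expanded
// before token pasting. Illustrative sketch (assumes -DDATA_TYPE=int and -DVEC_SIZE=4):
//   VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)                  // expands to int4
//   CONVERT_SAT(x, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))  // expands to (convert_int4_sat((x)))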
#define select_vec_dt_uchar(size) uchar##size
#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
#define select_vec_dt_short(size) short##size
#define select_vec_dt_half(size) short##size
#define select_vec_dt_uint(size) uint##size
#define select_vec_dt_int(size) int##size
#define select_vec_dt_float(size) int##size
#define select_vec_dt_ulong(size) ulong##size
#define select_vec_dt_long(size) long##size
#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
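// The select_vec_dt_* mappings yield, for each element type, the integer type of the same
// element size that the OpenCL relational built-ins return, so their result can be fed
// straight into select(). Illustrative sketch (assumes float data and a vector size of 4):
//   SELECT_VEC_DATA_TYPE(float, 4) // expands to int4, the result type of isgreaterequal(float4, float4)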
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) (((x).s0) + ((x).s1))
#define sum_reduce_3(x) (sum_reduce_2((x).s01) + ((x).s2))
#define sum_reduce_4(x) (sum_reduce_2((x).s01) + sum_reduce_2((x).s23))
#define sum_reduce_8(x) (sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567))
#define sum_reduce_16(x) (sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF))
#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))
#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
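// Illustrative usage of the reduction helpers (a sketch, assuming a float4 variable 'v'):
//   float s = SUM_REDUCE(v, 4); // pairwise tree sum of v.s0..v.s3
//   float m = MAX_REDUCE(v, 4); // max(max(v.s0, v.s1), max(v.s2, v.s3))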
#define VECTOR_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_offset_first_element_in_bytes
#define IMAGE_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_offset_first_element_in_bytes
#define TENSOR3D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_offset_first_element_in_bytes
#define TENSOR4D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_stride_w, \
uint name##_step_w, \
uint name##_offset_first_element_in_bytes
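// Illustrative expansion of the declaration macros in a kernel signature (a sketch; the
// parameter name 'src' is hypothetical):
//   __kernel void example_kernel(IMAGE_DECLARATION(src))
// declares: __global uchar *src_ptr, uint src_stride_x, uint src_step_x,
//           uint src_stride_y, uint src_step_y, uint src_offset_first_element_in_bytes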
#define CONVERT_TO_VECTOR_STRUCT(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)
#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)
#define CONVERT_TO_IMAGE_STRUCT(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)
#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)
#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)
#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
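// Illustrative kernel prologue using the conversion macros (a sketch, assuming a tensor
// parameter declared with TENSOR3D_DECLARATION(src)):
//   Tensor3D src_t = CONVERT_TO_TENSOR3D_STRUCT(src); // src_t.ptr now points at this workitem's data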
/** Structure to hold Vector information */
typedef struct Vector
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source vector */
int stride_x; /**< Stride of the vector in X dimension (in bytes) */
} Vector;
/** Structure to hold Image information */
typedef struct Image
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
int stride_x; /**< Stride of the image in X dimension (in bytes) */
int stride_y; /**< Stride of the image in Y dimension (in bytes) */
} Image;
/** Structure to hold 3D tensor information */
typedef struct Tensor3D
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */
int stride_x; /**< Stride of the tensor in X dimension (in bytes) */
int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
} Tensor3D;
/** Structure to hold 4D tensor information */
typedef struct Tensor4D
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */
int stride_x; /**< Stride of the tensor in X dimension (in bytes) */
int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
int stride_w; /**< Stride of the tensor in W dimension (in bytes) */
} Tensor4D;
/** Wrap vector information into a Vector structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source vector
* @param[in] stride_x Stride of the vector in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
*
* @return A vector object
*/
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
Vector vector =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
};
vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
return vector;
}
/** Wrap image information into an Image structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
*
* @return An image object
*/
inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
return img;
}
/** Wrap 3D tensor information into an image structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the image in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
*
* @return An image object
*/
inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return img;
}
/** Wrap 3D tensor information into a tensor structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
* @param[in] stride_x Stride of the tensor in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return tensor;
}
/** Wrap 3D tensor information into a tensor structure, without moving the pointer to this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
* @param[in] stride_x Stride of the tensor in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
return tensor;
}
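/** Wrap 4D tensor information into a tensor structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
* @param[in] stride_x Stride of the tensor in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] stride_w Stride of the tensor in W dimension (in bytes)
* @param[in] step_w stride_w * number of elements along W processed per workitem(in bytes)
* @param[in] mod_size Depth of the Z dimension, used to split the third global id into the Z coordinate (get_global_id(2) % mod_size) and the W coordinate (get_global_id(2) / mod_size)
*
* @return A 4D tensor object
*/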
inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
uint step_w,
uint mod_size)
{
Tensor4D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z,
.stride_w = stride_w
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
return tensor;
}
/** Get the pointer position of a Vector
*
* @param[in] vec Pointer to the Vector structure
* @param[in] x Relative X position
*/
inline __global const uchar *vector_offset(const Vector *vec, int x)
{
return vec->ptr + x * vec->stride_x;
}
/** Get the pointer position of an Image
*
* @param[in] img Pointer to the Image structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
*/
inline __global uchar *offset(const Image *img, int x, int y)
{
return img->ptr + x * img->stride_x + y * img->stride_y;
}
/** Get the pointer position of a Tensor3D
*
* @param[in] tensor Pointer to the Tensor3D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
*/
inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}
/** Get the pointer position of a Tensor4D
*
* @param[in] tensor Pointer to the Tensor4D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
* @param[in] w Relative W position
*/
inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}
/** Get the offset for a given linear index of a Tensor3D
*
* @param[in] tensor Pointer to the Tensor3D structure
* @param[in] width Width of the input tensor
* @param[in] height Height of the input tensor
* @param[in] depth Depth of the input tensor
* @param[in] index Linear index
*/
inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
uint num_elements = width * height;
const uint z = index / num_elements;
index %= num_elements;
const uint y = index / width;
index %= width;
const uint x = index;
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}
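// Illustrative sketch of the decomposition above: for a tensor of shape (width, height, depth),
// a linear index laid out as index = (z * height + y) * width + x is split back into
//   z = index / (width * height), y = (index % (width * height)) / width, x = index % width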
#endif // ARM_COMPUTE_HELPER_H
#if GPU_ARCH == GPU_ARCH_BIFROST
#define MLA(a, b, c) (fma(c, b, a))
#else // GPU_ARCH == GPU_ARCH_BIFROST
#define MLA(a, b, c) ((b) * (c) + (a))
#endif // GPU_ARCH == GPU_ARCH_BIFROST
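// Illustrative usage (a sketch; 'bias', 'scale' and 'x' are hypothetical float variables):
//   float y = MLA(bias, scale, x); // y = scale * x + bias, a single fma() on Bifrost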
// Hard-Swish
#define hard_swish_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))
// Logistic Activation
#define logistic_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))
// Hyperbolic Tangent Activation
#define tanh_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))
// RELU Activation
#define relu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (max((DATA_TYPE)0.0, x))
// Bounded RELU Activation
#define brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))
// Lower Upper Bounded RELU Activation
#define lu_brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
// Leaky RELU Activation
#define lrelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))
// Soft RELU Activation
#define srelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (log((DATA_TYPE)1.0 + exp(x)))
// ELU Activation
#define elu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))isgreaterequal(x, (DATA_TYPE)0.0)))
// Absolute Activation
#define abs_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (fabs(x))
// Square Activation
#define square_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * x)
// Square-root Activation
#define sqrt_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (sqrt(x))
// Linear Activation
#define linear_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))
// Identity Activation
#define identity_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x)
#define ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) op##_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)
#define ACTIVATION(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)
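// Illustrative expansion (a sketch, assuming -DACTIVATION_TYPE=relu and a float4 variable 'x'):
//   ACTIVATION(ACTIVATION_TYPE, float, 4, x, A_VAL, B_VAL) // expands to (max((float)0.0, x))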
/** Apply batch normalization.
*
* The result is computed as: output = gamma * (input - mean) / sqrt(var + epsilon) + beta
*
* @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
*
* @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
* @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
* @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
* @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
* @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
* @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
* @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
* @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p input_ptr
* @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
* @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
* @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p input_ptr
* @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
* @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
* @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p input_ptr
* @param[in] beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
* @param[in] beta_step_x beta_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
* @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p input_ptr
* @param[in] gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
* @param[in] gamma_step_x gamma_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
* @param[in] epsilon Epsilon parameter in the batch normalization equation
*/
__kernel void batchnormalization_layer_nchw(TENSOR3D_DECLARATION(input),
#ifndef IN_PLACE
TENSOR3D_DECLARATION(output),
#endif /* not IN_PLACE */
VECTOR_DECLARATION(mean),
VECTOR_DECLARATION(var),
#ifndef USE_DEFAULT_BETA
VECTOR_DECLARATION(beta),
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
VECTOR_DECLARATION(gamma),
#endif /* USE_DEFAULT_GAMMA */
float epsilon)
{
Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
#ifdef IN_PLACE
Tensor3D out = in;
#else /* IN_PLACE */
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
#endif /* IN_PLACE */
Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
Vector var = CONVERT_TO_VECTOR_STRUCT(var);
#ifndef USE_DEFAULT_BETA
Vector beta = CONVERT_TO_VECTOR_STRUCT(beta);
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
Vector gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
#endif /* USE_DEFAULT_GAMMA */
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
data = 0;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
denominator = 0;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
numerator = 0;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
x_bar = 0;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
res = 0;
const int current_slice = get_global_id(2);
data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
denominator = *((__global DATA_TYPE *)(var.ptr + current_slice * var.stride_x));
denominator = INVSQRT_OP(ADD_OP(denominator, ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(epsilon))));
// Calculate x bar and store results
numerator = *((__global DATA_TYPE *)(mean.ptr + current_slice * mean.stride_x));
numerator = SUB_OP(data, numerator);
x_bar = MUL_OP(numerator, denominator);
#ifndef USE_DEFAULT_GAMMA
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
gamma_vec = *((__global DATA_TYPE *)(gamma.ptr + current_slice * gamma.stride_x));
res = MUL_OP(gamma_vec, x_bar);
#else /* USE_DEFAULT_GAMMA */
// gamma is equal to 1, no need to perform multiplications
res = x_bar;
#endif /* USE_DEFAULT_GAMMA */
#ifndef USE_DEFAULT_BETA
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
beta_vec = *((__global DATA_TYPE *)(beta.ptr + current_slice * beta.stride_x));
// beta is not zero, hence we need to perform the addition
res = ADD_OP(res, beta_vec);
#endif /* USE_DEFAULT_BETA */
res = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, res, A_VAL, B_VAL);
VSTORE(VEC_SIZE)
(res, 0, (__global DATA_TYPE *)out.ptr);
}
/** Apply batch normalization on tensors with NHWC format.
*
* The result is computed as: output = gamma * (input - mean) / sqrt(var + epsilon) + beta
*
* @note It is possible to select the activation function to apply using -DACTIVATION_TYPE e.g. -DACTIVATION_TYPE=relu
* @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively
*
* @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
* @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
* @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
* @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
* @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
* @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
* @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
* @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
* @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
* @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p input_ptr
* @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
* @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
* @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p input_ptr
* @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
* @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
* @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p input_ptr
* @param[in] beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
* @param[in] beta_step_x beta_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
* @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p input_ptr
* @param[in] gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
* @param[in] gamma_step_x gamma_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
* @param[in] epsilon Epsilon parameter in the batch normalization equation
*/
__kernel void batchnormalization_layer_nhwc(TENSOR3D_DECLARATION(input),
#ifndef IN_PLACE
TENSOR3D_DECLARATION(output),
#endif /* not IN_PLACE */
VECTOR_DECLARATION(mean),
VECTOR_DECLARATION(var),
#ifndef USE_DEFAULT_BETA
VECTOR_DECLARATION(beta),
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
VECTOR_DECLARATION(gamma),
#endif /* USE_DEFAULT_GAMMA */
float epsilon)
{
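// Leftover handling along X: when the row length is not a multiple of VEC_SIZE, work-item 0
// stores only VEC_SIZE_LEFTOVER elements (see the STORE_VECTOR_SELECT at the end of this
// kernel), and every other work-item is shifted back by (VEC_SIZE - VEC_SIZE_LEFTOVER) %
// VEC_SIZE elements so that its full-width vector store stays in bounds.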
uint x_offs = max((int)(get_global_id(0) * VEC_SIZE * sizeof(DATA_TYPE) - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE * sizeof(DATA_TYPE)), 0);
__global uchar *input_addr = input_ptr + input_offset_first_element_in_bytes + x_offs + get_global_id(1) * input_stride_y + get_global_id(2) * input_stride_z;
#ifdef IN_PLACE
__global uchar *output_addr = input_addr;
#else /* IN_PLACE */
__global uchar *output_addr = output_ptr + output_offset_first_element_in_bytes + x_offs + get_global_id(1) * output_stride_y + get_global_id(2) * output_stride_z;
#endif /* IN_PLACE */
__global uchar *mean_addr = mean_ptr + mean_offset_first_element_in_bytes + x_offs;
__global uchar *var_addr = var_ptr + var_offset_first_element_in_bytes + x_offs;
#ifndef USE_DEFAULT_BETA
__global uchar *beta_addr = beta_ptr + beta_offset_first_element_in_bytes + x_offs;
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
__global uchar *gamma_addr = gamma_ptr + gamma_offset_first_element_in_bytes + x_offs;
#endif /* USE_DEFAULT_GAMMA */
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
data = 0;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
denominator = 0;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
numerator = 0;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
x_bar = 0;
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
res0 = 0;
data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_addr);
denominator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)var_addr);
denominator = INVSQRT_OP(ADD_OP(denominator, ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(epsilon))));
// Calculate x bar and store results
numerator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)mean_addr);
numerator = SUB_OP(data, numerator);
x_bar = MUL_OP(numerator, denominator);
#ifndef USE_DEFAULT_GAMMA
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
gamma_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)gamma_addr);
res0 = MUL_OP(gamma_vec, x_bar);
#else /* USE_DEFAULT_GAMMA */
// gamma is equal to 1, no need to perform multiplications
res0 = x_bar;
#endif /* USE_DEFAULT_GAMMA */
#ifndef USE_DEFAULT_BETA
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
beta_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)beta_addr);
// beta is not zero, hence we need to perform the addition
res0 = ADD_OP(res0, beta_vec);
#endif /* USE_DEFAULT_BETA */
res0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, res0, A_VAL, B_VAL);
STORE_VECTOR_SELECT(res, DATA_TYPE, output_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
}
#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) */
#if defined(DATA_TYPE) && defined(EPSILON)
/** OpenCL kernel to fuse the weights of a convolution or depthwise convolution layer with batch normalization when the data layout is either NCHW or NHWC
*
* The fused parameters are computed as:
*   w_fused = gamma * w / sqrt(var + EPSILON)
*   b_fused = gamma * (b - mean) / sqrt(var + EPSILON) + beta
*
* @note The input weights tensor is assumed 4D with the OFMs in the fourth dimension
* @note The data type should be passed at compile time using -DDATA_TYPE, e.g. -DDATA_TYPE=float
* @note The third dimension of the input tensor should be passed at compile time when the weights belong to a convolution layer, using -DDIM2=size, e.g. -DDIM2=16.
* For depthwise convolution weights, do not pass DIM2
* @note Data layout NHWC should be passed at compile time with -DNHWC. For data layout NCHW no parameter is required
* @note The batch normalization epsilon parameter should be passed at compile time using -DEPSILON=value, e.g. -DEPSILON=0.001f
*
* @param[in] w_ptr Pointer to the weights tensor. Supported data types: F16/F32
* @param[in] w_stride_x Stride of the weights tensor in X dimension (in bytes)
* @param[in] w_step_x w_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] w_stride_y Stride of the weights tensor in Y dimension (in bytes)
* @param[in] w_step_y w_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] w_stride_z Stride of the weights tensor in Z dimension (in bytes)
* @param[in] w_step_z w_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] w_offset_first_element_in_bytes The offset of the first element in the weights tensor
* @param[in] b_ptr (Optional) Pointer to the bias tensor. Supported data types: same as @p w_ptr
* @param[in] b_stride_x (Optional) Stride of the bias tensor in X dimension (in bytes)
* @param[in] b_step_x (Optional) b_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] b_stride_y (Optional) Stride of the bias tensor in Y dimension (in bytes)
* @param[in] b_step_y (Optional) b_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] b_stride_z (Optional) Stride of the bias tensor in Z dimension (in bytes)
* @param[in] b_step_z (Optional) b_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] b_offset_first_element_in_bytes (Optional) The offset of the first element in the bias tensor
* @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p w_ptr
* @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
* @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
* @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p w_ptr
* @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
* @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
* @param[out] w_fused_ptr (Optional) Pointer to the destination weights tensors. Supported data types: same as @p w_ptr
* @param[in] w_fused_stride_x (Optional) Stride of the destination weights tensor in X dimension (in bytes)
* @param[in] w_fused_step_x (Optional) w_fused_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] w_fused_stride_y (Optional) Stride of the destination weights tensor in Y dimension (in bytes)
* @param[in] w_fused_step_y (Optional) w_fused_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] w_fused_stride_z (Optional) Stride of the destination weights tensor in Z dimension (in bytes)
* @param[in] w_fused_step_z (Optional) w_fused_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] w_fused_offset_first_element_in_bytes (Optional) The offset of the first element in the destination weights tensor
* @param[in] b_fused_ptr (Optional) Pointer to the destination bias tensor. Supported data types: same as @p w_ptr
* @param[in] b_fused_stride_x (Optional) Stride of the destination bias tensor in X dimension (in bytes)
* @param[in] b_fused_step_x (Optional) b_fused_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] b_fused_offset_first_element_in_bytes (Optional) The offset of the first element in the destination bias tensor
* @param[in] beta_ptr (Optional) Pointer to the beta source tensor. Supported data types: same as @p w_ptr
* @param[in] beta_stride_x (Optional) Stride of the beta source tensor in X dimension (in bytes)
* @param[in] beta_step_x (Optional) beta_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] beta_offset_first_element_in_bytes (Optional) The offset of the first element in the beta source tensor
* @param[in] gamma_ptr (Optional) Pointer to the gamma source tensor. Supported data types: same as @p w_ptr
* @param[in] gamma_stride_x (Optional) Stride of the gamma source tensor in X dimension (in bytes)
* @param[in] gamma_step_x (Optional) gamma_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] gamma_offset_first_element_in_bytes (Optional) The offset of the first element in the gamma source tensor
*/
__kernel void fuse_batchnormalization_layer(TENSOR3D_DECLARATION(w),
#if defined(BIAS)
VECTOR_DECLARATION(b),
#endif // defined(BIAS)
VECTOR_DECLARATION(mean),
VECTOR_DECLARATION(var)
#ifndef IN_PLACE_W
,
TENSOR3D_DECLARATION(w_fused)
#endif // ifndef IN_PLACE_W
#ifndef IN_PLACE_B
,
VECTOR_DECLARATION(b_fused)
#endif // ifndef IN_PLACE_B
#if defined(BETA)
,
VECTOR_DECLARATION(beta)
#endif // defined(BETA)
#if defined(GAMMA)
,
VECTOR_DECLARATION(gamma)
#endif // defined(GAMMA)
)
{
int x = get_global_id(0);
int y = get_global_id(1);
int z = get_global_id(2);
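// Derive the batch-normalization channel index c1 (and, for 4D convolution weights, the
// position c0 inside the third dimension): for convolution weights the OFM index is z / DIM2;
// for depthwise weights it is the channel coordinate directly (x in NHWC, z in NCHW).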
#if defined(DIM2)
int c0 = z % DIM2;
int c1 = z / DIM2;
#else // ! defined(DIM2)
int c0 = 0;
#if defined(NHWC)
int c1 = x;
#else // defined(NHWC)
int c1 = z;
#endif // defined(NHWC)
#endif // defined(DIM2)
int w_offset = x * sizeof(DATA_TYPE) + y * w_stride_y + z * w_stride_z;
int v_offset = c1 * sizeof(DATA_TYPE);
DATA_TYPE w_old = 0.0f;
DATA_TYPE b_old = 0.0f;
DATA_TYPE w_new = 0.0f;
DATA_TYPE b_new = 0.0f;
DATA_TYPE gamma = 1.0f;
DATA_TYPE mean = 0.0f;
DATA_TYPE var = 1.0f;
DATA_TYPE beta = 0.0f;
w_old = *((__global DATA_TYPE *)(w_ptr + w_offset + w_offset_first_element_in_bytes));
var = *((__global DATA_TYPE *)(var_ptr + v_offset + var_offset_first_element_in_bytes));
mean = *((__global DATA_TYPE *)(mean_ptr + v_offset + mean_offset_first_element_in_bytes));
#if defined(GAMMA)
gamma = *((__global DATA_TYPE *)(gamma_ptr + v_offset + gamma_offset_first_element_in_bytes));
#endif // defined(GAMMA)
// Compute new weight
w_new = (gamma * w_old) / (sqrt(var + EPSILON));
#if defined(IN_PLACE_W)
*((__global DATA_TYPE *)(w_ptr + w_offset + w_offset_first_element_in_bytes)) = w_new;
#else // defined(IN_PLACE_W)
*((__global DATA_TYPE *)(w_fused_ptr + w_offset + w_fused_offset_first_element_in_bytes)) = w_new;
#endif // defined(IN_PLACE_W)
// Compute bias
#if !defined(DIM2) && defined(NHWC)
if(z == 0 && y == 0)
#else // !defined(DIM2) && defined(NHWC)
if(x == 0 && y == 0 && c0 == 0)
#endif // !defined(DIM2) && defined(NHWC)
{
#if defined(BIAS)
b_old = *((__global DATA_TYPE *)(b_ptr + v_offset + b_offset_first_element_in_bytes));
#endif // defined(BIAS)
#if defined(BETA)
beta = *((__global DATA_TYPE *)(beta_ptr + v_offset + beta_offset_first_element_in_bytes));
#endif // defined(BETA)
b_new = ((gamma * (b_old - mean)) / (sqrt(var + EPSILON))) + beta;
#if defined(BIAS)
#if defined(IN_PLACE_B)
*((__global DATA_TYPE *)(b_ptr + v_offset + b_offset_first_element_in_bytes)) = b_new;
#else // defined(IN_PLACE_B)
*((__global DATA_TYPE *)(b_fused_ptr + v_offset + b_fused_offset_first_element_in_bytes)) = b_new;
#endif // defined(IN_PLACE_B)
#else // defined(BIAS)
#ifndef IN_PLACE_B
*((__global DATA_TYPE *)(b_fused_ptr + v_offset + b_fused_offset_first_element_in_bytes)) = b_new;
#endif // ifndef IN_PLACE_B
#endif // defined(BIAS)
}
}
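// Illustrative build-time configuration for the kernel above (a sketch; the exact options are
// chosen by the host-side code that compiles this program):
//   -DDATA_TYPE=float -DEPSILON=0.001f -DDIM2=16 // convolution weights, NCHW layout
//   -DDATA_TYPE=float -DEPSILON=0.001f -DNHWC    // depthwise weights, NHWC layout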
#endif // defined(DATA_TYPE) && defined(EPSILON)
)"