R"(
/*
* Copyright (c) 2017-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* Copyright (c) 2016-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef ARM_COMPUTE_HELPER_H
#define ARM_COMPUTE_HELPER_H
/*
* Copyright (c) 2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/** Store the 0th to (n-1)th rows of the given variables
* @name STORE_ROW_n
*
* @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_n
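// Illustrative expansion (not from the original file; the names c, dst_addr,
// dst_stride_y and zin are hypothetical): STORE_ROW_2(4, float, c, dst_addr, dst_stride_y, zin)
// expands to one vstore per row:
//   vstore4(c0, 0, (__global float *)(dst_addr + 0 * dst_stride_y + zin0));
//   vstore4(c1, 0, (__global float *)(dst_addr + 1 * dst_stride_y + zin1));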
/** Convert and store the 0th to (n-1)th rows of the given variables
* @name CONVERT_STORE_ROW_n
*
* @param[in] N0 The size of the vectors
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group CONVERT_STORE_ROW_n
/** Store a block of the given size M0xN0
* @name STORE_BLOCK
*
 * Supported cases are M0=1,2,3,...,16 and N0=1,2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group STORE_BLOCK
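// Usage sketch (hypothetical names): storing a 3x8 float block whose rows are
// named c0, c1, c2 and whose per-row z offsets are named zout0, zout1, zout2:
//   STORE_BLOCK(3, 8, float, c, dst_addr, dst_stride_y, zout);
// This dispatches to STORE_ROW_3 and emits three vstore8 calls.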
/** Convert and store a block of the given size M0xN0
* @name CONVERT_STORE_BLOCK
*
 * Supported cases are M0=1,2,3,...,16 and N0=1,2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group CONVERT_STORE_BLOCK
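// Usage sketch (hypothetical names): saturating int accumulators c0..c3 down to
// uchar while storing a 4x16 block:
//   CONVERT_STORE_BLOCK(4, 16, uchar, c, dst_addr, dst_stride_y, zout);
// Each row goes through CONVERT_SAT(..., uchar16) before its vstore16.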
/** Partially store the 0th to (n-1)th rows of the given variables
 * @name STORE_ROW_PARTIAL_n
 * Within each row, store the lower @p STORE_N0 elements of vectors of width @p N0
 *
 * @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
 *
 * @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
 * @param[in] STORE_N0 The **lower** size of the vectors to store. Supported: 1-16 and <= @p N0
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_PARTIAL_n
/** Partially store a block of the given size STORE_M0xSTORE_N0
* @name STORE_BLOCK_PARTIAL
*
* @note The vector width @p N0 is also required for correct partial storing behaviour.
 * @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for STORE_M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for STORE_M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] STORE_M0 The number of rows to store. Supported: 1-16
* @param[in] STORE_N0 The lower number of elements of vectors to store. Supported: 1-16 and <= @p N0
* @param[in] N0 The size of each vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** Store a block that can be partial in both x and y dimensions
*
 * @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
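// Illustrative behaviour (hypothetical sizes): with M0=4, N0=16, PARTIAL_STORE_M0=1
// and PARTIAL_STORE_N0=5, a work-item with PARTIAL_COND_Y true and PARTIAL_COND_X
// false takes the second branch and stores a 1x16 block, while one with both
// conditions true stores the 1x5 corner block via STORE_BLOCK_PARTIAL(1, 5, 16, ...).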
/** Store a block that can only be partial in x but not y.
*
 * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in y but not x.
*
 * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
*/
#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \
if(!(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** @} */ // end of group STORE_BLOCK_PARTIAL
#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** Boundary-aware GEMM block store
* @name STORE_BLOCK_BOUNDARY_AWARE
* This macro assumes the following schemes to achieve boundary-awareness:
* - Overlapping load in Y axis from lhs tensor. This implies lhs has no padding along y dim.
 * - Non-overlapping (normal) load from rhs tensor. This implies rhs can have paddings.
 * - Overlapping load in Y axis from bias tensor. This implies bias has no padding along y dim.
* The macro then ensures that the dst tensor can be stored without any paddings in both x and y dim.
*
* In the y dimension, we place the partial blocks **at the beginning** while in the x dimension, we place the partial
* blocks **at the end**.
 * Say the dst tensor is of shape MxN and we have M0 and N0 as the block size, then this is how we define "partial blocks"/
 * "boundary blocks" (we use the 2 terms "partial blocks" and "boundary blocks" interchangeably) and their various parameters:
*
* *--x--> x == 0 x == 1
* | |<------------------------------N-------------------------->|
* y |<--------------N0------------->|<----PARTIAL_STORE_N0----->|
* | -------------#############################################################
* * | | |...............................|...........................|
* y == 0 | PAR_..._M0 |......Boundary block in y......|.Boundary block in x and y.|
* | | |...............................|...........................|
* M --#############################################################
* | | | |...........................|
* y == 1 | M0 | Non-boundary block |....Boundary block in x....|
* | | | |...........................|
* |------------#############################################################
*
* Then @p PARTIAL_STORE_M0 = M % M0 and @p PARTIAL_STORE_N0 = N % N0
*
 * @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
 * It automatically detects if a given M,N,M0,N0 combination can yield partial blocks in either the X or Y dimension,
 * and selects the corresponding store methods such that the boundary detection logic is only added when needed.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported: [0, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
* @{
*/
#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case1: No partial blocks in either x or y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0
// Case2: Partial blocks in y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)
#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0
// Case3: Partial blocks in x
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)
#else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case4: Partial blocks in both x and y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)
#endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
#endif // defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** @} */ // end of group STORE_BLOCK_BOUNDARY_AWARE
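// Worked example (hypothetical sizes): for a dst tensor of shape M=35, N=21 with
// M0=4 and N0=16, PARTIAL_STORE_M0 = 35 % 4 = 3 and PARTIAL_STORE_N0 = 21 % 16 = 5,
// so Case4 above is compiled in and only the first row of blocks and the last
// column of blocks pay the partial-store cost.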
#if defined(PARTIAL_STORE_M0)
/** Compute the start m0 row (LHS, BIAS and DST) in a boundary-aware way so as to avoid padding
* @name COMPUTE_M0_START_ROW
 * If there are any partial blocks in the y dimension, they are placed at the beginning of the rows.
* This shift amount is added to all rows such that the partial block (at the beginning) overlaps with the subsequent
* blocks in the y dimension to avoid any padding.
* EG: M0=4, PARTIAL_STORE_M0=1:
* | Non-overlapping | +M0_ROW_SHIFT (Overlapping)
* block 0 (partial)| start row = 0 | start row = 0
* block 1 (full) | start row = 4 | start row = 1
* block 2 (full) | start row = 8 | start row = 5
*
* @param[in] y Global id of current block in y.
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @{
*/
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0))))
#else // defined(PARTIAL_STORE_M0)
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(y * M0))
#endif // defined(PARTIAL_STORE_M0)
/** @} */ // end of group COMPUTE_M0_START_ROW
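// Worked example matching the table above: with M0=4 and PARTIAL_STORE_M0=1, the
// shift is (M0 - PARTIAL_STORE_M0) % M0 = 3, so
//   COMPUTE_M0_START_ROW(0, 4, 1) = max(0, 0 - 3) = 0 (partial block 0)
//   COMPUTE_M0_START_ROW(1, 4, 1) = max(0, 4 - 3) = 1 (block 1)
//   COMPUTE_M0_START_ROW(2, 4, 1) = max(0, 8 - 3) = 5 (block 2)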
/** Store a vector that can only be partial in x.
*
 * @note in case @p vec_size or @p leftover != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to end in a 0.
* E.g., for basename=c, the expected name is c0.
*
* @param[in] basename The name of the variable without trailing 0
* @param[in] data_type The data type of the vector
* @param[in] ptr The base pointer
 * @param[in] vec_size The vector size if cond = false. Supported: 1, 2, 3, 4, 8, 16
 * @param[in] leftover The vector size if cond = true. Supported range: [1, @p vec_size)
 * @param[in] cond Condition to select either @p vec_size or @p leftover
* @{
*/
#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \
STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond)
/** @} */ // end of group STORE_VECTOR_SELECT
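// Usage sketch (hypothetical names): storing a float8 named res0 where the last
// work-item in x only has 3 valid elements left:
//   STORE_VECTOR_SELECT(res, float, dst_addr, 8, 3, is_last_x);
// expands to a plain vstore8 when is_last_x is false, and to vstore_partial_8_3
// (a single vstore3 of the lower three elements) when it is true.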
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#pragma OPENCL EXTENSION cl_arm_printf : enable
#endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
/** Concatenate two inputs.
*
* @param[in] a The first input to be concatenated
* @param[in] b The second input to be concatenated
*
* @return The concatenated output
*/
#define CONCAT(a, b) a##b
/** Expand the given vector
*
* @param[in] x The vector to be expanded
*
* @return The expanded output
*/
#define EXPAND(x) x
/** Clamp the given value between an upper and lower bound.
*
* @param[in] x The value to be clamped
* @param[in] min_val The lower bound
* @param[in] max_val The upper bound
*
* @return The clamped value.
*/
#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
/** REVn reverses the given vector whose size is n.
* @name REVn
*
* @param[in] x The vector to be reversed
*
* @return The reversed vector
* @{
*/
#define REV1(x) ((x))
#define REV2(x) ((x).s10)
#define REV3(x) ((x).s210)
#define REV4(x) ((x).s3210)
#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)
/** @} */ // end of group REVn
/** Reverse the given vector.
* @name REVERSE
*
* @param[in] x The vector to be reversed
* @param[in] s The size of the vector
*
* @return The reversed vector
* @{
*/
#define REVERSE_STR(x, s) REV##s((x))
#define REVERSE(x, s) REVERSE_STR(x, s)
/** @} */ // end of group REVERSE
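// Illustrative expansion: REVERSE(x, 4) resolves to REV4(x), i.e. ((x).s3210),
// so a float4 holding (1, 2, 3, 4) becomes (4, 3, 2, 1).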
/** Circular-right-shift (rotate-right) the vector of size s by n positions.
* @name ROTs_n
*
* @param[in] x The vector to be shifted
*
* @return The shifted vector
* @{
*/
#define ROT1_0(x) ((x))
#define ROT2_0(x) ((x))
#define ROT2_1(x) ((x).s10)
#define ROT3_0(x) ((x))
#define ROT3_1(x) ((x).s201)
#define ROT3_2(x) ((x).s120)
#define ROT4_0(x) ((x))
#define ROT4_1(x) ((x).s3012)
#define ROT4_2(x) ((x).s2301)
#define ROT4_3(x) ((x).s1230)
#define ROT8_0(x) ((x))
#define ROT8_1(x) ((x).s70123456)
#define ROT8_2(x) ((x).s67012345)
#define ROT8_3(x) ((x).s56701234)
#define ROT8_4(x) ((x).s45670123)
#define ROT8_5(x) ((x).s34567012)
#define ROT8_6(x) ((x).s23456701)
#define ROT8_7(x) ((x).s12345670)
#define ROT16_0(x) ((x))
#define ROT16_1(x) ((x).sF0123456789ABCDE)
#define ROT16_2(x) ((x).sEF0123456789ABCD)
#define ROT16_3(x) ((x).sDEF0123456789ABC)
#define ROT16_4(x) ((x).sCDEF0123456789AB)
#define ROT16_5(x) ((x).sBCDEF0123456789A)
#define ROT16_6(x) ((x).sABCDEF0123456789)
#define ROT16_7(x) ((x).s9ABCDEF012345678)
#define ROT16_8(x) ((x).s89ABCDEF01234567)
#define ROT16_9(x) ((x).s789ABCDEF0123456)
#define ROT16_10(x) ((x).s6789ABCDEF012345)
#define ROT16_11(x) ((x).s56789ABCDEF01234)
#define ROT16_12(x) ((x).s456789ABCDEF0123)
#define ROT16_13(x) ((x).s3456789ABCDEF012)
#define ROT16_14(x) ((x).s23456789ABCDEF01)
#define ROT16_15(x) ((x).s123456789ABCDEF0)
/** @} */ // end of group ROTs_n
/** Circular-right-shift (rotate-right) the given vector by the given amount.
* @name ROTATE
*
* @param[in] x The vector to be shifted
* @param[in] s The size of the vector
* @param[in] n The amount to be shifted
*
* @return The shifted vector
* @{
*/
#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
#define ROTATE(x, s, n) ROTATE_STR(x, s, n)
/** @} */ // end of group ROTATE
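// Illustrative expansion: ROTATE(x, 8, 3) resolves to ROT8_3(x), i.e.
// ((x).s56701234): the last 3 lanes wrap around to the front.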
/** Creates a vector of size n filled with offset values corresponding to the location of each element.
* @name V_OFFSn
*
* @param[in] dt The data type of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define V_OFFS1(dt) (dt##1)(0)
#define V_OFFS2(dt) (dt##2)(0, 1)
#define V_OFFS3(dt) (dt##3)(0, 1, 2)
#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
/** @} */ // end of group V_OFFSn
/** Create a vector filled with offset values corresponding to the location of each element.
* @name VEC_OFFS
*
* @param[in] dt The data type of the output vector
* @param[in] s The size of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
/** @} */ // end of group VEC_OFFS
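// Illustrative expansion: VEC_OFFS(int, 4) resolves to V_OFFS4(int), i.e.
// (int4)(0, 1, 2, 3).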
#define VLOAD_STR(size) vload##size
#define VLOAD(size) VLOAD_STR(size)
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4
/** Utility macro to convert a vector size to a pixel unit.
*
* @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
*
 * @param[in] vec_size Vector size. Only 4, 8 and 16 are supported
*
* @return The pixel unit (number of pixels)
* @{
*/
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
/** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
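// Illustrative expansion: CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(16) yields PIXEL_UNIT16,
// i.e. 4 pixels, since each pixel of a float/half image carries 4 components.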
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
/** Utility macro to read a 2D OpenCL image object.
*
* @note Coordinates are not normalized
*
* @param[in] data_type Data type
 * @param[in] n0 Number of pixels to read. Only 1, 2 and 4 are supported
* @param[in] img OpenCL image object
* @param[in] x_coord The x coordinate for the top-left pixel
* @param[in] y_coord The y coordinate for the top-left pixel
*
* @return Pixels from the 2D OpenCL image object
* @{
*/
#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)
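// Usage sketch (hypothetical names): reading 8 consecutive floats (2 RGBA pixels)
// from image src_img at non-normalized coordinates (x, y):
//   float8 vals = READ_IMAGE2D(float, 2, src_img, x, y);
// which resolves to read_image2d_floatx2 and issues two read_imagef calls.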
#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)
#define float1 float
#define half1 half
#define char1 char
#define uchar1 uchar
#define short1 short
#define ushort1 ushort
#define int1 int
#define uint1 uint
#define long1 long
#define ulong1 ulong
#define double1 double
#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA
/** Extended partial vstore that correctly handles scalar values as well.
 * Store the **lower** 0th to (n-1)th elements of the given vector while minimising the number of vstore ops
* @name VSTORE_PARTIAL
*
* @note With this macro, the passed data can be both a vector and a scalar
* @note @p store_size needs to be <= @p size
* eg 1: Valid
* VSTORE_PARTIAL(16, 15) ...;
* eg 2: Invalid
* VSTORE_PARTIAL(4, 7) ...;
*
 * @param[in] size The width of @p DATA. Supported values: 1 (scalar), 2, 3, 4, 8, 16
* @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size
* @{
*/
#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)
#define NO_STORE(data, offs, ptr) \
{ \
}
// Size == 1 (scalar)
#define vstore_partial_1_0 NO_STORE
#define vstore_partial_1_1 vstore1
#define vstore_partial_1_2 NO_STORE
#define vstore_partial_1_3 NO_STORE
#define vstore_partial_1_4 NO_STORE
#define vstore_partial_1_5 NO_STORE
#define vstore_partial_1_6 NO_STORE
#define vstore_partial_1_7 NO_STORE
#define vstore_partial_1_8 NO_STORE
#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
#define vstore_partial_1_13 NO_STORE
#define vstore_partial_1_14 NO_STORE
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE
// Size == 2
#define vstore_partial_2_0 NO_STORE
#define vstore_partial_2_1 vstore_partial_1
#define vstore_partial_2_2 vstore_partial_2
#define vstore_partial_2_3 NO_STORE
#define vstore_partial_2_4 NO_STORE
#define vstore_partial_2_5 NO_STORE
#define vstore_partial_2_6 NO_STORE
#define vstore_partial_2_7 NO_STORE
#define vstore_partial_2_8 NO_STORE
#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
#define vstore_partial_2_13 NO_STORE
#define vstore_partial_2_14 NO_STORE
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE
// Size == 3
#define vstore_partial_3_0 NO_STORE
#define vstore_partial_3_1 vstore_partial_1
#define vstore_partial_3_2 vstore_partial_2
#define vstore_partial_3_3 vstore_partial_3
#define vstore_partial_3_4 NO_STORE
#define vstore_partial_3_5 NO_STORE
#define vstore_partial_3_6 NO_STORE
#define vstore_partial_3_7 NO_STORE
#define vstore_partial_3_8 NO_STORE
#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
#define vstore_partial_3_13 NO_STORE
#define vstore_partial_3_14 NO_STORE
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE
// Size == 4
#define vstore_partial_4_0 NO_STORE
#define vstore_partial_4_1 vstore_partial_1
#define vstore_partial_4_2 vstore_partial_2
#define vstore_partial_4_3 vstore_partial_3
#define vstore_partial_4_4 vstore_partial_4
#define vstore_partial_4_5 NO_STORE
#define vstore_partial_4_6 NO_STORE
#define vstore_partial_4_7 NO_STORE
#define vstore_partial_4_8 NO_STORE
#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
#define vstore_partial_4_13 NO_STORE
#define vstore_partial_4_14 NO_STORE
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE
// Size == 8
#define vstore_partial_8_0 NO_STORE
#define vstore_partial_8_1 vstore_partial_1
#define vstore_partial_8_2 vstore_partial_2
#define vstore_partial_8_3 vstore_partial_3
#define vstore_partial_8_4 vstore_partial_4
#define vstore_partial_8_5 vstore_partial_5
#define vstore_partial_8_6 vstore_partial_6
#define vstore_partial_8_7 vstore_partial_7
#define vstore_partial_8_8 vstore_partial_8
#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
#define vstore_partial_8_13 NO_STORE
#define vstore_partial_8_14 NO_STORE
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE
// Size == 16
#define vstore_partial_16_0 NO_STORE
#define vstore_partial_16_1 vstore_partial_1
#define vstore_partial_16_2 vstore_partial_2
#define vstore_partial_16_3 vstore_partial_3
#define vstore_partial_16_4 vstore_partial_4
#define vstore_partial_16_5 vstore_partial_5
#define vstore_partial_16_6 vstore_partial_6
#define vstore_partial_16_7 vstore_partial_7
#define vstore_partial_16_8 vstore_partial_8
#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
#define vstore_partial_16_13 vstore_partial_13
#define vstore_partial_16_14 vstore_partial_14
#define vstore_partial_16_15 vstore_partial_15
#define vstore_partial_16_16 vstore_partial_16
/** Partial vstore. Store the **lower** 0th to (n-1)th elements of the given vector while minimising the number of vstore ops
* @name vstore_partial_n
*
* @note @p DATA needs to be a vector not a scalar
* @note n needs to be <= the vector width of the input variable @p DATA
* eg 1: Valid
* vstore_partial_15(var:float16, 0, 0xabcd);
* eg 2: Invalid
* vstore_partial_7(var:float4, 0, 0xabcd);
*
* @note in cases n == 1, 2, 3, 4, 8, 16, no extra vstore is invoked, thus there's no performance penalty.
*
* @param[in] DATA The name of the variable
* @param[in] OFFSET Offset in n
* @param[in] PTR The base pointer
* @{
*/
#define vstore_partial_1(DATA, OFFSET, PTR) \
vstore1(DATA.s0, OFFSET, PTR);
#define vstore_partial_2(DATA, OFFSET, PTR) \
vstore2(DATA.s01, OFFSET, PTR);
#define vstore_partial_3(DATA, OFFSET, PTR) \
vstore3(DATA.s012, OFFSET, PTR);
#define vstore_partial_4(DATA, OFFSET, PTR) \
vstore4(DATA.s0123, OFFSET, PTR);
#define vstore_partial_5(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore1(DATA.s4, OFFSET, PTR + 4);
#define vstore_partial_6(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_2(DATA.s45, OFFSET, PTR + 4);
#define vstore_partial_7(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_3(DATA.s456, OFFSET, PTR + 4);
#define vstore_partial_8(DATA, OFFSET, PTR) \
vstore8(DATA.s01234567, OFFSET, PTR);
#define vstore_partial_9(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore1(DATA.s8, OFFSET, PTR + 8);
#define vstore_partial_10(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_2(DATA.s89, OFFSET, PTR + 8);
#define vstore_partial_11(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);
#define vstore_partial_12(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);
#define vstore_partial_13(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_14(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_15(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_16(DATA, OFFSET, PTR) \
vstore16(DATA, OFFSET, PTR);
/** @} */ // end of group vstore_partial_n
/** @} */ // end of group VSTORE_PARTIAL
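// Illustrative decomposition: VSTORE_PARTIAL(8, 5) resolves to vstore_partial_8_5,
// i.e. vstore_partial_5, which stores the lower five elements with one vstore4 of
// .s0123 followed by one vstore1 of .s4.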
// The convert built-in functions with the _sat modifier are not supported for floating point, so we create defines
// without _sat to overcome this issue
#define convert_float_sat convert_float
#define convert_float1_sat convert_float
#define convert_float2_sat convert_float2
#define convert_float3_sat convert_float3
#define convert_float4_sat convert_float4
#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
#define convert_half_sat convert_half
#define convert_half1_sat convert_half
#define convert_half2_sat convert_half2
#define convert_half3_sat convert_half3
#define convert_half4_sat convert_half4
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16
#define convert_float1 convert_float
#define convert_half1 convert_half
#define convert_char1 convert_char
#define convert_uchar1 convert_uchar
#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#define convert_long1 convert_long
#define convert_ulong1 convert_ulong
#define convert_double1 convert_double
#define convert_char1_sat convert_char_sat
#define convert_uchar1_sat convert_uchar_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
#define convert_uint1_sat convert_uint_sat
#define convert_long1_sat convert_long_sat
#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat
#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)
#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
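// Illustrative expansion: CONVERT_SAT(acc, uchar16) resolves to
// (convert_uchar16_sat((acc))), clamping each lane into [0, 255], while
// CONVERT_SAT_ROUND(x, int4, rte) resolves to (convert_int4_sat_rte((x))).
// The select_vec_dt_* defines below map each data type to an integer type of the
// same bit width, as required for the predicate argument of select().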
#define select_vec_dt_uchar(size) uchar##size
#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
#define select_vec_dt_short(size) short##size
#define select_vec_dt_half(size) short##size
#define select_vec_dt_uint(size) uint##size
#define select_vec_dt_int(size) int##size
#define select_vec_dt_float(size) int##size
#define select_vec_dt_ulong(size) ulong##size
#define select_vec_dt_long(size) long##size
#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) ((x).s0) + ((x).s1)
#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)
#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))
#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
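// Illustrative expansion: SUM_REDUCE(v, 4) becomes
// ((v).s01).s0 + ((v).s01).s1 + ((v).s23).s0 + ((v).s23).s1, a tree reduction of
// all four lanes; MAX_REDUCE(v, 4) builds the same tree with max().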
#define VECTOR_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_offset_first_element_in_bytes
#define IMAGE_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_offset_first_element_in_bytes
#define TENSOR3D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_offset_first_element_in_bytes
#define TENSOR4D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_stride_w, \
uint name##_step_w, \
uint name##_offset_first_element_in_bytes
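// Usage sketch (hypothetical kernel): these macros flatten a tensor argument into
// the buffer/stride/step parameters a kernel signature needs, e.g.
//   __kernel void copy_image(IMAGE_DECLARATION(src), IMAGE_DECLARATION(dst))
// declares src_ptr, src_stride_x, src_step_x, src_stride_y, src_step_y and
// src_offset_first_element_in_bytes, and likewise for dst.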
#define CONVERT_TO_VECTOR_STRUCT(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)
#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)
#define CONVERT_TO_IMAGE_STRUCT(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)
#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)
#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)
#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
/** Structure to hold Vector information */
typedef struct Vector
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source vector */
int stride_x; /**< Stride of the vector in X dimension (in bytes) */
} Vector;
/** Structure to hold Image information */
typedef struct Image
{
__global uchar *ptr; /**< Pointer to the starting postion of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
int stride_x; /**< Stride of the image in X dimension (in bytes) */
int stride_y; /**< Stride of the image in Y dimension (in bytes) */
} Image;
/** Structure to hold 3D tensor information */
typedef struct Tensor3D
{
__global uchar *ptr; /**< Pointer to the starting postion of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
int stride_x; /**< Stride of the image in X dimension (in bytes) */
int stride_y; /**< Stride of the image in Y dimension (in bytes) */
int stride_z; /**< Stride of the image in Z dimension (in bytes) */
} Tensor3D;
/** Structure to hold 4D tensor information */
typedef struct Tensor4D
{
__global uchar *ptr; /**< Pointer to the starting postion of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
int stride_x; /**< Stride of the image in X dimension (in bytes) */
int stride_y; /**< Stride of the image in Y dimension (in bytes) */
int stride_z; /**< Stride of the image in Z dimension (in bytes) */
int stride_w; /**< Stride of the image in W dimension (in bytes) */
} Tensor4D;
/** Wrap vector information into a Vector structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source vector
* @param[in] stride_x Stride of the vector in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
*
* @return A vector object
*/
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
Vector vector =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
};
vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
return vector;
}
/** Wrap image information into an Image structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
*
* @return An image object
*/
inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
return img;
}
/** Wrap 3D tensor information into an image structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
* @param[in] stride_x Stride of the tensor in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
* @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
* @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
*
* @return An image object
*/
inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return img;
}
/** Wrap 3D tensor information into a tensor structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
* @param[in] stride_x Stride of the tensor in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
* @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
* @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return tensor;
}
/** Wrap 3D tensor information into a tensor structure, without updating the pointer.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
* @param[in] stride_x Stride of the tensor in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
* @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
* @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
return tensor;
}
/** Wrap 4D tensor information into a tensor structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
* @param[in] stride_x Stride of the tensor in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
* @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
* @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
* @param[in] stride_w Stride of the tensor in W dimension (in bytes)
* @param[in] step_w stride_w * number of elements along W processed per workitem (in bytes)
* @param[in] mod_size The number of Z blocks per W slice, used to split the third global id into Z and W components
*
* @return A 4D tensor object
*/
inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
uint step_w,
uint mod_size)
{
Tensor4D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z,
.stride_w = stride_w
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
return tensor;
}
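/* Worked example (values assumed): with mod_size = 4, a workitem with get_global_id(2) == 9
 * addresses z = 9 % 4 = 1 and w = 9 / 4 = 2, so the pointer advances by 1 * step_z + 2 * step_w
 * on top of the X/Y offsets. This is how a 4D tensor is traversed with only three global
 * work dimensions available.
 */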
/** Get the pointer position of a Vector
*
* @param[in] vec Pointer to the Vector structure
* @param[in] x Relative X position
*
* @return The pointer to the element at the given position
*/
inline __global const uchar *vector_offset(const Vector *vec, int x)
{
return vec->ptr + x * vec->stride_x;
}
/** Get the pointer position of an Image
*
* @param[in] img Pointer to the Image structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
*
* @return The pointer to the element at the given position
*/
inline __global uchar *offset(const Image *img, int x, int y)
{
return img->ptr + x * img->stride_x + y * img->stride_y;
}
/** Get the pointer position of a Tensor3D
*
* @param[in] tensor Pointer to the Tensor3D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
*
* @return The pointer to the element at the given position
*/
inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}
/** Get the pointer position of a Tensor4D
*
* @param[in] tensor Pointer to the Tensor4D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
* @param[in] w Relative W position
*
* @return The pointer to the element at the given position
*/
inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}
/** Get the pointer for a given linear index of a Tensor3D
*
* @param[in] tensor Pointer to the Tensor3D structure
* @param[in] width Width of the input tensor
* @param[in] height Height of the input tensor
* @param[in] depth Depth of the input tensor
* @param[in] index Linear index
*
* @return The pointer to the element at the given linear index
*/
inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
uint num_elements = width * height;
const uint z = index / num_elements;
index %= num_elements;
const uint y = index / width;
index %= width;
const uint x = index;
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}
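/* Illustrative decomposition (values assumed): for width = 4, height = 3 and index = 17,
 * num_elements = 12, so z = 17 / 12 = 1; the remainder 5 gives y = 5 / 4 = 1 and x = 5 % 4 = 1,
 * and the returned pointer is ptr + offset_first_element_in_bytes + 1 * stride_x + 1 * stride_y
 * + 1 * stride_z.
 */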
#endif // _HELPER_H
#if GPU_ARCH == GPU_ARCH_BIFROST
#define MLA(a, b, c) (fma(c, b, a))
#else // GPU_ARCH == GPU_ARCH_BIFROST
#define MLA(a, b, c) ((b) * (c) + (a))
#endif // GPU_ARCH == GPU_ARCH_BIFROST
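/* Both branches compute (a) + (b) * (c); the Bifrost variant spells it as fma() so the compiler
 * emits a fused multiply-add. For example, MLA(acc, coeff, x) (names assumed) expands to
 * fma(x, coeff, acc) on Bifrost and to ((coeff) * (x) + (acc)) elsewhere.
 */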
// Hard-Swish
#define hard_swish_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * ((min(max((x + (DATA_TYPE)3.0), (DATA_TYPE)0.0), (DATA_TYPE)6.0)) * (DATA_TYPE)0.166666667))
// Logistic Activation
#define logistic_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)1.0 / ((DATA_TYPE)1.0 + exp(-x)))
// Hyperbolic Tangent Activation
#define tanh_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((DATA_TYPE)A_VAL * tanh((DATA_TYPE)B_VAL * x))
// RELU Activation
#define relu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (max((DATA_TYPE)0.0, x))
// Bounded RELU Activation
#define brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min((DATA_TYPE)A_VAL, max((DATA_TYPE)0.0, x)))
// Lower Upper Bounded RELU Activation
#define lu_brelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (min(max(x, (DATA_TYPE)B_VAL), (DATA_TYPE)A_VAL))
// Leaky RELU Activation
#define lrelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ((min(x, (DATA_TYPE)0.0) * (DATA_TYPE)A_VAL) + max(x, (DATA_TYPE)0.0))
// Soft RELU Activation
#define srelu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (log((DATA_TYPE)1.0 + exp(x)))
// ELU Activation
#define elu_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (select(((DATA_TYPE)A_VAL * (exp(x) - (DATA_TYPE)1.0)), x, (SELECT_VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))isgreaterequal(x, (DATA_TYPE)0.0)))
// Absolute Activation
#define abs_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (fabs(x))
// Square Activation
#define square_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x * x)
// Square-root Activation
#define sqrt_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (sqrt(x))
// Linear Activation
#define linear_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (MLA((DATA_TYPE)B_VAL, (DATA_TYPE)A_VAL, x))
// Identity Activation
#define identity_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) (x)
#define ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) op##_op(DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)
#define ACTIVATION(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL) ACT_OP(op, DATA_TYPE, VEC_SIZE, x, A_VAL, B_VAL)
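/* Illustrative expansion (arguments assumed): ACTIVATION(lu_brelu, float, 4, x, 6.0f, -6.0f)
 * expands through ACT_OP to lu_brelu_op(float, 4, x, 6.0f, -6.0f), i.e.
 * (min(max(x, (float)-6.0f), (float)6.0f)): x clamped to [-6, 6]. The two-level expansion lets
 * `op` itself be a macro, expanded before token pasting.
 */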
#ifndef ARM_COMPUTE_HELPER_H
#define ARM_COMPUTE_HELPER_H
/** Store the 0th to (n-1)th rows of the given variables
* @name STORE_ROW_n
*
* @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_n
/** Convert and store the 0th to (n-1)th rows of the given variables
* @name CONVERT_STORE_ROW_n
*
* @param[in] N0 The size of the vectors
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group CONVERT_STORE_ROW_n
/** Store a block of the given size M0xN0
* @name STORE_BLOCK
*
* Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group STORE_BLOCK
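/* Illustrative expansion (basename and pointer names assumed): STORE_BLOCK(2, 4, float, c, ptr, stride_y, zin)
 * expands to STORE_ROW_2(4, float, c, ptr, stride_y, zin), i.e.
 *
 *   vstore4(c0, 0, (__global float *)(ptr + 0 * stride_y + zin0));
 *   vstore4(c1, 0, (__global float *)(ptr + 1 * stride_y + zin1));
 *
 * The _STR indirection lets M0 itself be a macro, expanded before token pasting.
 */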
/** Convert and store a block of the given size M0xN0
* @name CONVERT_STORE_BLOCK
*
* Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group CONVERT_STORE_BLOCK
/** Partially store the 0th to (n-1)th rows of the given variables
* @name STORE_ROW_PARTIAL_n
* Within each row, store the lower @p STORE_N0 elements of vectors of width @p N0
*
* @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] STORE_N0 The **lower** size of the vectors to store. Supported: 1-16 and <= @p N0
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_PARTIAL_n
/** Partially store a block of the given size STORE_M0xSTORE_N0
* @name STORE_BLOCK_PARTIAL
*
* @note The vector width @p N0 is also required for correct partial storing behaviour.
* @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for STORE_M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for STORE_M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] STORE_M0 The number of rows to store. Supported: 1-16
* @param[in] STORE_N0 The lower number of elements of vectors to store. Supported: 1-16 and <= @p N0
* @param[in] N0 The size of each vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** Store a block that can be partial in both x and y dimensions
*
* @note in case @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in x but not y.
*
* @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in y but not x.
*
* @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
*/
#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \
if(!(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** @} */ // end of group STORE_BLOCK_PARTIAL
#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** Boundary-aware GEMM block store
* @name STORE_BLOCK_BOUNDARY_AWARE
* This macro assumes the following schemes to achieve boundary-awareness:
* - Overlapping load in Y axis from lhs tensor. This implies lhs has no padding along y dim.
* - Non-overlapping (normal) load from rhs tensor. This implies rhs can have paddings.
* - Overlapping load in Y axis from bias tensor. This implies bias has no padding along y dim.
* The macro then ensures that the dst tensor can be stored without any paddings in both x and y dim.
*
* In the y dimension, we place the partial blocks **at the beginning** while in the x dimension, we place the partial
* blocks **at the end**.
* Say, the dst tensor is of shape MxN and we have M0 and N0 as the block size, this is how we define "partial blocks"/
* "boundary block" (we use the 2 terms "partial blocks" and "boundary blocks" interchangeably) and its various parameters:
*
* *--x--> x == 0 x == 1
* | |<------------------------------N-------------------------->|
* y |<--------------N0------------->|<----PARTIAL_STORE_N0----->|
* | -------------#############################################################
* * | | |...............................|...........................|
* y == 0 | PAR_..._M0 |......Boundary block in y......|.Boundary block in x and y.|
* | | |...............................|...........................|
* M --#############################################################
* | | | |...........................|
* y == 1 | M0 | Non-boundary block |....Boundary block in x....|
* | | | |...........................|
* |------------#############################################################
*
* Then @p PARTIAL_STORE_M0 = M % M0 and @p PARTIAL_STORE_N0 = N % N0
*
* @note in case @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* It automatically detects if a given M,N,M0,N0 combination can yield partial blocks in either the X or Y dimension,
* and selects the corresponding store methods such that the boundary detection logic is only added when needed.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported: [0, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
* @{
*/
#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case1: No partial blocks in either x or y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0
// Case2: Partial blocks in y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)
#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0
// Case3: Partial blocks in x
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)
#else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case4: Partial blocks in both x and y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)
#endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
#endif // defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** @} */ // end of group STORE_BLOCK_BOUNDARY_AWARE
#if defined(PARTIAL_STORE_M0)
/** Compute the start m0 row (LHS, BIAS and DST) in a boundary-aware way so as to avoid padding
* @name COMPUTE_M0_START_ROW
* If there are any partial blocks in the y dimension, they are placed at the beginning of the rows.
* This shift amount is added to all rows such that the partial block (at the beginning) overlaps with the subsequent
* blocks in the y dimension to avoid any padding.
* EG: M0=4, PARTIAL_STORE_M0=1:
* | Non-overlapping | +M0_ROW_SHIFT (Overlapping)
* block 0 (partial)| start row = 0 | start row = 0
* block 1 (full) | start row = 4 | start row = 1
* block 2 (full) | start row = 8 | start row = 5
*
* @param[in] y Global id of current block in y.
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @{
*/
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0))))
#else // defined(PARTIAL_STORE_M0)
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(y * M0))
#endif // defined(PARTIAL_STORE_M0)
/** @} */ // end of group COMPUTE_M0_START_ROW
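/* Verification of the example above (M0 = 4, PARTIAL_STORE_M0 = 1): the shift is
 * (M0 - PARTIAL_STORE_M0) % M0 = 3, so COMPUTE_M0_START_ROW gives max(0, 0 - 3) = 0 for y = 0,
 * max(0, 4 - 3) = 1 for y = 1 and max(0, 8 - 3) = 5 for y = 2, matching the overlapping column.
 */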
/** Store a vector that can only be partial in x.
*
* @note in case @p vec_size or @p leftover != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to end in a 0.
* E.g., for basename=c, the expected name is c0.
*
* @param[in] basename The name of the variable without trailing 0
* @param[in] data_type The data type of the vector
* @param[in] ptr The base pointer
* @param[in] vec_size The vector size if cond = false. Supported: 1, 2, 3, 4, 8, 16
* @param[in] leftover The vector size if cond = true. Supported range: [1, @p vec_size)
* @param[in] cond Condition to select either @p vec_size or @p leftover
* @{
*/
#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \
STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond)
/** @} */ // end of group STORE_VECTOR_SELECT
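/* Illustrative use (variable names hypothetical): for a row of 19 elements processed with
 * vec_size = 8, the last block covers only 3 elements, so a kernel holding its result in acc0
 * would issue
 *
 *   STORE_VECTOR_SELECT(acc, float, output_ptr, 8, 3, gid_x == last_block_id);
 *
 * storing all 8 elements for full blocks and only the lower 3 for the final one.
 */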
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#pragma OPENCL EXTENSION cl_arm_printf : enable
#endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
/** Concatenate two inputs.
*
* @param[in] a The first input to be concatenated
* @param[in] b The second input to be concatenated
*
* @return The concatenated output
*/
#define CONCAT(a, b) a##b
/** Expand the given vector
*
* @param[in] x The vector to be expanded
*
* @return The expanded output
*/
#define EXPAND(x) x
/** Clamp the given value between an upper and lower bound.
*
* @param[in] x The value to be clamped
* @param[in] min_val The lower bound
* @param[in] max_val The upper bound
*
* @return The clamped value.
*/
#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
/** REVn reverses the given vector whose size is n.
* @name REVn
*
* @param[in] x The vector to be reversed
*
* @return The reversed vector
* @{
*/
#define REV1(x) ((x))
#define REV2(x) ((x).s10)
#define REV3(x) ((x).s210)
#define REV4(x) ((x).s3210)
#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)
/** @} */ // end of group REVn
/** Reverse the given vector.
* @name REVERSE
*
* @param[in] x The vector to be reversed
* @param[in] s The size of the vector
*
* @return The reversed vector
* @{
*/
#define REVERSE_STR(x, s) REV##s((x))
#define REVERSE(x, s) REVERSE_STR(x, s)
/** @} */ // end of group REVERSE
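/* Illustrative expansion: REVERSE(v, 4) expands via REVERSE_STR to REV4(v), i.e. ((v).s3210),
 * so a float4 v = (float4)(1, 2, 3, 4) becomes (4, 3, 2, 1). The _STR indirection allows s to
 * be a macro that is expanded before token pasting.
 */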
/** Circular-right-shift (rotate-right) the vector of size s by the amount of n.
* @name ROTs_n
*
* @param[in] x The vector to be shifted
*
* @return The shifted vector
* @{
*/
#define ROT1_0(x) ((x))
#define ROT2_0(x) ((x))
#define ROT2_1(x) ((x).s10)
#define ROT3_0(x) ((x))
#define ROT3_1(x) ((x).s201)
#define ROT3_2(x) ((x).s120)
#define ROT4_0(x) ((x))
#define ROT4_1(x) ((x).s3012)
#define ROT4_2(x) ((x).s2301)
#define ROT4_3(x) ((x).s1230)
#define ROT8_0(x) ((x))
#define ROT8_1(x) ((x).s70123456)
#define ROT8_2(x) ((x).s67012345)
#define ROT8_3(x) ((x).s56701234)
#define ROT8_4(x) ((x).s45670123)
#define ROT8_5(x) ((x).s34567012)
#define ROT8_6(x) ((x).s23456701)
#define ROT8_7(x) ((x).s12345670)
#define ROT16_0(x) ((x))
#define ROT16_1(x) ((x).sF0123456789ABCDE)
#define ROT16_2(x) ((x).sEF0123456789ABCD)
#define ROT16_3(x) ((x).sDEF0123456789ABC)
#define ROT16_4(x) ((x).sCDEF0123456789AB)
#define ROT16_5(x) ((x).sBCDEF0123456789A)
#define ROT16_6(x) ((x).sABCDEF0123456789)
#define ROT16_7(x) ((x).s9ABCDEF012345678)
#define ROT16_8(x) ((x).s89ABCDEF01234567)
#define ROT16_9(x) ((x).s789ABCDEF0123456)
#define ROT16_10(x) ((x).s6789ABCDEF012345)
#define ROT16_11(x) ((x).s56789ABCDEF01234)
#define ROT16_12(x) ((x).s456789ABCDEF0123)
#define ROT16_13(x) ((x).s3456789ABCDEF012)
#define ROT16_14(x) ((x).s23456789ABCDEF01)
#define ROT16_15(x) ((x).s123456789ABCDEF0)
/** @} */ // end of group ROTs_n
/** Circular-right-shift (rotate-right) the given vector by the given amount.
* @name ROTATE
*
* @param[in] x The vector to be shifted
* @param[in] s The size of the vector
* @param[in] n The amount to be shifted
*
* @return The shifted vector
* @{
*/
#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
#define ROTATE(x, s, n) ROTATE_STR(x, s, n)
/** @} */ // end of group ROTATE
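/* Illustrative expansion: ROTATE(v, 4, 1) expands to ROT4_1(v), i.e. ((v).s3012), so a
 * uchar4 v = (uchar4)(a, b, c, d) becomes (d, a, b, c): each lane moves one position to the
 * right, with wrap-around.
 */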
/** Creates a vector of size n filled with offset values corresponding to the location of each element.
* @name V_OFFSn
*
* @param[in] dt The data type of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define V_OFFS1(dt) (dt##1)(0)
#define V_OFFS2(dt) (dt##2)(0, 1)
#define V_OFFS3(dt) (dt##3)(0, 1, 2)
#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
/** @} */ // end of group V_OFFSn
/** Create a vector filled with offset values corresponding to the location of each element.
* @name VEC_OFFS
*
* @param[in] dt The data type of the output vector
* @param[in] s The size of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
/** @} */ // end of group VEC_OFFS
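/* Illustrative expansion: VEC_OFFS(int, 4) expands to V_OFFS4(int), i.e. (int4)(0, 1, 2, 3).
 * A typical use is building per-lane coordinates from a base index, e.g.
 * (int4)x0 + VEC_OFFS(int, 4) for four consecutive x positions (x0 assumed).
 */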
#define VLOAD_STR(size) vload##size
#define VLOAD(size) VLOAD_STR(size)
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4
/** Utility macro to convert a vector size into the corresponding number of pixel units.
*
* @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
*
* @param[in] vec_size Vector size. Only 4, 8 and 16 are supported
*
* @return The pixel unit (number of pixels)
* @{
*/
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
/** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
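/* Rationale, as implied by the defines above: each image read yields one 4-channel pixel, so a
 * vector of 16 elements corresponds to CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(16) == PIXEL_UNIT16 == 4
 * pixels, matching the four read_image* calls in the read_image2d_*x4 macros below.
 */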
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
/** Utility macro to read a 2D OpenCL image object.
*
* @note Coordinates are not normalized
*
* @param[in] data_type Data type
* @param[in] n0 Number of pixels to read. Only 1, 2 and 4 are supported
* @param[in] img OpenCL image object
* @param[in] x_coord The x coordinate for the top-left pixel
* @param[in] y_coord The y coordinate for the top-left pixel
*
* @return Pixels from the 2D OpenCL image object
* @{
*/
#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)
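/* Illustrative use (image object and coordinates assumed): READ_IMAGE2D(float, 2, src_img, x, y)
 * expands to read_image2d_floatx2(src_img, x, y) and yields a float8 built from the pixels at
 * (x, y) and (x + 1, y). Coordinates are texel coordinates, not normalized.
 */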
#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)
#define float1 float
#define half1 half
#define char1 char
#define uchar1 uchar
#define short1 short
#define ushort1 ushort
#define int1 int
#define uint1 uint
#define long1 long
#define ulong1 ulong
#define double1 double
#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA
/** Extended partial vstore that correctly handles scalar values as well.
* Store the **lower** 0th to (n-1)th elements of the given vector while minimising the number of vstore ops
* @name VSTORE_PARTIAL
*
* @note With this macro, the passed data can be both a vector and a scalar
* @note @p store_size needs to be <= @p size
* eg 1: Valid
* VSTORE_PARTIAL(16, 15) ...;
* eg 2: Invalid
* VSTORE_PARTIAL(4, 7) ...;
*
* @param[in] size The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16
* @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size
* @{
*/
#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)
#define NO_STORE(data, offs, ptr) \
{ \
}
// Size == 1 (scalar)
#define vstore_partial_1_0 NO_STORE
#define vstore_partial_1_1 vstore1
#define vstore_partial_1_2 NO_STORE
#define vstore_partial_1_3 NO_STORE
#define vstore_partial_1_4 NO_STORE
#define vstore_partial_1_5 NO_STORE
#define vstore_partial_1_6 NO_STORE
#define vstore_partial_1_7 NO_STORE
#define vstore_partial_1_8 NO_STORE
#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
#define vstore_partial_1_13 NO_STORE
#define vstore_partial_1_14 NO_STORE
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE
// Size == 2
#define vstore_partial_2_0 NO_STORE
#define vstore_partial_2_1 vstore_partial_1
#define vstore_partial_2_2 vstore_partial_2
#define vstore_partial_2_3 NO_STORE
#define vstore_partial_2_4 NO_STORE
#define vstore_partial_2_5 NO_STORE
#define vstore_partial_2_6 NO_STORE
#define vstore_partial_2_7 NO_STORE
#define vstore_partial_2_8 NO_STORE
#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
#define vstore_partial_2_13 NO_STORE
#define vstore_partial_2_14 NO_STORE
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE
// Size == 3
#define vstore_partial_3_0 NO_STORE
#define vstore_partial_3_1 vstore_partial_1
#define vstore_partial_3_2 vstore_partial_2
#define vstore_partial_3_3 vstore_partial_3
#define vstore_partial_3_4 NO_STORE
#define vstore_partial_3_5 NO_STORE
#define vstore_partial_3_6 NO_STORE
#define vstore_partial_3_7 NO_STORE
#define vstore_partial_3_8 NO_STORE
#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
#define vstore_partial_3_13 NO_STORE
#define vstore_partial_3_14 NO_STORE
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE
// Size == 4
#define vstore_partial_4_0 NO_STORE
#define vstore_partial_4_1 vstore_partial_1
#define vstore_partial_4_2 vstore_partial_2
#define vstore_partial_4_3 vstore_partial_3
#define vstore_partial_4_4 vstore_partial_4
#define vstore_partial_4_5 NO_STORE
#define vstore_partial_4_6 NO_STORE
#define vstore_partial_4_7 NO_STORE
#define vstore_partial_4_8 NO_STORE
#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
#define vstore_partial_4_13 NO_STORE
#define vstore_partial_4_14 NO_STORE
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE
// Size == 8
#define vstore_partial_8_0 NO_STORE
#define vstore_partial_8_1 vstore_partial_1
#define vstore_partial_8_2 vstore_partial_2
#define vstore_partial_8_3 vstore_partial_3
#define vstore_partial_8_4 vstore_partial_4
#define vstore_partial_8_5 vstore_partial_5
#define vstore_partial_8_6 vstore_partial_6
#define vstore_partial_8_7 vstore_partial_7
#define vstore_partial_8_8 vstore_partial_8
#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
#define vstore_partial_8_13 NO_STORE
#define vstore_partial_8_14 NO_STORE
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE
// Size == 16
#define vstore_partial_16_0 NO_STORE
#define vstore_partial_16_1 vstore_partial_1
#define vstore_partial_16_2 vstore_partial_2
#define vstore_partial_16_3 vstore_partial_3
#define vstore_partial_16_4 vstore_partial_4
#define vstore_partial_16_5 vstore_partial_5
#define vstore_partial_16_6 vstore_partial_6
#define vstore_partial_16_7 vstore_partial_7
#define vstore_partial_16_8 vstore_partial_8
#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
#define vstore_partial_16_13 vstore_partial_13
#define vstore_partial_16_14 vstore_partial_14
#define vstore_partial_16_15 vstore_partial_15
#define vstore_partial_16_16 vstore_partial_16
/** Partial vstore. Store the **lower** 0th to (n-1)th elements of the given vector while minimising the number of vstore ops
* @name vstore_partial_n
*
* @note @p DATA needs to be a vector not a scalar
* @note n needs to be <= the vector width of the input variable @p DATA
* eg 1: Valid
* vstore_partial_15(var:float16, 0, 0xabcd);
* eg 2: Invalid
* vstore_partial_7(var:float4, 0, 0xabcd);
*
* @note in case n == 1, 2, 3, 4, 8, 16, no extra vstore is invoked, thus there is no performance penalty.
*
* @param[in] DATA The name of the variable
* @param[in] OFFSET Offset in n
* @param[in] PTR The base pointer
* @{
*/
#define vstore_partial_1(DATA, OFFSET, PTR) \
vstore1(DATA.s0, OFFSET, PTR);
#define vstore_partial_2(DATA, OFFSET, PTR) \
vstore2(DATA.s01, OFFSET, PTR);
#define vstore_partial_3(DATA, OFFSET, PTR) \
vstore3(DATA.s012, OFFSET, PTR);
#define vstore_partial_4(DATA, OFFSET, PTR) \
vstore4(DATA.s0123, OFFSET, PTR);
#define vstore_partial_5(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore1(DATA.s4, OFFSET, PTR + 4);
#define vstore_partial_6(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_2(DATA.s45, OFFSET, PTR + 4);
#define vstore_partial_7(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_3(DATA.s456, OFFSET, PTR + 4);
#define vstore_partial_8(DATA, OFFSET, PTR) \
vstore8(DATA.s01234567, OFFSET, PTR);
#define vstore_partial_9(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore1(DATA.s8, OFFSET, PTR + 8);
#define vstore_partial_10(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_2(DATA.s89, OFFSET, PTR + 8);
#define vstore_partial_11(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);
#define vstore_partial_12(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);
#define vstore_partial_13(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_14(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_15(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_16(DATA, OFFSET, PTR) \
vstore16(DATA, OFFSET, PTR);
/** @} */ // end of group vstore_partial_n
/** @} */ // end of group VSTORE_PARTIAL
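/* Usage sketch for vstore_partial_n (illustrative only; 'data' and 'dst' are
 * hypothetical names):
 *
 *     float8 data = (float8)(1.0f);
 *     __global float *dst = ...;      // destination buffer
 *     vstore_partial_5(data, 0, dst); // stores data.s0..data.s4 to dst[0..4]
 *
 * After macro expansion this issues just two stores: a vstore4 of the first
 * four lanes followed by a vstore1 of lane 4 at dst + 4.
 */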
// The convert_* built-in functions with the _sat modifier are not supported for floating-point
// destination types, so we define _sat aliases without saturation to overcome this issue
#define convert_float_sat convert_float
#define convert_float1_sat convert_float
#define convert_float2_sat convert_float2
#define convert_float3_sat convert_float3
#define convert_float4_sat convert_float4
#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
#define convert_half_sat convert_half
#define convert_half1_sat convert_half
#define convert_half2_sat convert_half2
#define convert_half3_sat convert_half3
#define convert_half4_sat convert_half4
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16
#define convert_float1 convert_float
#define convert_half1 convert_half
#define convert_char1 convert_char
#define convert_uchar1 convert_uchar
#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#define convert_long1 convert_long
#define convert_ulong1 convert_ulong
#define convert_double1 convert_double
#define convert_char1_sat convert_char_sat
#define convert_uchar1_sat convert_uchar_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
#define convert_uint1_sat convert_uint_sat
#define convert_long1_sat convert_long_sat
#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat
#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)
#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
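/* Usage sketch for the CONVERT helpers (illustrative values):
 *
 *     float4 f = (float4)(1.5f, -0.5f, 300.0f, -300.0f);
 *     int4 i = CONVERT(f, int4);         // expands to convert_int4(f)
 *     uchar4 u = CONVERT_SAT(f, uchar4); // expands to convert_uchar4_sat(f) -> (uchar4)(1, 0, 255, 0)
 */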
#define select_vec_dt_uchar(size) uchar##size
#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
#define select_vec_dt_short(size) short##size
#define select_vec_dt_half(size) short##size
#define select_vec_dt_uint(size) uint##size
#define select_vec_dt_int(size) int##size
#define select_vec_dt_float(size) int##size
#define select_vec_dt_ulong(size) ulong##size
#define select_vec_dt_long(size) long##size
#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) ((x).s0) + ((x).s1)
#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)
#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))
#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
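/* Usage sketch for SUM_REDUCE / MAX_REDUCE (illustrative values):
 *
 *     float4 v = (float4)(1.0f, 2.0f, 3.0f, 4.0f);
 *     float s = SUM_REDUCE(v, 4); // (v.s0 + v.s1) + (v.s2 + v.s3) = 10.0f
 *     float m = MAX_REDUCE(v, 4); // max(max(v.s0, v.s1), max(v.s2, v.s3)) = 4.0f
 */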
#define VECTOR_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_offset_first_element_in_bytes
#define IMAGE_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_offset_first_element_in_bytes
#define TENSOR3D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_offset_first_element_in_bytes
#define TENSOR4D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_stride_w, \
uint name##_step_w, \
uint name##_offset_first_element_in_bytes
#define CONVERT_TO_VECTOR_STRUCT(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)
#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)
#define CONVERT_TO_IMAGE_STRUCT(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)
#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)
#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)
#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
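/* Usage sketch: a hypothetical kernel signature built from these macros
 * ('copy_byte' and its arguments are illustrative only):
 *
 *     __kernel void copy_byte(TENSOR3D_DECLARATION(src), TENSOR3D_DECLARATION(dst))
 *     {
 *         Tensor3D src_t = CONVERT_TO_TENSOR3D_STRUCT(src); // ptr offset to this workitem
 *         Tensor3D dst_t = CONVERT_TO_TENSOR3D_STRUCT(dst);
 *         *dst_t.ptr = *src_t.ptr;                          // one byte per workitem
 *     }
 */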
/** Structure to hold Vector information */
typedef struct Vector
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source vector */
int stride_x; /**< Stride of the vector in X dimension (in bytes) */
} Vector;
/** Structure to hold Image information */
typedef struct Image
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
int stride_x; /**< Stride of the image in X dimension (in bytes) */
int stride_y; /**< Stride of the image in Y dimension (in bytes) */
} Image;
/** Structure to hold 3D tensor information */
typedef struct Tensor3D
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */
int stride_x; /**< Stride of the tensor in X dimension (in bytes) */
int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
} Tensor3D;
/** Structure to hold 4D tensor information */
typedef struct Tensor4D
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source tensor */
int stride_x; /**< Stride of the tensor in X dimension (in bytes) */
int stride_y; /**< Stride of the tensor in Y dimension (in bytes) */
int stride_z; /**< Stride of the tensor in Z dimension (in bytes) */
int stride_w; /**< Stride of the tensor in W dimension (in bytes) */
} Tensor4D;
/** Wrap vector information into a Vector structure, and make the pointer point at this workitem's data.
 *
 * @param[in] ptr Pointer to the starting position of the buffer
 * @param[in] offset_first_element_in_bytes The offset of the first element in the source vector
 * @param[in] stride_x Stride of the vector in X dimension (in bytes)
 * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
 *
 * @return A vector object
*/
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
Vector vector =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
};
vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
return vector;
}
/** Wrap image information into an Image structure, and make the pointer point at this workitem's data.
*
 * @param[in] ptr Pointer to the starting position of the buffer
 * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
 * @param[in] stride_x Stride of the image in X dimension (in bytes)
 * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] stride_y Stride of the image in Y dimension (in bytes)
 * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
*
* @return An image object
*/
inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
return img;
}
/** Wrap 3D tensor information into an image structure, and make the pointer point at this workitem's data.
 *
 * @param[in] ptr Pointer to the starting position of the buffer
 * @param[in] offset_first_element_in_bytes The offset of the first element in the source image
 * @param[in] stride_x Stride of the image in X dimension (in bytes)
 * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] stride_y Stride of the image in Y dimension (in bytes)
 * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] stride_z Stride of the image in Z dimension (in bytes)
 * @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
 *
 * @return An image object
*/
inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return img;
}
/** Wrap 3D tensor information into a tensor structure, and make the pointer point at this workitem's data.
 *
 * @param[in] ptr Pointer to the starting position of the buffer
 * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[in] stride_x Stride of the tensor in X dimension (in bytes)
 * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
 * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
 * @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return tensor;
}
/** Wrap 3D tensor information into a tensor structure without updating the pointer.
 *
 * @param[in] ptr Pointer to the starting position of the buffer
 * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[in] stride_x Stride of the tensor in X dimension (in bytes)
 * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
 * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
 * @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
return tensor;
}
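/** Wrap 4D tensor information into a tensor structure, and make the pointer point at this workitem's data.
 *
 * @param[in] ptr Pointer to the starting position of the buffer
 * @param[in] offset_first_element_in_bytes The offset of the first element in the source tensor
 * @param[in] stride_x Stride of the tensor in X dimension (in bytes)
 * @param[in] step_x stride_x * number of elements along X processed per workitem (in bytes)
 * @param[in] stride_y Stride of the tensor in Y dimension (in bytes)
 * @param[in] step_y stride_y * number of elements along Y processed per workitem (in bytes)
 * @param[in] stride_z Stride of the tensor in Z dimension (in bytes)
 * @param[in] step_z stride_z * number of elements along Z processed per workitem (in bytes)
 * @param[in] stride_w Stride of the tensor in W dimension (in bytes)
 * @param[in] step_w stride_w * number of elements along W processed per workitem (in bytes)
 * @param[in] mod_size The number of z-planes per batch: global id 2 is split into z (id % mod_size) and w (id / mod_size)
 *
 * @return A 4D tensor object
 */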
inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
uint step_w,
uint mod_size)
{
Tensor4D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z,
.stride_w = stride_w
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
return tensor;
}
/** Get the pointer position of a Vector
*
 * @param[in] vec Pointer to the Vector structure
* @param[in] x Relative X position
*/
inline __global const uchar *vector_offset(const Vector *vec, int x)
{
return vec->ptr + x * vec->stride_x;
}
/** Get the pointer position of an Image
*
 * @param[in] img Pointer to the Image structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
*/
inline __global uchar *offset(const Image *img, int x, int y)
{
return img->ptr + x * img->stride_x + y * img->stride_y;
}
/** Get the pointer position of a Tensor3D
*
 * @param[in] tensor Pointer to the Tensor3D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
*/
inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}
/** Get the pointer position of a Tensor4D
*
 * @param[in] tensor Pointer to the Tensor4D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
* @param[in] w Relative W position
*/
inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}
/** Get the offset for a given linear index of a Tensor3D
*
 * @param[in] tensor Pointer to the Tensor3D structure
* @param[in] width Width of the input tensor
* @param[in] height Height of the input tensor
* @param[in] depth Depth of the input tensor
* @param[in] index Linear index
*/
inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
uint num_elements = width * height;
const uint z = index / num_elements;
index %= num_elements;
const uint y = index / width;
index %= width;
const uint x = index;
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}
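/* Usage sketch for the offset helpers (illustrative; 't' wraps a float buffer
 * declared via hypothetical src_* kernel arguments):
 *
 *     Tensor3D t = CONVERT_TO_TENSOR3D_STRUCT(src);
 *     float v = *(__global const float *)tensor3D_offset(&t, 1, 2, 3);
 *
 * The returned address is t.ptr + 1 * t.stride_x + 2 * t.stride_y + 3 * t.stride_z,
 * i.e. the element at (x+1, y+2, z+3) relative to this workitem's position.
 */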
#endif // _HELPER_H
/** Utility macro to access a vector at the given scalar positions
 *
 * Supported offsets are 0, 1, 2, 3, 4, 8, 12 and 16
 *
 * @param[in] offset The offset within the vector (one of the supported values above)
 * @param[in] n0 The number of consecutive columns to access. n0 + offset must be <= 16
 * @param[in] x Vector to access
 * @{
 */
#define SCALAR_ACCESS_STR(offset, n0, x) scalar_access_##offset##_##n0(x)
#define SCALAR_ACCESS(offset, n0, x) SCALAR_ACCESS_STR(offset, n0, x)
// offset == 0
#define scalar_access_0_1(x) ((x).s0)
#define scalar_access_0_2(x) ((x).s01)
#define scalar_access_0_3(x) ((x).s012)
#define scalar_access_0_4(x) ((x).s0123)
#define scalar_access_0_8(x) ((x).s01234567)
#define scalar_access_0_16(x) ((x).s0123456789ABCDEF)
// offset == 1
#define scalar_access_1_1(x) ((x).s1)
#define scalar_access_1_2(x) ((x).s12)
#define scalar_access_1_3(x) ((x).s123)
#define scalar_access_1_4(x) ((x).s1234)
#define scalar_access_1_8(x) ((x).s12345678)
// offset == 2
#define scalar_access_2_1(x) ((x).s2)
#define scalar_access_2_2(x) ((x).s23)
#define scalar_access_2_3(x) ((x).s234)
#define scalar_access_2_4(x) ((x).s2345)
#define scalar_access_2_8(x) ((x).s23456789)
// offset == 3
#define scalar_access_3_1(x) ((x).s3)
#define scalar_access_3_2(x) ((x).s34)
#define scalar_access_3_3(x) ((x).s345)
#define scalar_access_3_4(x) ((x).s3456)
#define scalar_access_3_8(x) ((x).s3456789A)
// offset == 4
#define scalar_access_4_1(x) ((x).s4)
#define scalar_access_4_2(x) ((x).s45)
#define scalar_access_4_3(x) ((x).s456)
#define scalar_access_4_4(x) ((x).s4567)
#define scalar_access_4_8(x) ((x).s456789AB)
// offset == 8
#define scalar_access_8_1(x) ((x).s8)
#define scalar_access_8_2(x) ((x).s89)
#define scalar_access_8_3(x) ((x).s89A)
#define scalar_access_8_4(x) ((x).s89AB)
#define scalar_access_8_8(x) ((x).s89ABCDEF)
// offset == 12
#define scalar_access_12_1(x) ((x).sC)
#define scalar_access_12_2(x) ((x).sCD)
#define scalar_access_12_3(x) ((x).sCDE)
#define scalar_access_12_4(x) ((x).sCDEF)
// offset == 16
#define scalar_access_16_1(x) ((x).sF)
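/* Usage sketch for SCALAR_ACCESS (illustrative):
 *
 *     float16 acc = (float16)(0.0f);
 *     SCALAR_ACCESS(8, 4, acc) = (float4)(1.0f); // writes lanes acc.s89AB
 *
 * SCALAR_ACCESS(8, 4, acc) expands to scalar_access_8_4(acc), i.e. ((acc).s89AB).
 */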
/** Loads the rows from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1) without allocating variables.
* @name LOAD_TENSOR_ROW_n
*
* @param[in] N0 The number of columns to load
* @param[in] DATA_TYPE The data type of variables
* @param[in] BASENAME The basename of the destination variables for the loaded rows
* @param[in] PTR The base pointer
* @param[in] COL_OFFSET The column vector offset. COL_OFFSET + N0 must be <= 16
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The z-axis offset vector
* @{
*/
#define LOAD_TENSOR_ROW_0(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
({})
#define LOAD_TENSOR_ROW_1(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##0) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define LOAD_TENSOR_ROW_2(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_1(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##1) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define LOAD_TENSOR_ROW_3(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_2(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##2) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define LOAD_TENSOR_ROW_4(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_3(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##3) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define LOAD_TENSOR_ROW_5(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_4(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##4) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define LOAD_TENSOR_ROW_6(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_5(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##5) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define LOAD_TENSOR_ROW_7(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_6(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##6) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define LOAD_TENSOR_ROW_8(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_7(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##7) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define LOAD_TENSOR_ROW_9(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_8(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##8) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define LOAD_TENSOR_ROW_10(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_9(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##9) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define LOAD_TENSOR_ROW_11(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_10(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##A) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define LOAD_TENSOR_ROW_12(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_11(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##B) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define LOAD_TENSOR_ROW_13(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_12(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##C) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define LOAD_TENSOR_ROW_14(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_13(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##D) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define LOAD_TENSOR_ROW_15(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_14(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##E) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define LOAD_TENSOR_ROW_16(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
LOAD_TENSOR_ROW_15(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) \
SCALAR_ACCESS(COL_OFFSET, N0, BASENAME##F) = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @}*/ // end of group LOAD_TENSOR_ROW_n
/** Load tensor (consecutive rows and columns) with Z offset.
* @name LOAD_TENSOR
*
* Supported cases are M0=1,2,3,...,16 and N0=1,2,3,4,8,16
* The data to load is expected to have consecutive names for each row.
* E.g., for M0=3, and BASENAME=c, the expected data is c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3, and Z=zin, the expected Z offsets are zin0, zin1 and zin2.
*
* @param[in] M0 The number of consecutive rows
* @param[in] N0 The number of consecutive columns
* @param[in] DATA_TYPE The data type of the target
* @param[in] BASENAME The basename of the result variables
* @param[in] PTR The base pointer for the data
* @param[in] COL_OFFSET The column vector offset. COL_OFFSET + N0 must be <= 16
* @param[in] STRIDE_Y The stride in y-axis direction
* @param[in] Z The z-axis offset vector
* @{
*/
#define LOAD_TENSOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) LOAD_TENSOR_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z)
#define LOAD_TENSOR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z) LOAD_TENSOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, COL_OFFSET, STRIDE_Y, Z)
/** @} */ // end of group LOAD_TENSOR
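/* Usage sketch for LOAD_TENSOR (illustrative; assumes float4 variables a0..a2,
 * a __global uchar *src pointer, a stride src_stride_y and int offsets
 * zin0..zin2 are already declared):
 *
 *     LOAD_TENSOR(3, 4, float, a, src, 0, src_stride_y, zin);
 *
 * Each row i is read as a float4 from src + i * src_stride_y + zini and written
 * into lanes 0..3 of ai; the macro itself declares no variables.
 */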
/** Load 2D tensor (consecutive rows and columns) with Z offset.
* @name LOAD_TENSOR_M0Xn
*
* @param[in] M0 The number of rows to load [0-16]
* @param[in] N0 The number of columns to load [0-16]
* @param[in] DATA_TYPE The data type of variables
* @param[in] BASENAME The basename of the destination variables for the loaded rows
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The z-axis offset vector
* @{
*/
#define LOAD_TENSOR_M0X0(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
({})
#define LOAD_TENSOR_M0X1(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin);
#define LOAD_TENSOR_M0X2(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin);
#define LOAD_TENSOR_M0X3(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin);
#define LOAD_TENSOR_M0X4(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin);
#define LOAD_TENSOR_M0X5(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 1, DATA_TYPE, a, input_ptr + 4 * sizeof(DATA_TYPE), 4, src_stride_y, zin);
#define LOAD_TENSOR_M0X6(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 2, DATA_TYPE, a, input_ptr + 4 * sizeof(DATA_TYPE), 4, src_stride_y, zin);
#define LOAD_TENSOR_M0X7(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 3, DATA_TYPE, a, input_ptr + 4 * sizeof(DATA_TYPE), 4, src_stride_y, zin);
#define LOAD_TENSOR_M0X8(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin);
#define LOAD_TENSOR_M0X9(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 1, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin);
#define LOAD_TENSOR_M0X10(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 2, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin);
#define LOAD_TENSOR_M0X11(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 3, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin);
#define LOAD_TENSOR_M0X12(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin);
#define LOAD_TENSOR_M0X13(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); \
LOAD_TENSOR(M0, 1, DATA_TYPE, a, input_ptr + 12 * sizeof(DATA_TYPE), 12, src_stride_y, zin);
#define LOAD_TENSOR_M0X14(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); \
LOAD_TENSOR(M0, 2, DATA_TYPE, a, input_ptr + 12 * sizeof(DATA_TYPE), 12, src_stride_y, zin);
#define LOAD_TENSOR_M0X15(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, 8, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin); \
LOAD_TENSOR(M0, 4, DATA_TYPE, a, input_ptr + 8 * sizeof(DATA_TYPE), 8, src_stride_y, zin); \
LOAD_TENSOR(M0, 3, DATA_TYPE, a, input_ptr + 12 * sizeof(DATA_TYPE), 12, src_stride_y, zin);
#define LOAD_TENSOR_M0X16(M0, N0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
LOAD_TENSOR(M0, N0, DATA_TYPE, a, input_ptr, 0, src_stride_y, zin);
/** @}*/ // end of group LOAD_TENSOR_M0Xn
/** Load 2D tensor (consecutive rows and columns) with Z offset.
* @name LOAD_TENSOR_M0XN0
*
* @param[in] M0 The number of consecutive rows [0-16]
* @param[in] N0 The number of consecutive columns [0-16]
* @param[in] DATA_TYPE The data type of the target
* @param[in] BASENAME The basename of the result variables
* @param[in] PTR The base pointer for the data
* @param[in] STRIDE_Y The stride in y-axis direction
* @param[in] Z The z-axis offset vector
* @{
*/
#define LOAD_TENSOR_M0XN0_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) LOAD_TENSOR_M0X##N0(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define LOAD_TENSOR_M0XN0(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) LOAD_TENSOR_M0XN0_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** Loads the rows from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1).
* @name LOAD_ROW_n
*
* @param[in] N0 The number of columns to load
* @param[in] DATA_TYPE The data type of variables
* @param[in] BASENAME The basename of the destination variables for the loaded rows
* @param[in] PTR The base pointer
* @param[in] OFFSET The offset within a row
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The z-axis offset vector
* @{
*/
#define LOAD_ROW_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##0 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 0 * STRIDE_Y + Z##0));
#define LOAD_ROW_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##1 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 1 * STRIDE_Y + Z##1));
#define LOAD_ROW_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##2 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 2 * STRIDE_Y + Z##2));
#define LOAD_ROW_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##3 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 3 * STRIDE_Y + Z##3));
#define LOAD_ROW_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##4 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 4 * STRIDE_Y + Z##4));
#define LOAD_ROW_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##5 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 5 * STRIDE_Y + Z##5));
#define LOAD_ROW_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##6 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 6 * STRIDE_Y + Z##6));
#define LOAD_ROW_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##7 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 7 * STRIDE_Y + Z##7));
#define LOAD_ROW_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##8 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 8 * STRIDE_Y + Z##8));
#define LOAD_ROW_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##9 = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 9 * STRIDE_Y + Z##9));
#define LOAD_ROW_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##A = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 10 * STRIDE_Y + Z##A));
#define LOAD_ROW_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##B = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 11 * STRIDE_Y + Z##B));
#define LOAD_ROW_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##C = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 12 * STRIDE_Y + Z##C));
#define LOAD_ROW_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##D = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 13 * STRIDE_Y + Z##D));
#define LOAD_ROW_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##E = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 14 * STRIDE_Y + Z##E));
#define LOAD_ROW_16(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
LOAD_ROW_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##F = VLOAD(N0)(0, (__global DATA_TYPE *)(PTR + OFFSET + 15 * STRIDE_Y + Z##F));
/** @}*/ // end of group LOAD_ROW_n
/** Load Blocks (consecutive rows and columns) with Z offset.
* @name LOAD_BLOCK
*
* Supported cases are M0=1,2,3,...,16 and N0=1,2,3,4,8,16
* The data to load is expected to have consecutive names for each row.
* E.g., for M0=3, and BASENAME=c, the expected data is c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3, and Z=zin, the expected Z offsets are zin0, zin1 and zin2.
*
* @param[in] M0 The number of consecutive rows
* @param[in] N0 The number of consecutive columns
* @param[in] DATA_TYPE The data type of the target
* @param[in] BASENAME The basename of the result variables
* @param[in] PTR The base pointer for the data
* @param[in] OFFSET The offset within a row
* @param[in] STRIDE_Y The stride in y-axis direction
* @param[in] Z The z-axis offset vector
* @{
*/
#define LOAD_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) LOAD_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z)
#define LOAD_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z) LOAD_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y, Z)
/** @} */ // end of group LOAD_BLOCK
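/* Usage sketch for LOAD_BLOCK (illustrative; src, src_stride_y and zin0..zin1
 * are hypothetical names):
 *
 *     LOAD_BLOCK(2, 8, half, b, src, 0, src_stride_y, zin);
 *
 * Unlike LOAD_TENSOR above, this also declares the destination variables: it
 * expands (via VLOAD) to 'half8 b0 = vload8(...);' and 'half8 b1 = vload8(...);'.
 */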
/** Loads the rows from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1).
* @name LOAD_TEXTURE2D_ROW_n
*
* @param[in] N0 The number of pixels to read
* @param[in] DATA_TYPE The data type of variables
* @param[in] BASENAME The basename of the destination variables for the loaded rows
* @param[in] IMG The 2D OpenCL image object
* @param[in] X_COORD The x coordinate for the top-left pixel
* @param[in] Y_COORD The y coordinate for the top-left pixel
 * @param[in] X_STEP_ROW The per-row increment for the x coordinate (in pixels)
 * @param[in] Y_STEP_ROW The per-row increment for the y coordinate (in pixels)
* @{
*/
#define LOAD_TEXTURE2D_ROW_1(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##0 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 0 * X_STEP_ROW), (Y_COORD + 0 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_2(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_1(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##1 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 1 * X_STEP_ROW), (Y_COORD + 1 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_3(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_2(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##2 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 2 * X_STEP_ROW), (Y_COORD + 2 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_4(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_3(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##3 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 3 * X_STEP_ROW), (Y_COORD + 3 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_5(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_4(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##4 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 4 * X_STEP_ROW), (Y_COORD + 4 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_6(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_5(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##5 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 5 * X_STEP_ROW), (Y_COORD + 5 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_7(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_6(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##6 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 6 * X_STEP_ROW), (Y_COORD + 6 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_8(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_7(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##7 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 7 * X_STEP_ROW), (Y_COORD + 7 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_9(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_8(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##8 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 8 * X_STEP_ROW), (Y_COORD + 8 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_10(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_9(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##9 = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 9 * X_STEP_ROW), (Y_COORD + 9 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_11(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_10(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##A = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 10 * X_STEP_ROW), (Y_COORD + 10 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_12(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_11(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##B = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 11 * X_STEP_ROW), (Y_COORD + 11 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_13(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_12(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##C = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 12 * X_STEP_ROW), (Y_COORD + 12 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_14(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_13(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##D = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 13 * X_STEP_ROW), (Y_COORD + 13 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_15(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_14(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##E = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 14 * X_STEP_ROW), (Y_COORD + 14 * Y_STEP_ROW))
#define LOAD_TEXTURE2D_ROW_16(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
LOAD_TEXTURE2D_ROW_15(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) \
BASENAME##F = READ_IMAGE2D(DATA_TYPE, N0, IMG, (X_COORD + 15 * X_STEP_ROW), (Y_COORD + 15 * Y_STEP_ROW))
/** @} */ // end of group LOAD_TEXTURE2D_ROW_n
/** Load a 2D texture in units of pixels. A pixel is made of 4 floating point values
 * @name LOAD_TEXTURE2D
 *
 * Supported cases are M0=1,2,3,...,16 and N0=1,2,4
* The data to load is expected to have consecutive names for each row.
* E.g., for M0=3, and BASENAME=c, the expected data is c0, c1 and c2.
*
* @param[in] M0 The number of consecutive rows
* @param[in] N0 The number of consecutive pixels. Only 1, 2 and 4 are supported
* @param[in] DATA_TYPE The data type of the target
* @param[in] BASENAME The basename of the result variables
* @param[in] IMG The 2D OpenCL image object
* @param[in] X_COORD The x coordinate for the top-left pixel
* @param[in] Y_COORD The y coordinate for the top-left pixel
 * @param[in] X_STEP_ROW The per-row increment for the x coordinate (in pixels)
 * @param[in] Y_STEP_ROW The per-row increment for the y coordinate (in pixels)
* @{
*/
#define LOAD_TEXTURE2D_STR(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) LOAD_TEXTURE2D_ROW_##M0(N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW)
#define LOAD_TEXTURE2D(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW) LOAD_TEXTURE2D_STR(M0, N0, DATA_TYPE, BASENAME, IMG, X_COORD, Y_COORD, X_STEP_ROW, Y_STEP_ROW)
/** @} */ // end of group LOAD_TEXTURE2D
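/* Usage sketch for LOAD_TEXTURE2D (illustrative; 'tex' is a read_only image2d_t
 * and float4 variables c0 and c1 are already declared):
 *
 *     LOAD_TEXTURE2D(2, 1, float, c, tex, x, y, 0, 1);
 *
 * Reads one pixel per row via READ_IMAGE2D: c0 from (x, y) and c1 from (x, y + 1).
 */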
/** Loads the elements from 0 to n-1 in the given variables (BASENAME0 to BASENAMEn-1).
* @name LOAD_ELEMENT_n
*
 * @param[in] N0 The vector width of the destination variables (the loaded scalar is broadcast)
* @param[in] DATA_TYPE The data type of variables
* @param[in] BASENAME The basename of the destination variables for the loaded rows
* @param[in] PTR The base pointer
* @param[in] OFFSET The offset within a row
* @param[in] STRIDE_Y The stride value in y-axis direction
* @{
*/
#define LOAD_ELEMENT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##0 = *((__global DATA_TYPE *)(PTR + OFFSET + 0 * STRIDE_Y));
#define LOAD_ELEMENT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_1(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##1 = *((__global DATA_TYPE *)(PTR + OFFSET + 1 * STRIDE_Y));
#define LOAD_ELEMENT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_2(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##2 = *((__global DATA_TYPE *)(PTR + OFFSET + 2 * STRIDE_Y));
#define LOAD_ELEMENT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_3(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##3 = *((__global DATA_TYPE *)(PTR + OFFSET + 3 * STRIDE_Y));
#define LOAD_ELEMENT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_4(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##4 = *((__global DATA_TYPE *)(PTR + OFFSET + 4 * STRIDE_Y));
#define LOAD_ELEMENT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_5(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##5 = *((__global DATA_TYPE *)(PTR + OFFSET + 5 * STRIDE_Y));
#define LOAD_ELEMENT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_6(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##6 = *((__global DATA_TYPE *)(PTR + OFFSET + 6 * STRIDE_Y));
#define LOAD_ELEMENT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_7(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##7 = *((__global DATA_TYPE *)(PTR + OFFSET + 7 * STRIDE_Y));
#define LOAD_ELEMENT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_8(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##8 = *((__global DATA_TYPE *)(PTR + OFFSET + 8 * STRIDE_Y));
#define LOAD_ELEMENT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_9(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##9 = *((__global DATA_TYPE *)(PTR + OFFSET + 9 * STRIDE_Y));
#define LOAD_ELEMENT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_10(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##A = *((__global DATA_TYPE *)(PTR + OFFSET + 10 * STRIDE_Y));
#define LOAD_ELEMENT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_11(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##B = *((__global DATA_TYPE *)(PTR + OFFSET + 11 * STRIDE_Y));
#define LOAD_ELEMENT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_12(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##C = *((__global DATA_TYPE *)(PTR + OFFSET + 12 * STRIDE_Y));
#define LOAD_ELEMENT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_13(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##D = *((__global DATA_TYPE *)(PTR + OFFSET + 13 * STRIDE_Y));
#define LOAD_ELEMENT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_14(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##E = *((__global DATA_TYPE *)(PTR + OFFSET + 14 * STRIDE_Y));
#define LOAD_ELEMENT_16(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
LOAD_ELEMENT_15(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) \
VEC_DATA_TYPE(DATA_TYPE, N0) \
BASENAME##F = *((__global DATA_TYPE *)(PTR + OFFSET + 15 * STRIDE_Y));
/** @}*/ // end of group LOAD_ELEMENT_n
/** Load Scalar as Vector (consecutive elements).
* @name LOAD_SCALAR_AS_VECTOR
*
* Supported cases are M0=1,2,3,...,16 and N0=1,2,3,4,8,16
* The data to load is expected to have consecutive names for each row.
* E.g., for M0=3, and BASENAME=c, the expected data is c0, c1 and c2.
*
* @param[in] M0 The number of consecutive rows
* @param[in] N0 The number of consecutive columns
* @param[in] DATA_TYPE The data type of the target
* @param[in] BASENAME The basename of the result variables
* @param[in] PTR The base pointer for the data
* @param[in] OFFSET The offset within a row
* @param[in] STRIDE_Y The stride in y-axis direction
* @{
*/
#define LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) LOAD_ELEMENT_##M0(N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y)
#define LOAD_SCALAR_AS_VECTOR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y) LOAD_SCALAR_AS_VECTOR_STR(M0, N0, DATA_TYPE, BASENAME, PTR, OFFSET, STRIDE_Y)
/** @} */ // end of group LOAD_SCALAR_AS_VECTOR
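/* Usage sketch for LOAD_SCALAR_AS_VECTOR (illustrative names):
 *
 *     LOAD_SCALAR_AS_VECTOR(2, 4, float, s, src, 0, src_stride_y);
 *
 * declares float4 s0 and s1, each initialised from a single float; the scalar
 * is implicitly widened to all four lanes of the declared vector type.
 */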
/** Basic macros to calculate Z offset values from Z0 to Zn-1
* @name CALCULATE_Z_OFFSET_n
*
* @param[in] M0 The number of offset values to calculate
* @param[in] DATA_TYPE The data type of the results
* @param[in] Z The basename of the result variables
 * @param[in] Y The work-item ID of the y-axis
* @param[in] HEIGHT_GEMM3D The height of GEMM3D
* @param[in] DEPTH_GEMM3D The depth of GEMM3D
 * @param[in] CROSS_PLANE_PAD The padding required for plane changes across the z-dimension
* @param[in] STRIDE_Y The stride value in y-axis direction
*
* @{
*/
#define CALCULATE_Z_OFFSET_1(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
Z##0 = (0 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##0 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##0); \
Z##0 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_2(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_1(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
Z##1 = (1 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##1 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##1); \
Z##1 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_3(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_2(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
Z##2 = (2 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##2 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##2); \
Z##2 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_4(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_3(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
Z##3 = (3 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##3 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##3); \
Z##3 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_5(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_4(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
Z##4 = (4 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##4 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##4); \
Z##4 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_6(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_5(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
Z##5 = (5 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##5 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##5); \
Z##5 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_7(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_6(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
Z##6 = (6 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##6 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##6); \
Z##6 *= (CROSS_PLANE_PAD * STRIDE_Y);
#define CALCULATE_Z_OFFSET_8(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
CALCULATE_Z_OFFSET_7(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) \
Z##7 = (7 + (DATA_TYPE)(Y)) / (DATA_TYPE)HEIGHT_GEMM3D; \
Z##7 = min((DATA_TYPE)(DEPTH_GEMM3D - 1), Z##7); \
Z##7 *= (CROSS_PLANE_PAD * STRIDE_Y);
/** @} */ // end of group CALCULATE_Z_OFFSET_n
/** Calculate Z offset values from Z0 to Zn-1
* @name CALCULATE_Z_OFFSET
*
* The Z offsets are expected to have consecutive names.
 * E.g., for M0=3 and Z=zin, the expected names of Z offsets are zin0, zin1 and zin2.
 * Note that CROSS_PLANE_PAD (cross plane padding) is required to take into account
 * the possible cross plane paddings in case the plane changes across the z-dimension.
*
* <!--
* | |
* | plane0 |
* | |
* |__________________|
* |******************|
* | cross_plane_pad |
* |******************|
* | |
* | plane1 |
* | |
* |__________________|
* -->
*
* @param[in] M0 The number of offset values to calculate
* @param[in] DATA_TYPE The data type of the results
* @param[in] Z The basename of the result variables
 * @param[in] Y The work-item ID of the y-axis
* @param[in] HEIGHT_GEMM3D The height of GEMM3D
* @param[in] DEPTH_GEMM3D The depth of GEMM3D
 * @param[in] CROSS_PLANE_PAD The padding required for plane changes across the z-dimension
* @param[in] STRIDE_Y The stride value in y-axis direction
* @{
*/
#define CALCULATE_Z_OFFSET_STR(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) CALCULATE_Z_OFFSET_##M0(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y)
#define CALCULATE_Z_OFFSET(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y) CALCULATE_Z_OFFSET_STR(M0, DATA_TYPE, Z, Y, HEIGHT_GEMM3D, DEPTH_GEMM3D, CROSS_PLANE_PAD, STRIDE_Y)
/** @} */ // end of group CALCULATE_Z_OFFSET
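/* Usage sketch for CALCULATE_Z_OFFSET (illustrative; zin0 and zin1 must be
 * declared beforehand, 'pad' and 'src_stride_y' are hypothetical):
 *
 *     uint zin0 = 0, zin1 = 0;
 *     CALCULATE_Z_OFFSET(2, uint, zin, 2 * get_global_id(1), 4, 2, pad, src_stride_y);
 *
 * Each zini becomes min((Y + i) / HEIGHT_GEMM3D, DEPTH_GEMM3D - 1) * pad * src_stride_y,
 * i.e. the extra bytes to skip once row Y + i has crossed into the next z-plane.
 */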
/** Scale the rows in the given variables (BASENAME0 to BASENAMEn-1)
* @name SCALE_ROW_n
*
* @param[in] DATA_TYPE The data type of the variables
* @param[in] BASENAME The basename of the variables
* @param[in] SCALE The scale factor
* @{
*/
#define SCALE_ROW_1(DATA_TYPE, BASENAME, SCALE) \
BASENAME##0 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_2(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_1(DATA_TYPE, BASENAME, SCALE) \
BASENAME##1 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_3(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_2(DATA_TYPE, BASENAME, SCALE) \
BASENAME##2 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_4(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_3(DATA_TYPE, BASENAME, SCALE) \
BASENAME##3 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_5(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_4(DATA_TYPE, BASENAME, SCALE) \
BASENAME##4 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_6(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_5(DATA_TYPE, BASENAME, SCALE) \
BASENAME##5 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_7(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_6(DATA_TYPE, BASENAME, SCALE) \
BASENAME##6 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_8(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_7(DATA_TYPE, BASENAME, SCALE) \
BASENAME##7 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_9(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_8(DATA_TYPE, BASENAME, SCALE) \
BASENAME##8 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_10(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_9(DATA_TYPE, BASENAME, SCALE) \
BASENAME##9 *= (DATA_TYPE)SCALE;
#define SCALE_ROW_11(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_10(DATA_TYPE, BASENAME, SCALE) \
BASENAME##A *= (DATA_TYPE)SCALE;
#define SCALE_ROW_12(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_11(DATA_TYPE, BASENAME, SCALE) \
BASENAME##B *= (DATA_TYPE)SCALE;
#define SCALE_ROW_13(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_12(DATA_TYPE, BASENAME, SCALE) \
BASENAME##C *= (DATA_TYPE)SCALE;
#define SCALE_ROW_14(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_13(DATA_TYPE, BASENAME, SCALE) \
BASENAME##D *= (DATA_TYPE)SCALE;
#define SCALE_ROW_15(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_14(DATA_TYPE, BASENAME, SCALE) \
BASENAME##E *= (DATA_TYPE)SCALE;
#define SCALE_ROW_16(DATA_TYPE, BASENAME, SCALE) \
SCALE_ROW_15(DATA_TYPE, BASENAME, SCALE) \
BASENAME##F *= (DATA_TYPE)SCALE;
/** @} */ // end of group SCALE_ROW_n
/** Scale elements stored in a block (BASENAME)
* @name SCALE_BLOCK
*
* Supported cases are N=1,2,3,...,16
*
* @param[in] N The number of rows in the block
* @param[in] DATA_TYPE The data type of the block
* @param[in] BASENAME The basename of the block
* @param[in] SCALE The scale factor
* @{
*/
#define SCALE_BLOCK_STR(N, DATA_TYPE, BASENAME, SCALE) SCALE_ROW_##N(DATA_TYPE, BASENAME, SCALE)
#define SCALE_BLOCK(N, DATA_TYPE, BASENAME, SCALE) SCALE_BLOCK_STR(N, DATA_TYPE, BASENAME, SCALE)
/** @} */ // end of group SCALE_BLOCK
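/* Usage sketch for SCALE_BLOCK (illustrative; assumes float4 accumulators c0..c2):
 *
 *     SCALE_BLOCK(3, float, c, 0.5f); // c0 *= 0.5f; c1 *= 0.5f; c2 *= 0.5f;
 */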
/** Create a new vector containing the values at the given index for a set of given vectors
* @name COLUMN_VECTORn
*
* @param[in] IDX_COL The index value
* @param[in] BASENAME The basename of the destination vectors
* @param[in] X The basename of the source vectors
* @param[in] TYPE The data type of the destination vectors
* @{
*/
#define COLUMN_VECTOR1(IDX_COL, BASENAME, X, TYPE) \
TYPE BASENAME##IDX_COL = (TYPE)((X##0).s##IDX_COL);
#define COLUMN_VECTOR2(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 2) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 2))((X##0).s##IDX_COL, (X##1).s##IDX_COL);
#define COLUMN_VECTOR3(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 3) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 3))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL);
#define COLUMN_VECTOR4(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 4) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 4))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL);
#define COLUMN_VECTOR8(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 8) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 8))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL, (X##4).s##IDX_COL, (X##5).s##IDX_COL, (X##6).s##IDX_COL, (X##7).s##IDX_COL);
#define COLUMN_VECTOR16(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 16) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 16))((X##0).s##IDX_COL, (X##1).s##IDX_COL, (X##2).s##IDX_COL, (X##3).s##IDX_COL, (X##4).s##IDX_COL, (X##5).s##IDX_COL, (X##6).s##IDX_COL, (X##7).s##IDX_COL, (X##8).s##IDX_COL, (X##9).s##IDX_COL, (X##A).s##IDX_COL, (X##B).s##IDX_COL, (X##C).s##IDX_COL, (X##D).s##IDX_COL, (X##E).s##IDX_COL, (X##F).s##IDX_COL);
/** @} */ // end of group COLUMN_VECTORn
/** Create a new vector containing the values at the given index. Utility macros for transposing a column-vector
* @name COLUMN_VECTOR_SCALARn
*
* @param[in] IDX_COL The index value
* @param[in] BASENAME The basename of the destination vectors
* @param[in] X The basename of the source vectors
* @param[in] TYPE The data type of the destination vectors
* @{
*/
#define COLUMN_VECTOR_SCALAR1(IDX_COL, BASENAME, X, TYPE) \
TYPE BASENAME##IDX_COL = (TYPE)((X##0));
#define COLUMN_VECTOR_SCALAR2(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 2) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 2))((X##0), (X##1));
#define COLUMN_VECTOR_SCALAR3(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 3) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 3))((X##0), (X##1), (X##2));
#define COLUMN_VECTOR_SCALAR4(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 4) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 4))((X##0), (X##1), (X##2), (X##3));
#define COLUMN_VECTOR_SCALAR8(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 8) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 8))((X##0), (X##1), (X##2), (X##3), (X##4), (X##5), (X##6), (X##7));
#define COLUMN_VECTOR_SCALAR16(IDX_COL, BASENAME, X, TYPE) \
VEC_DATA_TYPE(TYPE, 16) \
BASENAME##IDX_COL = (VEC_DATA_TYPE(TYPE, 16))((X##0), (X##1), (X##2), (X##3), (X##4), (X##5), (X##6), (X##7), (X##8), (X##9), (X##A), (X##B), (X##C), (X##D), (X##E), (X##F));
/** @} */ // end of group COLUMN_VECTOR_SCALARn
/** Create transposed vectors of the given vectors
* @name TRANSPOSE_K0Xn
*
* @param[in] K0 The size of the source vectors
* @param[in] BASENAME The basename of transposed vectors
* @param[in] B The basename of source vectors for transposition
* @param[in] TYPE The data type of the transposed vectors
* @{
*/
#define TRANSPOSE_K0X1(K0, BASENAME, B, TYPE) \
COLUMN_VECTOR_SCALAR(K0, 0, BASENAME, B, TYPE);
#define TRANSPOSE_K0X2(K0, BASENAME, B, TYPE) \
COLUMN_VECTOR(K0, 0, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, 1, BASENAME, B, TYPE);
#define TRANSPOSE_K0X3(K0, BASENAME, B, TYPE) \
TRANSPOSE_K0X2(K0, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, 2, BASENAME, B, TYPE);
#define TRANSPOSE_K0X4(K0, BASENAME, B, TYPE) \
TRANSPOSE_K0X3(K0, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, 3, BASENAME, B, TYPE);
#define TRANSPOSE_K0X8(K0, BASENAME, B, TYPE) \
TRANSPOSE_K0X4(K0, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, 4, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, 5, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, 6, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, 7, BASENAME, B, TYPE);
#define TRANSPOSE_K0X16(K0, BASENAME, B, TYPE) \
TRANSPOSE_K0X8(K0, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, 8, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, 9, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, A, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, B, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, C, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, D, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, E, BASENAME, B, TYPE); \
COLUMN_VECTOR(K0, F, BASENAME, B, TYPE);
/** @} */ // end of group TRANSPOSE_K0Xn
/** Create column vectors to contain the values at the given index for a set of given vectors
*
* @param[in] K0 The number of source vectors
* @param[in] IDX_COL The index value
* @param[in] BASENAME The basename of the destination vectors
* @param[in] B The basename of the source vectors
* @param[in] TYPE The data type of the destination vectors
*/
#define COLUMN_VECTOR(K0, IDX_COL, BASENAME, B, TYPE) \
CONCAT(COLUMN_VECTOR, K0) \
(IDX_COL, BASENAME, B, TYPE);
/** Create column vectors to contain the values at the given index. Utility macro for transposing a column-vector
*
* @param[in] K0 The number of source vectors
* @param[in] IDX_COL The index value
* @param[in] BASENAME The basename of the destination vectors
* @param[in] B The basename of the source vectors
* @param[in] TYPE The data type of the destination vectors
*/
#define COLUMN_VECTOR_SCALAR(K0, IDX_COL, BASENAME, B, TYPE) \
CONCAT(COLUMN_VECTOR_SCALAR, K0) \
(IDX_COL, BASENAME, B, TYPE);
/** Create transposed vectors from the given source vectors
*
* @param[in] K0 The size of source vectors
* @param[in] N0 The number of source vectors
* @param[in] BASENAME The basename of transposed vectors
* @param[in] B The basename of source vectors for transposition
* @param[in] TYPE The data type of the transposed vectors
*
*/
#define TRANSPOSE_K0XN0(K0, N0, BASENAME, B, TYPE) \
CONCAT(TRANSPOSE_K0X, N0) \
(K0, BASENAME, B, TYPE);
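// Illustrative expansion (hypothetical names): given four 2-wide source vectors
// b0..b3, TRANSPOSE_K0XN0(4, 2, res, b, uchar) builds two 4-wide column vectors,
// equivalent after VEC_DATA_TYPE resolves to:
//   uchar4 res0 = (uchar4)((b0).s0, (b1).s0, (b2).s0, (b3).s0);
//   uchar4 res1 = (uchar4)((b0).s1, (b1).s1, (b2).s1, (b3).s1);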
/** Add the variables (BIAS0 to BIASn-1) to the others (BASENAME0 to BASENAMEn-1)
* @name ADD_ROW_n
*
* @param[in] BASENAME The basename of the destination variables
* @param[in] BIAS The basename of the added variables
* @{
*/
#define ADD_ROW_1(BASENAME, BIAS) \
BASENAME##0 += BIAS##0;
#define ADD_ROW_2(BASENAME, BIAS) \
ADD_ROW_1(BASENAME, BIAS) \
BASENAME##1 += BIAS##1;
#define ADD_ROW_3(BASENAME, BIAS) \
ADD_ROW_2(BASENAME, BIAS) \
BASENAME##2 += BIAS##2;
#define ADD_ROW_4(BASENAME, BIAS) \
ADD_ROW_3(BASENAME, BIAS) \
BASENAME##3 += BIAS##3;
#define ADD_ROW_5(BASENAME, BIAS) \
ADD_ROW_4(BASENAME, BIAS) \
BASENAME##4 += BIAS##4;
#define ADD_ROW_6(BASENAME, BIAS) \
ADD_ROW_5(BASENAME, BIAS) \
BASENAME##5 += BIAS##5;
#define ADD_ROW_7(BASENAME, BIAS) \
ADD_ROW_6(BASENAME, BIAS) \
BASENAME##6 += BIAS##6;
#define ADD_ROW_8(BASENAME, BIAS) \
ADD_ROW_7(BASENAME, BIAS) \
BASENAME##7 += BIAS##7;
#define ADD_ROW_9(BASENAME, BIAS) \
ADD_ROW_8(BASENAME, BIAS) \
BASENAME##8 += BIAS##8;
#define ADD_ROW_10(BASENAME, BIAS) \
ADD_ROW_9(BASENAME, BIAS) \
BASENAME##9 += BIAS##9;
#define ADD_ROW_11(BASENAME, BIAS) \
ADD_ROW_10(BASENAME, BIAS) \
BASENAME##A += BIAS##A;
#define ADD_ROW_12(BASENAME, BIAS) \
ADD_ROW_11(BASENAME, BIAS) \
BASENAME##B += BIAS##B;
#define ADD_ROW_13(BASENAME, BIAS) \
ADD_ROW_12(BASENAME, BIAS) \
BASENAME##C += BIAS##C;
#define ADD_ROW_14(BASENAME, BIAS) \
ADD_ROW_13(BASENAME, BIAS) \
BASENAME##D += BIAS##D;
#define ADD_ROW_15(BASENAME, BIAS) \
ADD_ROW_14(BASENAME, BIAS) \
BASENAME##E += BIAS##E;
#define ADD_ROW_16(BASENAME, BIAS) \
ADD_ROW_15(BASENAME, BIAS) \
BASENAME##F += BIAS##F;
/** @} */ // end of group ADD_ROW_n
/** Add the block (BIAS) to another block (BASENAME)
* @name ADD_BLOCK
*
* Supported cases are N=1,2,3,...,16
*
* @param[in] N The number of vectors in the block
* @param[in] BASENAME The basename of the destination variables
* @param[in] BIAS The basename of the added variables
* @{
*/
#define ADD_BLOCK_STR(N, BASENAME, BIAS) ADD_ROW_##N(BASENAME, BIAS)
#define ADD_BLOCK(N, BASENAME, BIAS) ADD_BLOCK_STR(N, BASENAME, BIAS)
/** @} */ // end of group ADD_BLOCK
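// Illustrative expansion: with accumulator rows c0, c1 and bias rows bias0, bias1
// in scope, ADD_BLOCK(2, c, bias) unrolls to:
//   c0 += bias0;
//   c1 += bias1;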
/** Broadcast (add a single value) to each element of the destination variables
* @name ADD_ROW_BROADCAST_n
*
* @param[in] BASENAME The basename of the destination variables
* @param[in] BIAS The variable containing the value to add
* @{
*/
#define ADD_ROW_BROADCAST_1(BASENAME, BIAS) \
BASENAME##0 += BIAS;
#define ADD_ROW_BROADCAST_2(BASENAME, BIAS) \
ADD_ROW_BROADCAST_1(BASENAME, BIAS) \
BASENAME##1 += BIAS;
#define ADD_ROW_BROADCAST_3(BASENAME, BIAS) \
ADD_ROW_BROADCAST_2(BASENAME, BIAS) \
BASENAME##2 += BIAS;
#define ADD_ROW_BROADCAST_4(BASENAME, BIAS) \
ADD_ROW_BROADCAST_3(BASENAME, BIAS) \
BASENAME##3 += BIAS;
#define ADD_ROW_BROADCAST_5(BASENAME, BIAS) \
ADD_ROW_BROADCAST_4(BASENAME, BIAS) \
BASENAME##4 += BIAS;
#define ADD_ROW_BROADCAST_6(BASENAME, BIAS) \
ADD_ROW_BROADCAST_5(BASENAME, BIAS) \
BASENAME##5 += BIAS;
#define ADD_ROW_BROADCAST_7(BASENAME, BIAS) \
ADD_ROW_BROADCAST_6(BASENAME, BIAS) \
BASENAME##6 += BIAS;
#define ADD_ROW_BROADCAST_8(BASENAME, BIAS) \
ADD_ROW_BROADCAST_7(BASENAME, BIAS) \
BASENAME##7 += BIAS;
#define ADD_ROW_BROADCAST_9(BASENAME, BIAS) \
ADD_ROW_BROADCAST_8(BASENAME, BIAS) \
BASENAME##8 += BIAS;
#define ADD_ROW_BROADCAST_10(BASENAME, BIAS) \
ADD_ROW_BROADCAST_9(BASENAME, BIAS) \
BASENAME##9 += BIAS;
#define ADD_ROW_BROADCAST_11(BASENAME, BIAS) \
ADD_ROW_BROADCAST_10(BASENAME, BIAS) \
BASENAME##A += BIAS;
#define ADD_ROW_BROADCAST_12(BASENAME, BIAS) \
ADD_ROW_BROADCAST_11(BASENAME, BIAS) \
BASENAME##B += BIAS;
#define ADD_ROW_BROADCAST_13(BASENAME, BIAS) \
ADD_ROW_BROADCAST_12(BASENAME, BIAS) \
BASENAME##C += BIAS;
#define ADD_ROW_BROADCAST_14(BASENAME, BIAS) \
ADD_ROW_BROADCAST_13(BASENAME, BIAS) \
BASENAME##D += BIAS;
#define ADD_ROW_BROADCAST_15(BASENAME, BIAS) \
ADD_ROW_BROADCAST_14(BASENAME, BIAS) \
BASENAME##E += BIAS;
#define ADD_ROW_BROADCAST_16(BASENAME, BIAS) \
ADD_ROW_BROADCAST_15(BASENAME, BIAS) \
BASENAME##F += BIAS;
/** @} */ // end of group ADD_ROW_BROADCAST_n
/** Broadcast (add a value) to each element of the destination block (BASENAME)
* @name ADD_BLOCK_BROADCAST
*
* Supported cases are N=1,2,3,...,16.
*
* @param[in] N The number of vectors in the block
* @param[in] BASENAME The basename of the destination variables
* @param[in] BIAS The variable containing the value to add
* @{
*/
#define ADD_BLOCK_BROADCAST_STR(N, BASENAME, BIAS) ADD_ROW_BROADCAST_##N(BASENAME, BIAS)
#define ADD_BLOCK_BROADCAST(N, BASENAME, BIAS) ADD_BLOCK_BROADCAST_STR(N, BASENAME, BIAS)
/** @} */ // end of group ADD_BLOCK_BROADCAST
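// Illustrative expansion: unlike ADD_BLOCK, a single variable is added to every row,
// so ADD_BLOCK_BROADCAST(2, c, b) unrolls to:
//   c0 += b;
//   c1 += b;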
/** Apply activation to the given variables
* @name ACTIVATION_ROW_n
*
* @param[in] ACTIVATION_TYPE The type of the activation
 * @param[in] DATA_TYPE The data type of the vectors
 * @param[in] VEC_SIZE The vector size of the variables
* @param[in] BASENAME The basename of the variables
* @param[in] A_VAL Additional value required by the activation
* @param[in] B_VAL Additional value required by the activation
* @{
*/
#define ACTIVATION_ROW_1(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##0 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##0, A_VAL, B_VAL);
#define ACTIVATION_ROW_2(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_1(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##1 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##1, A_VAL, B_VAL);
#define ACTIVATION_ROW_3(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_2(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##2 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##2, A_VAL, B_VAL);
#define ACTIVATION_ROW_4(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_3(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##3 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##3, A_VAL, B_VAL);
#define ACTIVATION_ROW_5(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_4(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##4 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##4, A_VAL, B_VAL);
#define ACTIVATION_ROW_6(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_5(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##5 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##5, A_VAL, B_VAL);
#define ACTIVATION_ROW_7(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_6(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##6 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##6, A_VAL, B_VAL);
#define ACTIVATION_ROW_8(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_7(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##7 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##7, A_VAL, B_VAL);
#define ACTIVATION_ROW_9(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_8(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##8 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##8, A_VAL, B_VAL);
#define ACTIVATION_ROW_10(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_9(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##9 = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##9, A_VAL, B_VAL);
#define ACTIVATION_ROW_11(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_10(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##A = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##A, A_VAL, B_VAL);
#define ACTIVATION_ROW_12(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_11(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##B = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##B, A_VAL, B_VAL);
#define ACTIVATION_ROW_13(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_12(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##C = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##C, A_VAL, B_VAL);
#define ACTIVATION_ROW_14(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_13(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##D = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##D, A_VAL, B_VAL);
#define ACTIVATION_ROW_15(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_14(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##E = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##E, A_VAL, B_VAL);
#define ACTIVATION_ROW_16(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
ACTIVATION_ROW_15(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) \
BASENAME##F = ACTIVATION(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME##F, A_VAL, B_VAL);
/** @} */ // end of group ACTIVATION_ROW_n
/** Apply activation to a block (BASENAME)
* @name ACTIVATION_BLOCK
*
* Supported cases are N=1,2,3,...,16.
*
* @param[in] N The number of vectors in the block
* @param[in] ACTIVATION_TYPE The type of the activation
 * @param[in] DATA_TYPE The data type of the vectors
 * @param[in] VEC_SIZE The vector size of the variables
* @param[in] BASENAME The basename of the variables
* @param[in] A_VAL Additional value required by the activation
* @param[in] B_VAL Additional value required by the activation
* @{
*/
#define ACTIVATION_BLOCK_STR(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) ACTIVATION_ROW_##N(ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL)
#define ACTIVATION_BLOCK(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL) ACTIVATION_BLOCK_STR(N, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, BASENAME, A_VAL, B_VAL)
/** @} */ // end of group ACTIVATION_BLOCK
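// Illustrative expansion (ACTIVATION itself is provided by the library's activation
// helpers): ACTIVATION_BLOCK(2, RELU, float, 4, c, A_VAL, B_VAL) unrolls to:
//   c0 = ACTIVATION(RELU, float, 4, c0, A_VAL, B_VAL);
//   c1 = ACTIVATION(RELU, float, 4, c1, A_VAL, B_VAL);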
/** Apply convert_<data_type> to the given variables
* @name CONVERT_ROW_n
*
* @param[in] N The size of the vectors
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME_SRC The basename of the source variables
* @param[in] BASENAME_DST The basename of the destination variables
 * @{
 */
#define CONVERT_ROW_1(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##0 = CONVERT(BASENAME_SRC##0, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_2(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_1(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##1 = CONVERT(BASENAME_SRC##1, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_3(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_2(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##2 = CONVERT(BASENAME_SRC##2, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_4(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_3(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##3 = CONVERT(BASENAME_SRC##3, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_5(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_4(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##4 = CONVERT(BASENAME_SRC##4, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_6(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_5(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##5 = CONVERT(BASENAME_SRC##5, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_7(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_6(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##6 = CONVERT(BASENAME_SRC##6, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_8(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_7(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##7 = CONVERT(BASENAME_SRC##7, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_9(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_8(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##8 = CONVERT(BASENAME_SRC##8, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_10(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_9(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##9 = CONVERT(BASENAME_SRC##9, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_11(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_10(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##A = CONVERT(BASENAME_SRC##A, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_12(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_11(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##B = CONVERT(BASENAME_SRC##B, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_13(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_12(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##C = CONVERT(BASENAME_SRC##C, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_14(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_13(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##D = CONVERT(BASENAME_SRC##D, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_15(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_14(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##E = CONVERT(BASENAME_SRC##E, VEC_DATA_TYPE(DATA_TYPE, N));
#define CONVERT_ROW_16(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
CONVERT_ROW_15(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) \
VEC_DATA_TYPE(DATA_TYPE, N) \
BASENAME_DST##F = CONVERT(BASENAME_SRC##F, VEC_DATA_TYPE(DATA_TYPE, N));
/** @} */ // end of group CONVERT_ROW_n
/** Apply convert_<data_type> to a block (BASENAME_SRC) and save to another block (BASENAME_DST)
* @name CONVERT_BLOCK
*
 * Supported cases are M=1,2,3,...,16.
*
* @param[in] M The number of vectors to convert
* @param[in] N The size of the vectors
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME_SRC The basename of the source variables
* @param[in] BASENAME_DST The basename of the destination variables
 * @{
 */
#define CONVERT_BLOCK_STR(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) CONVERT_ROW_##M(N, DATA_TYPE, BASENAME_SRC, BASENAME_DST)
#define CONVERT_BLOCK(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST) CONVERT_BLOCK_STR(M, N, DATA_TYPE, BASENAME_SRC, BASENAME_DST)
/** @} */ // end of group CONVERT_BLOCK
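// Illustrative expansion (hypothetical names): CONVERT_BLOCK(2, 4, half, c, d)
// declares one converted vector per row, equivalent after VEC_DATA_TYPE/CONVERT
// resolve to:
//   half4 d0 = convert_half4(c0);
//   half4 d1 = convert_half4(c1);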
/*
* Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef ARM_COMPUTE_REPEAT_H
#define ARM_COMPUTE_REPEAT_H
/*
* Copyright (c) 2016-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef ARM_COMPUTE_HELPER_H
#define ARM_COMPUTE_HELPER_H
/*
* Copyright (c) 2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/** Store the 0th to (n-1)th rows of the given variables
* @name STORE_ROW_n
*
* @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_n
/** Convert and store the 0th to (n-1)th rows of the given variables
* @name CONVERT_STORE_ROW_n
*
* @param[in] N0 The size of the vectors
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE(N0) \
(CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group CONVERT_STORE_ROW_n
/** Store a block of the given size M0xN0
* @name STORE_BLOCK
*
* Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group STORE_BLOCK
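// Illustrative expansion (hypothetical names): STORE_BLOCK(2, 4, float, c, ptr,
// stride_y, zin) unrolls to one vstore per row, with zin0/zin1 holding the per-row
// z offsets:
//   vstore4(c0, 0, (__global float *)(ptr + 0 * stride_y + zin0));
//   vstore4(c1, 0, (__global float *)(ptr + 1 * stride_y + zin1));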
/** Convert and store a block of the given size M0xN0
* @name CONVERT_STORE_BLOCK
*
* Supported cases are M0=1,2,3,...,16 and N0=2,3,4,8,16.
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store
* @param[in] N0 The size of each vector
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
/** @} */ // end of group CONVERT_STORE_BLOCK
/** Partially store the 0th to (n-1)th rows of the given variables
* @name STORE_ROW_PARTIAL_n
* Within each row, store the lower @p STORE_N0 elements of vectors of width @p N0
*
 * @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* @param[in] N0 The width of the passed in vector. Supported: 1, 2, 3, 4, 8, 16
 * @param[in] STORE_N0 The **lower** size of the vectors to store. Supported: 1-16 and <= @p N0
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0));
#define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1));
#define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2));
#define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3));
#define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4));
#define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5));
#define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6));
#define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7));
#define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8));
#define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9));
#define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A));
#define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B));
#define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C));
#define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D));
#define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E));
#define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \
VSTORE_PARTIAL(N0, STORE_N0) \
(BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F));
/** @} */ // end of group STORE_ROW_PARTIAL_n
/** Partially store a block of the given size STORE_M0xSTORE_N0
* @name STORE_BLOCK_PARTIAL
*
* @note The vector width @p N0 is also required for correct partial storing behaviour.
 * @note in case @p STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for STORE_M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for STORE_M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] STORE_M0 The number of rows to store. Supported: 1-16
* @param[in] STORE_N0 The lower number of elements of vectors to store. Supported: 1-16 and <= @p N0
* @param[in] N0 The size of each vector. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @{
*/
#define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
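// Illustrative expansion (hypothetical names): STORE_BLOCK_PARTIAL(2, 3, 4, float,
// c, ptr, stride_y, zin) keeps the full 4-wide row vectors but stores only their
// lower 3 elements:
//   VSTORE_PARTIAL(4, 3)(c0, 0, (__global float *)(ptr + 0 * stride_y + zin0));
//   VSTORE_PARTIAL(4, 3)(c1, 0, (__global float *)(ptr + 1 * stride_y + zin1));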
/** Store a block that can be partial in both x and y dimensions
*
 * @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in x but not y.
*
 * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported range: [1, @p N0)
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
*/
#define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \
if(!(PARTIAL_COND_X)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** Store a block that can only be partial in y but not x.
*
 * @note in case @p N0 or @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported range: [1, @p M0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
*/
#define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \
if(!(PARTIAL_COND_Y)) \
{ \
STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
} \
else \
{ \
STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \
}
/** @} */ // end of group STORE_BLOCK_PARTIAL
#if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** Boundary-aware GEMM block store
* @name STORE_BLOCK_BOUNDARY_AWARE
* This macro assumes the following schemes to achieve boundary-awareness:
* - Overlapping load in Y axis from lhs tensor. This implies lhs has no padding along y dim.
 * - Non-overlapping (normal) load from rhs tensor. This implies rhs can have paddings.
 * - Overlapping load in Y axis from bias tensor. This implies bias has no padding along y dim.
* The macro then ensures that the dst tensor can be stored without any paddings in both x and y dim.
*
* In the y dimension, we place the partial blocks **at the beginning** while in the x dimension, we place the partial
* blocks **at the end**.
 * Say the dst tensor is of shape MxN and we have M0 and N0 as the block sizes; this is how we define "partial blocks"/
* "boundary block" (we use the 2 terms "partial blocks" and "boundary blocks" interchangeably) and its various parameters:
*
* *--x--> x == 0 x == 1
* | |<------------------------------N-------------------------->|
* y |<--------------N0------------->|<----PARTIAL_STORE_N0----->|
* | -------------#############################################################
* * | | |...............................|...........................|
* y == 0 | PAR_..._M0 |......Boundary block in y......|.Boundary block in x and y.|
* | | |...............................|...........................|
* M --#############################################################
* | | | |...........................|
* y == 1 | M0 | Non-boundary block |....Boundary block in x....|
* | | | |...........................|
* |------------#############################################################
*
* Then @p PARTIAL_STORE_M0 = M % M0 and @p PARTIAL_STORE_N0 = N % N0
*
 * @note in cases @p PARTIAL_STORE_N0 != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
 * It automatically detects if a given M,N,M0,N0 combination can yield partial blocks in either the X or Y dimension,
 * and selects the corresponding store methods such that the boundary detection logic is only added when needed.
*
* The data to store is expected to have consecutive names for each row.
* E.g., for M0=3 and basename=c, the expected names are c0, c1 and c2.
* The Z offset is expected to have consecutive names.
* E.g., for M0=3 and Z=zin, the expected z offset names are zin0, zin1 and zin2.
*
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] N0 The size of each vector, for non-partial blocks. Supported: 1, 2, 3, 4, 8, 16
* @param[in] DATA_TYPE The data type of the vectors
* @param[in] BASENAME The basename of the variables
* @param[in] PTR The base pointer
* @param[in] STRIDE_Y The stride value in y-axis direction
* @param[in] Z The offset in z-axis direction
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @param[in] PARTIAL_STORE_N0 The partial size in x, for partial blocks. Supported: [0, @p N0)
* @param[in] PARTIAL_COND_Y Condition on the y axis to perform the partial store Y. True to use PARTIAL_STORE_M0 rather than M0.
* @param[in] PARTIAL_COND_X Condition on the x axis to perform the partial store X. True to use PARTIAL_STORE_N0 rather than N0.
* @{
*/
#if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case1: No partial blocks in either x or y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z)
#elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0
// Case2: Partial blocks in y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y)
#elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0
// Case3: Partial blocks in x
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X)
#else // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
// Case4: Partial blocks in both x and y
#define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \
STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X)
#endif // PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0
#endif // defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0)
/** @} */ // end of group STORE_BLOCK_BOUNDARY_AWARE
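// Worked example of the compile-time case selection above: for a dst tensor of shape
// M=64, N=33 with M0=4 and N0=16, PARTIAL_STORE_M0 = 64 % 4 = 0 and
// PARTIAL_STORE_N0 = 33 % 16 = 1, so Case3 applies and only the x-boundary branch
// (STORE_BLOCK_PARTIAL_IN_X) is compiled into the kernel.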
#if defined(PARTIAL_STORE_M0)
/** Compute the start m0 row (LHS, BIAS and DST) in a boundary-aware way so as to avoid padding
* @name COMPUTE_M0_START_ROW
 * If there are any partial blocks in the y dimension, they are placed at the beginning of the rows.
* This shift amount is added to all rows such that the partial block (at the beginning) overlaps with the subsequent
* blocks in the y dimension to avoid any padding.
 * E.g., M0=4, PARTIAL_STORE_M0=1:
* | Non-overlapping | +M0_ROW_SHIFT (Overlapping)
* block 0 (partial)| start row = 0 | start row = 0
* block 1 (full) | start row = 4 | start row = 1
* block 2 (full) | start row = 8 | start row = 5
*
* @param[in] y Global id of current block in y.
* @param[in] M0 The number of rows to store, for non-partial blocks. Supported: 1-16
* @param[in] PARTIAL_STORE_M0 The partial size in y, for partial blocks. Supported: [0, @p M0)
* @{
*/
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0))))
#else // defined(PARTIAL_STORE_M0)
#define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \
((uint)(y * M0))
#endif // defined(PARTIAL_STORE_M0)
/** @} */ // end of group COMPUTE_M0_START_ROW
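// Worked example: with M0=4 and PARTIAL_STORE_M0=1 the shift is
// (M0 - PARTIAL_STORE_M0) % M0 = 3, so y=0,1,2 map to start rows max(0, 0-3)=0,
// max(0, 4-3)=1 and max(0, 8-3)=5, matching the table above.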
/** Store a vector that can only be partial in x.
*
 * @note in case @p vec_size or @p leftover != 1, 2, 3, 4, 8, 16, extra vstore(s) will be invoked, thus incurring a small performance penalty.
*
* The data to store is expected to end in a 0.
* E.g., for basename=c, the expected name is c0.
*
* @param[in] basename The name of the variable without trailing 0
* @param[in] data_type The data type of the vector
* @param[in] ptr The base pointer
* @param[in] vec_size The vector size if cond = false. Supported: 1, 2, 3, 4, 8, 16
 * @param[in] leftover The vector size if cond = true. Supported range: [1, @p vec_size)
 * @param[in] cond Condition to select either vec_size or leftover
* @{
*/
#define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \
STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond)
/** @} */ // end of group STORE_VECTOR_SELECT
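// Illustrative use (hypothetical names): STORE_VECTOR_SELECT(c, float, ptr, 4, 2,
// at_boundary) stores the full float4 c0 when at_boundary is false and only its
// lower 2 elements otherwise; STRIDE_Y and Z are fixed to 0 because a single vector
// has no per-row offsets.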
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8)
#if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
#endif // defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8)
#if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#pragma OPENCL EXTENSION cl_arm_printf : enable
#endif // defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf)
#define GPU_ARCH_MIDGARD 0x100
#define GPU_ARCH_BIFROST 0x200
/** Concatenate two inputs.
*
* @param[in] a The first input to be concatenated
* @param[in] b The second input to be concatenated
*
* @return The concatenated output
*/
#define CONCAT(a, b) a##b
/** Expand the given vector
*
* @param[in] x The vector to be expanded
*
* @return The expanded output
*/
#define EXPAND(x) x
/** Clamp the given value between an upper and lower bound.
*
* @param[in] x The value to be clamped
* @param[in] min_val The lower bound
* @param[in] max_val The upper bound
*
* @return The clamped value.
*/
#define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
/** REVn reverses the given vector whose size is n.
* @name REVn
*
* @param[in] x The vector to be reversed
*
* @return The reversed vector
* @{
*/
#define REV1(x) ((x))
#define REV2(x) ((x).s10)
#define REV3(x) ((x).s210)
#define REV4(x) ((x).s3210)
#define REV8(x) ((x).s76543210)
#define REV16(x) ((x).sFEDCBA9876543210)
/** @} */ // end of group REVn
/** Reverse the given vector.
* @name REVERSE
*
* @param[in] x The vector to be reversed
* @param[in] s The size of the vector
*
* @return The reversed vector
* @{
*/
#define REVERSE_STR(x, s) REV##s((x))
#define REVERSE(x, s) REVERSE_STR(x, s)
/** @} */ // end of group REVERSE
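// Illustrative expansion: REVERSE(x, 4) selects REV4 and yields ((x).s3210), i.e.
// (a, b, c, d) becomes (d, c, b, a).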
/** Circular-right-shift (rotate-right) the vector of size s by the amount of n.
* @name ROTs_n
*
* @param[in] x The vector to be shifted
*
* @return The shifted vector
* @{
*/
#define ROT1_0(x) ((x))
#define ROT2_0(x) ((x))
#define ROT2_1(x) ((x).s10)
#define ROT3_0(x) ((x))
#define ROT3_1(x) ((x).s201)
#define ROT3_2(x) ((x).s120)
#define ROT4_0(x) ((x))
#define ROT4_1(x) ((x).s3012)
#define ROT4_2(x) ((x).s2301)
#define ROT4_3(x) ((x).s1230)
#define ROT8_0(x) ((x))
#define ROT8_1(x) ((x).s70123456)
#define ROT8_2(x) ((x).s67012345)
#define ROT8_3(x) ((x).s56701234)
#define ROT8_4(x) ((x).s45670123)
#define ROT8_5(x) ((x).s34567012)
#define ROT8_6(x) ((x).s23456701)
#define ROT8_7(x) ((x).s12345670)
#define ROT16_0(x) ((x))
#define ROT16_1(x) ((x).sF0123456789ABCDE)
#define ROT16_2(x) ((x).sEF0123456789ABCD)
#define ROT16_3(x) ((x).sDEF0123456789ABC)
#define ROT16_4(x) ((x).sCDEF0123456789AB)
#define ROT16_5(x) ((x).sBCDEF0123456789A)
#define ROT16_6(x) ((x).sABCDEF0123456789)
#define ROT16_7(x) ((x).s9ABCDEF012345678)
#define ROT16_8(x) ((x).s89ABCDEF01234567)
#define ROT16_9(x) ((x).s789ABCDEF0123456)
#define ROT16_10(x) ((x).s6789ABCDEF012345)
#define ROT16_11(x) ((x).s56789ABCDEF01234)
#define ROT16_12(x) ((x).s456789ABCDEF0123)
#define ROT16_13(x) ((x).s3456789ABCDEF012)
#define ROT16_14(x) ((x).s23456789ABCDEF01)
#define ROT16_15(x) ((x).s123456789ABCDEF0)
/** @} */ // end of group ROTs_n
/** Circular-right-shift (rotate-right) the given vector by the given amount.
* @name ROTATE
*
* @param[in] x The vector to be shifted
* @param[in] s The size of the vector
* @param[in] n The amount to be shifted
*
* @return The shifted vector
* @{
*/
#define ROTATE_STR(x, s, n) ROT##s##_##n(x)
#define ROTATE(x, s, n) ROTATE_STR(x, s, n)
/** @} */ // end of group ROTATE
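// Illustrative expansion: ROTATE(x, 4, 1) selects ROT4_1 and yields ((x).s3012),
// i.e. (a, b, c, d) rotated right by one becomes (d, a, b, c).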
/** Creates a vector of size n filled with offset values corresponding to the location of each element.
* @name V_OFFSn
*
* @param[in] dt The data type of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define V_OFFS1(dt) (dt##1)(0)
#define V_OFFS2(dt) (dt##2)(0, 1)
#define V_OFFS3(dt) (dt##3)(0, 1, 2)
#define V_OFFS4(dt) (dt##4)(0, 1, 2, 3)
#define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7)
#define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
/** @} */ // end of group V_OFFSn
/** Create a vector filled with offset values corresponding to the location of each element.
* @name VEC_OFFS
*
* @param[in] dt The data type of the output vector
* @param[in] s The size of the output vector
*
* @return The vector filled with offset values
* @{
*/
#define VEC_OFFS_STR(dt, s) V_OFFS##s(dt)
#define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s)
/** @} */ // end of group VEC_OFFS
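// For example, VEC_OFFS(int, 4) expands to V_OFFS4(int), i.e. (int4)(0, 1, 2, 3).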
#define VLOAD_STR(size) vload##size
#define VLOAD(size) VLOAD_STR(size)
#define PIXEL_UNIT4 1
#define PIXEL_UNIT8 2
#define PIXEL_UNIT16 4
/** Utility macro to convert a vector size to a pixel unit.
*
* @name CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
*
* @param[in] vec_size Vector size. Only 4, 8 and 16 are supported
*
* @return The pixel unit (number of pixels)
* @{
*/
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size
#define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size)
/** @} */ // end of group CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT
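// For example, CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(8) expands to PIXEL_UNIT8, i.e. 2,
// since a vector of 8 elements spans 2 four-channel pixels.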
#define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord)));
#define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord)));
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
#define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord)));
#define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)));
#define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord)));
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16)
/** Utility macro to read a 2D OpenCL image object.
*
* @note Coordinates are not normalized
*
* @param[in] data_type Data type
* @param[in] n0 Number of pixels to read. Only 1, 2 and 4 are supported
* @param[in] img OpenCL image object
* @param[in] x_coord The x coordinate for the top-left pixel
* @param[in] y_coord The y coordinate for the top-left pixel
*
* @return Pixels from the 2D OpenCL image object
* @{
*/
#define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord)
#define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord)
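/** @} */ // end of group READ_IMAGE2D
// Example usage (illustrative; img is assumed to be a 2D image object with float channels):
//   float16 pix = READ_IMAGE2D(float, 4, img, x, y); // reads 4 consecutive pixels starting at (x, y)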
#define VSTORE_STR(size) vstore##size
#define VSTORE(size) VSTORE_STR(size)
#define float1 float
#define half1 half
#define char1 char
#define uchar1 uchar
#define short1 short
#define ushort1 ushort
#define int1 int
#define uint1 uint
#define long1 long
#define ulong1 ulong
#define double1 double
#define vload1(OFFSET, PTR) *(OFFSET + PTR)
#define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA
/** Extended partial vstore that correctly handles scalar values as well.
* Store the **lower** @p store_size elements of the given vector while minimising the number of vstore ops
* @name VSTORE_PARTIAL
*
* @note With this macro, the passed data can be both a vector and a scalar
* @note @p store_size needs to be <= @p size
* eg 1: Valid
* VSTORE_PARTIAL(16, 15) ...;
* eg 2: Invalid
* VSTORE_PARTIAL(4, 7) ...;
*
* @param[in] size The width of @p DATA. Supported values: 1(scalar), 2, 3, 4, 8, 16
* @param[in] store_size The number of lower elements to store. Supported values: 1-16, but has to be <= @p size
* @{
*/
#define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size
#define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size)
#define NO_STORE(data, offs, ptr) \
{ \
}
// Size == 1 (scalar)
#define vstore_partial_1_0 NO_STORE
#define vstore_partial_1_1 vstore1
#define vstore_partial_1_2 NO_STORE
#define vstore_partial_1_3 NO_STORE
#define vstore_partial_1_4 NO_STORE
#define vstore_partial_1_5 NO_STORE
#define vstore_partial_1_6 NO_STORE
#define vstore_partial_1_7 NO_STORE
#define vstore_partial_1_8 NO_STORE
#define vstore_partial_1_9 NO_STORE
#define vstore_partial_1_10 NO_STORE
#define vstore_partial_1_11 NO_STORE
#define vstore_partial_1_12 NO_STORE
#define vstore_partial_1_13 NO_STORE
#define vstore_partial_1_14 NO_STORE
#define vstore_partial_1_15 NO_STORE
#define vstore_partial_1_16 NO_STORE
// Size == 2
#define vstore_partial_2_0 NO_STORE
#define vstore_partial_2_1 vstore_partial_1
#define vstore_partial_2_2 vstore_partial_2
#define vstore_partial_2_3 NO_STORE
#define vstore_partial_2_4 NO_STORE
#define vstore_partial_2_5 NO_STORE
#define vstore_partial_2_6 NO_STORE
#define vstore_partial_2_7 NO_STORE
#define vstore_partial_2_8 NO_STORE
#define vstore_partial_2_9 NO_STORE
#define vstore_partial_2_10 NO_STORE
#define vstore_partial_2_11 NO_STORE
#define vstore_partial_2_12 NO_STORE
#define vstore_partial_2_13 NO_STORE
#define vstore_partial_2_14 NO_STORE
#define vstore_partial_2_15 NO_STORE
#define vstore_partial_2_16 NO_STORE
// Size == 3
#define vstore_partial_3_0 NO_STORE
#define vstore_partial_3_1 vstore_partial_1
#define vstore_partial_3_2 vstore_partial_2
#define vstore_partial_3_3 vstore_partial_3
#define vstore_partial_3_4 NO_STORE
#define vstore_partial_3_5 NO_STORE
#define vstore_partial_3_6 NO_STORE
#define vstore_partial_3_7 NO_STORE
#define vstore_partial_3_8 NO_STORE
#define vstore_partial_3_9 NO_STORE
#define vstore_partial_3_10 NO_STORE
#define vstore_partial_3_11 NO_STORE
#define vstore_partial_3_12 NO_STORE
#define vstore_partial_3_13 NO_STORE
#define vstore_partial_3_14 NO_STORE
#define vstore_partial_3_15 NO_STORE
#define vstore_partial_3_16 NO_STORE
// Size == 4
#define vstore_partial_4_0 NO_STORE
#define vstore_partial_4_1 vstore_partial_1
#define vstore_partial_4_2 vstore_partial_2
#define vstore_partial_4_3 vstore_partial_3
#define vstore_partial_4_4 vstore_partial_4
#define vstore_partial_4_5 NO_STORE
#define vstore_partial_4_6 NO_STORE
#define vstore_partial_4_7 NO_STORE
#define vstore_partial_4_8 NO_STORE
#define vstore_partial_4_9 NO_STORE
#define vstore_partial_4_10 NO_STORE
#define vstore_partial_4_11 NO_STORE
#define vstore_partial_4_12 NO_STORE
#define vstore_partial_4_13 NO_STORE
#define vstore_partial_4_14 NO_STORE
#define vstore_partial_4_15 NO_STORE
#define vstore_partial_4_16 NO_STORE
// Size == 8
#define vstore_partial_8_0 NO_STORE
#define vstore_partial_8_1 vstore_partial_1
#define vstore_partial_8_2 vstore_partial_2
#define vstore_partial_8_3 vstore_partial_3
#define vstore_partial_8_4 vstore_partial_4
#define vstore_partial_8_5 vstore_partial_5
#define vstore_partial_8_6 vstore_partial_6
#define vstore_partial_8_7 vstore_partial_7
#define vstore_partial_8_8 vstore_partial_8
#define vstore_partial_8_9 NO_STORE
#define vstore_partial_8_10 NO_STORE
#define vstore_partial_8_11 NO_STORE
#define vstore_partial_8_12 NO_STORE
#define vstore_partial_8_13 NO_STORE
#define vstore_partial_8_14 NO_STORE
#define vstore_partial_8_15 NO_STORE
#define vstore_partial_8_16 NO_STORE
// Size == 16
#define vstore_partial_16_0 NO_STORE
#define vstore_partial_16_1 vstore_partial_1
#define vstore_partial_16_2 vstore_partial_2
#define vstore_partial_16_3 vstore_partial_3
#define vstore_partial_16_4 vstore_partial_4
#define vstore_partial_16_5 vstore_partial_5
#define vstore_partial_16_6 vstore_partial_6
#define vstore_partial_16_7 vstore_partial_7
#define vstore_partial_16_8 vstore_partial_8
#define vstore_partial_16_9 vstore_partial_9
#define vstore_partial_16_10 vstore_partial_10
#define vstore_partial_16_11 vstore_partial_11
#define vstore_partial_16_12 vstore_partial_12
#define vstore_partial_16_13 vstore_partial_13
#define vstore_partial_16_14 vstore_partial_14
#define vstore_partial_16_15 vstore_partial_15
#define vstore_partial_16_16 vstore_partial_16
/** Partial vstore. Store the **lower** n elements of the given vector while minimising the number of vstore ops
* @name vstore_partial_n
*
* @note @p DATA needs to be a vector not a scalar
* @note n needs to be <= the vector width of the input variable @p DATA
* eg 1: Valid
* vstore_partial_15(var:float16, 0, 0xabcd);
* eg 2: Invalid
* vstore_partial_7(var:float4, 0, 0xabcd);
*
* @note in cases n == 1, 2, 3, 4, 8, 16, no extra vstore is invoked, thus there's no performance penalty.
*
* @param[in] DATA The name of the variable
* @param[in] OFFSET Offset in n
* @param[in] PTR The base pointer
* @{
*/
#define vstore_partial_1(DATA, OFFSET, PTR) \
vstore1(DATA.s0, OFFSET, PTR);
#define vstore_partial_2(DATA, OFFSET, PTR) \
vstore2(DATA.s01, OFFSET, PTR);
#define vstore_partial_3(DATA, OFFSET, PTR) \
vstore3(DATA.s012, OFFSET, PTR);
#define vstore_partial_4(DATA, OFFSET, PTR) \
vstore4(DATA.s0123, OFFSET, PTR);
#define vstore_partial_5(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore1(DATA.s4, OFFSET, PTR + 4);
#define vstore_partial_6(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_2(DATA.s45, OFFSET, PTR + 4);
#define vstore_partial_7(DATA, OFFSET, PTR) \
vstore_partial_4(DATA.s0123, OFFSET, PTR); \
vstore_partial_3(DATA.s456, OFFSET, PTR + 4);
#define vstore_partial_8(DATA, OFFSET, PTR) \
vstore8(DATA.s01234567, OFFSET, PTR);
#define vstore_partial_9(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore1(DATA.s8, OFFSET, PTR + 8);
#define vstore_partial_10(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_2(DATA.s89, OFFSET, PTR + 8);
#define vstore_partial_11(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_3(DATA.s89a, OFFSET, PTR + 8);
#define vstore_partial_12(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_4(DATA.s89ab, OFFSET, PTR + 8);
#define vstore_partial_13(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_5(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_14(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_6(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_15(DATA, OFFSET, PTR) \
vstore_partial_8(DATA.s01234567, OFFSET, PTR); \
vstore_partial_7(DATA.s89abcdef, OFFSET, PTR + 8);
#define vstore_partial_16(DATA, OFFSET, PTR) \
vstore16(DATA, OFFSET, PTR);
/** @} */ // end of group vstore_partial_n
/** @} */ // end of group VSTORE_PARTIAL
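// For example, VSTORE_PARTIAL(8, 5)(data, 0, ptr) dispatches to vstore_partial_5, which
// stores data.s0123 with a single vstore4 and data.s4 with a vstore1 at ptr + 4,
// writing the lower 5 elements with only two store operations.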
// The convert_* built-in functions with the _sat modifier are not supported for floating point,
// so we create defines without _sat to work around this issue
#define convert_float_sat convert_float
#define convert_float1_sat convert_float
#define convert_float2_sat convert_float2
#define convert_float3_sat convert_float3
#define convert_float4_sat convert_float4
#define convert_float8_sat convert_float8
#define convert_float16_sat convert_float16
#define convert_half_sat convert_half
#define convert_half1_sat convert_half
#define convert_half2_sat convert_half2
#define convert_half3_sat convert_half3
#define convert_half4_sat convert_half4
#define convert_half8_sat convert_half8
#define convert_half16_sat convert_half16
#define convert_float1 convert_float
#define convert_half1 convert_half
#define convert_char1 convert_char
#define convert_uchar1 convert_uchar
#define convert_short1 convert_short
#define convert_ushort1 convert_ushort
#define convert_int1 convert_int
#define convert_uint1 convert_uint
#define convert_long1 convert_long
#define convert_ulong1 convert_ulong
#define convert_double1 convert_double
#define convert_char1_sat convert_char_sat
#define convert_uchar1_sat convert_uchar_sat
#define convert_short1_sat convert_short_sat
#define convert_ushort1_sat convert_ushort_sat
#define convert_int1_sat convert_int_sat
#define convert_uint1_sat convert_uint_sat
#define convert_long1_sat convert_long_sat
#define convert_ulong1_sat convert_ulong_sat
#define convert_double1_sat convert_double_sat
#define VEC_DATA_TYPE_STR(type, size) type##size
#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
#define CONVERT_STR(x, type) (convert_##type((x)))
#define CONVERT(x, type) CONVERT_STR(x, type)
#define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x)))
#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
#define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x)))
#define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round)
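// For example, CONVERT_SAT(acc, uchar16) expands to (convert_uchar16_sat((acc))), while
// CONVERT_SAT(acc, float16) resolves to (convert_float16((acc))) via the defines above,
// since _sat is not supported for floating point.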
#define select_vec_dt_uchar(size) uchar##size
#define select_vec_dt_char(size) char##size
#define select_vec_dt_ushort(size) ushort##size
#define select_vec_dt_short(size) short##size
#define select_vec_dt_half(size) short##size
#define select_vec_dt_uint(size) uint##size
#define select_vec_dt_int(size) int##size
#define select_vec_dt_float(size) int##size
#define select_vec_dt_ulong(size) ulong##size
#define select_vec_dt_long(size) long##size
#define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size)
#define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size)
#define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1)
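// For example, SELECT_VEC_DATA_TYPE(half, 8) expands to short8, the signed integer type
// of matching element width that OpenCL relational built-ins return for half8 operands.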
#define sum_reduce_1(x) (x)
#define sum_reduce_2(x) ((x).s0) + ((x).s1)
#define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2)
#define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23)
#define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567)
#define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF)
#define SUM_REDUCE_STR(x, size) sum_reduce_##size(x)
#define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size)
#define max_reduce_1(x) (x)
#define max_reduce_2(x) max(((x).s0), ((x).s1))
#define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2))
#define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23))
#define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567))
#define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF))
#define MAX_REDUCE_STR(x, size) max_reduce_##size(x)
#define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size)
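// For example, given float4 v = (float4)(1.0f, 2.0f, 3.0f, 4.0f),
// SUM_REDUCE(v, 4) evaluates to 10.0f and MAX_REDUCE(v, 4) to 4.0f.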
#define VECTOR_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_offset_first_element_in_bytes
#define IMAGE_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_offset_first_element_in_bytes
#define TENSOR3D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_offset_first_element_in_bytes
#define TENSOR4D_DECLARATION(name) \
__global uchar *name##_ptr, \
uint name##_stride_x, \
uint name##_step_x, \
uint name##_stride_y, \
uint name##_step_y, \
uint name##_stride_z, \
uint name##_step_z, \
uint name##_stride_w, \
uint name##_step_w, \
uint name##_offset_first_element_in_bytes
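// For example, IMAGE_DECLARATION(src) expands in a kernel signature to:
//   __global uchar *src_ptr, uint src_stride_x, uint src_step_x,
//   uint src_stride_y, uint src_step_y, uint src_offset_first_element_in_bytes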
#define CONVERT_TO_VECTOR_STRUCT(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x)
#define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \
update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0)
#define CONVERT_TO_IMAGE_STRUCT(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y)
#define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z)
#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \
update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \
update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0)
#define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size)
#define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \
update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size)
#define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \
tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \
name##_stride_z, name##_step_z)
/** Structure to hold Vector information */
typedef struct Vector
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source vector */
int stride_x; /**< Stride of the vector in X dimension (in bytes) */
} Vector;
/** Structure to hold Image information */
typedef struct Image
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
int stride_x; /**< Stride of the image in X dimension (in bytes) */
int stride_y; /**< Stride of the image in Y dimension (in bytes) */
} Image;
/** Structure to hold 3D tensor information */
typedef struct Tensor3D
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
int stride_x; /**< Stride of the image in X dimension (in bytes) */
int stride_y; /**< Stride of the image in Y dimension (in bytes) */
int stride_z; /**< Stride of the image in Z dimension (in bytes) */
} Tensor3D;
/** Structure to hold 4D tensor information */
typedef struct Tensor4D
{
__global uchar *ptr; /**< Pointer to the starting position of the buffer */
int offset_first_element_in_bytes; /**< The offset of the first element in the source image */
int stride_x; /**< Stride of the image in X dimension (in bytes) */
int stride_y; /**< Stride of the image in Y dimension (in bytes) */
int stride_z; /**< Stride of the image in Z dimension (in bytes) */
int stride_w; /**< Stride of the image in W dimension (in bytes) */
} Tensor4D;
/** Wrap vector information into a Vector structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source vector
* @param[in] stride_x Stride of the vector in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
*
* @return A vector object
*/
inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x)
{
Vector vector =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
};
vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x;
return vector;
}
/** Wrap image information into an Image structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
*
* @return An image object
*/
inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y;
return img;
}
/** Wrap 3D tensor information into an image structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the image in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
*
* @return An image object
*/
inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Image img =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y
};
img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return img;
}
/** Wrap 3D tensor information into a tensor structure, and make the pointer point at this workitem's data.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the image in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z;
return tensor;
}
/** Wrap 3D tensor information into a tensor structure.
*
* @param[in] ptr Pointer to the starting position of the buffer
* @param[in] offset_first_element_in_bytes The offset of the first element in the source image
* @param[in] stride_x Stride of the image in X dimension (in bytes)
* @param[in] step_x stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] stride_y Stride of the image in Y dimension (in bytes)
* @param[in] step_y stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] stride_z Stride of the image in Z dimension (in bytes)
* @param[in] step_z stride_z * number of elements along Z processed per workitem(in bytes)
*
* @return A 3D tensor object
*/
inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z)
{
Tensor3D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z
};
return tensor;
}
inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w,
uint step_w,
uint mod_size)
{
Tensor4D tensor =
{
.ptr = ptr,
.offset_first_element_in_bytes = offset_first_element_in_bytes,
.stride_x = stride_x,
.stride_y = stride_y,
.stride_z = stride_z,
.stride_w = stride_w
};
tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w;
return tensor;
}
/** Get the pointer position of a Vector
*
* @param[in] vec Pointer to the Vector structure
* @param[in] x Relative X position
*/
inline __global const uchar *vector_offset(const Vector *vec, int x)
{
return vec->ptr + x * vec->stride_x;
}
/** Get the pointer position of an Image
*
* @param[in] img Pointer to the Image structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
*/
inline __global uchar *offset(const Image *img, int x, int y)
{
return img->ptr + x * img->stride_x + y * img->stride_y;
}
/** Get the pointer position of a Tensor3D
*
* @param[in] tensor Pointer to the Tensor3D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
*/
inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z;
}
/** Get the pointer position of a Tensor4D
*
* @param[in] tensor Pointer to the Tensor4D structure
* @param[in] x Relative X position
* @param[in] y Relative Y position
* @param[in] z Relative Z position
* @param[in] w Relative W position
*/
inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w)
{
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w;
}
/** Get the offset for a given linear index of a Tensor3D
*
* @param[in] tensor Pointer to the Tensor3D structure
* @param[in] width Width of the input tensor
* @param[in] height Height of the input tensor
* @param[in] depth Depth of the input tensor
* @param[in] index Linear index
*/
inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index)
{
uint num_elements = width * height;
const uint z = index / num_elements;
index %= num_elements;
const uint y = index / width;
index %= width;
const uint x = index;
return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes;
}
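// For example, with width = 4, height = 3 and index = 17: num_elements = 12,
// z = 17 / 12 = 1, y = (17 % 12) / 4 = 1, x = 5 % 4 = 1, so the returned pointer
// addresses element (1, 1, 1).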
#endif // _HELPER_H
/** Macros that help in loop unrolling */
// Repeat macros with 3 params, excluding the implicit ID param
#define REPEAT_3_1(P_X, P_A, P_B, P_C) P_X##_DEF(0, P_A, P_B, P_C)
#define REPEAT_3_2(P_X, P_A, P_B, P_C) \
P_X##_DEF(1, P_A, P_B, P_C); \
REPEAT_3_1(P_X, P_A, P_B, P_C)
#define REPEAT_3_3(P_X, P_A, P_B, P_C) \
P_X##_DEF(2, P_A, P_B, P_C); \
REPEAT_3_2(P_X, P_A, P_B, P_C)
#define REPEAT_3_4(P_X, P_A, P_B, P_C) \
P_X##_DEF(3, P_A, P_B, P_C); \
REPEAT_3_3(P_X, P_A, P_B, P_C)
#define REPEAT_3_5(P_X, P_A, P_B, P_C) \
P_X##_DEF(4, P_A, P_B, P_C); \
REPEAT_3_4(P_X, P_A, P_B, P_C)
#define REPEAT_3_6(P_X, P_A, P_B, P_C) \
P_X##_DEF(5, P_A, P_B, P_C); \
REPEAT_3_5(P_X, P_A, P_B, P_C)
#define REPEAT_3_7(P_X, P_A, P_B, P_C) \
P_X##_DEF(6, P_A, P_B, P_C); \
REPEAT_3_6(P_X, P_A, P_B, P_C)
#define REPEAT_3_8(P_X, P_A, P_B, P_C) \
P_X##_DEF(7, P_A, P_B, P_C); \
REPEAT_3_7(P_X, P_A, P_B, P_C)
#define REPEAT_3_9(P_X, P_A, P_B, P_C) \
P_X##_DEF(8, P_A, P_B, P_C); \
REPEAT_3_8(P_X, P_A, P_B, P_C)
#define REPEAT_3_10(P_X, P_A, P_B, P_C) \
P_X##_DEF(9, P_A, P_B, P_C); \
REPEAT_3_9(P_X, P_A, P_B, P_C)
#define REPEAT_3_11(P_X, P_A, P_B, P_C) \
P_X##_DEF(A, P_A, P_B, P_C); \
REPEAT_3_10(P_X, P_A, P_B, P_C)
#define REPEAT_3_12(P_X, P_A, P_B, P_C) \
P_X##_DEF(B, P_A, P_B, P_C); \
REPEAT_3_11(P_X, P_A, P_B, P_C)
#define REPEAT_3_13(P_X, P_A, P_B, P_C) \
P_X##_DEF(C, P_A, P_B, P_C); \
REPEAT_3_12(P_X, P_A, P_B, P_C)
#define REPEAT_3_14(P_X, P_A, P_B, P_C) \
P_X##_DEF(D, P_A, P_B, P_C); \
REPEAT_3_13(P_X, P_A, P_B, P_C)
#define REPEAT_3_15(P_X, P_A, P_B, P_C) \
P_X##_DEF(E, P_A, P_B, P_C); \
REPEAT_3_14(P_X, P_A, P_B, P_C)
#define REPEAT_3_16(P_X, P_A, P_B, P_C) \
P_X##_DEF(F, P_A, P_B, P_C); \
REPEAT_3_15(P_X, P_A, P_B, P_C)
#define REPEAT_DEF_3_N(P_NUM, P_OP, P_A, P_B, P_C) REPEAT_3_##P_NUM(P_OP, P_A, P_B, P_C) // One level of indirection to ensure P_NUM is fully expanded before token pasting
#define REPEAT_3_N(P_NUM, P_OP, P_A, P_B, P_C) REPEAT_DEF_3_N(P_NUM, P_OP, P_A, P_B, P_C)
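// For example, REPEAT_3_N(3, VAR_INIT_TO_CONST, int, acc, 0) (with VAR_INIT_TO_CONST_DEF
// defined below) generates, in descending ID order: int acc2 = 0; int acc1 = 0; int acc0 = 0;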
// Repeat macros with 4 params, excluding the implicit ID param
#define REPEAT_4_1(P_X, P_A, P_B, P_C, P_D) P_X##_DEF(0, P_A, P_B, P_C, P_D)
#define REPEAT_4_2(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(1, P_A, P_B, P_C, P_D); \
REPEAT_4_1(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_3(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(2, P_A, P_B, P_C, P_D); \
REPEAT_4_2(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_4(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(3, P_A, P_B, P_C, P_D); \
REPEAT_4_3(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_5(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(4, P_A, P_B, P_C, P_D); \
REPEAT_4_4(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_6(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(5, P_A, P_B, P_C, P_D); \
REPEAT_4_5(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_7(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(6, P_A, P_B, P_C, P_D); \
REPEAT_4_6(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_8(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(7, P_A, P_B, P_C, P_D); \
REPEAT_4_7(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_9(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(8, P_A, P_B, P_C, P_D); \
REPEAT_4_8(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_10(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(9, P_A, P_B, P_C, P_D); \
REPEAT_4_9(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_11(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(A, P_A, P_B, P_C, P_D); \
REPEAT_4_10(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_12(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(B, P_A, P_B, P_C, P_D); \
REPEAT_4_11(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_13(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(C, P_A, P_B, P_C, P_D); \
REPEAT_4_12(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_14(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(D, P_A, P_B, P_C, P_D); \
REPEAT_4_13(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_15(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(E, P_A, P_B, P_C, P_D); \
REPEAT_4_14(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_4_16(P_X, P_A, P_B, P_C, P_D) \
P_X##_DEF(F, P_A, P_B, P_C, P_D); \
REPEAT_4_15(P_X, P_A, P_B, P_C, P_D)
#define REPEAT_DEF_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) REPEAT_4_##P_NUM(P_OP, P_A, P_B, P_C, P_D) // One level of indirection to ensure P_NUM is fully expanded before token pasting
#define REPEAT_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D) REPEAT_DEF_4_N(P_NUM, P_OP, P_A, P_B, P_C, P_D)
// Macro for initializing N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define VAR_INIT_TO_CONST_DEF(ID, TYPE, VAR, VAL) TYPE VAR##ID = VAL
#define REPEAT_VAR_INIT_TO_CONST(N, TYPE, VAR, VAL) REPEAT_3_N(N, VAR_INIT_TO_CONST, TYPE, VAR, VAL)
// Macro for initializing N variables by converting the data type. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define VAR_INIT_CONVERT_DEF(ID, TYPE_OUT, VAR_IN, VAR_OUT) TYPE_OUT VAR_OUT##ID = CONVERT(VAR_IN##ID, TYPE_OUT)
#define REPEAT_VAR_INIT_CONVERT(N, TYPE_OUT, VAR_IN, VAR_OUT) REPEAT_3_N(N, VAR_INIT_CONVERT, TYPE_OUT, VAR_IN, VAR_OUT)
// Macro for initializing N variables by converting the data type with saturation. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define VAR_INIT_CONVERT_SAT_DEF(ID, TYPE_OUT, VAR_IN, VAR_OUT) TYPE_OUT VAR_OUT##ID = CONVERT_SAT(VAR_IN##ID, TYPE_OUT)
#define REPEAT_VAR_INIT_CONVERT_SAT(N, TYPE_OUT, VAR_IN, VAR_OUT) REPEAT_3_N(N, VAR_INIT_CONVERT_SAT, TYPE_OUT, VAR_IN, VAR_OUT)
// Macro for adding a constant to N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define ADD_CONST_TO_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID += (TYPE)VAL
#define REPEAT_ADD_CONST_TO_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, ADD_CONST_TO_VAR, TYPE, VAR, VAL)
// Macro for multiplying N variables (VAR_B) by a constant (VAL) and adding to other N variables (VAR_A). Generates N statements that define VAR_A##N = RHS_ACCESSOR_DEF(...)
#define MLA_VAR_WITH_CONST_VEC_DEF(ID, VAR_A, VAR_B, VAL) VAR_A##ID += VAR_B##ID * VAL
#define REPEAT_MLA_VAR_WITH_CONST_VEC(N, VAR_A, VAR_B, VAL) REPEAT_3_N(N, MLA_VAR_WITH_CONST_VEC, VAR_A, VAR_B, VAL)
// Macro for adding a vector to N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define ADD_VECTOR_TO_VAR_DEF(ID, TYPE, VAR, VEC) VAR##ID += VEC
#define REPEAT_ADD_VECTOR_TO_VAR(N, VAR, VEC) REPEAT_3_N(N, ADD_VECTOR_TO_VAR, "", VAR, VEC)
// Macro for adding two N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define ADD_TWO_VARS_DEF(ID, TYPE, VAR_A, VAR_B) VAR_A##ID += VAR_B##ID
#define REPEAT_ADD_TWO_VARS(N, VAR_A, VAR_B) REPEAT_3_N(N, ADD_TWO_VARS, "", VAR_A, VAR_B)
// Macro for performing Max between a constant and N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define MAX_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = max(VAR##ID, (TYPE)VAL)
#define REPEAT_MAX_CONST_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, MAX_CONST_VAR, TYPE, VAR, VAL)
// Macro for performing Min between a constant and N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define MIN_CONST_VAR_DEF(ID, TYPE, VAR, VAL) VAR##ID = min(VAR##ID, (TYPE)VAL)
#define REPEAT_MIN_CONST_VAR(N, TYPE, VAR, VAL) REPEAT_3_N(N, MIN_CONST_VAR, TYPE, VAR, VAL)
// Macro for applying ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE to N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE)
#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT)
// Macro for applying ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE to N variables. Generates N statements that define VAR##N = RHS_ACCESSOR_DEF(...)
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) VAR##ID = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, SIZE)
#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE, SIZE, VAR, RES_MUL, RES_SHIFT)
// Macro for applying per-channel ASYMM_MULT_BY_QUANT_MULTIPLIER to N variables.
#define ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL_DEF(ID, SIZE, VAR, RES_MUL, RES_SHIFT) \
({ \
VEC_DATA_TYPE(int, N0) \
VAR##ID_shift_lt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_GREATER_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, N0); \
VEC_DATA_TYPE(int, N0) \
VAR##ID_shift_gt0 = ASYMM_MULT_BY_QUANT_MULTIPLIER_LESS_THAN_ONE(VAR##ID, RES_MUL, RES_SHIFT, N0); \
VAR##ID = select(VAR##ID_shift_lt0, VAR##ID_shift_gt0, RES_SHIFT >= 0); \
})
#define REPEAT_ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL(N, SIZE, VAR, RES_MUL, RES_SHIFT) REPEAT_4_N(N, ASYMM_MULT_BY_QUANT_MULTIPLIER_PER_CHANNEL, SIZE, VAR, RES_MUL, RES_SHIFT)
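// For example, REPEAT_MAX_CONST_VAR(2, int4, acc, 0) generates
// acc1 = max(acc1, (int4)0); acc0 = max(acc0, (int4)0);
// i.e. a ReLU-style clamp applied to two accumulators.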
#endif // ARM_COMPUTE_REPEAT_H
#if defined(M0) && defined(K0) && defined(V0) && defined(DATA_TYPE) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(PARTIAL_LOAD_M0) && defined(PARTIAL_LOAD_K0)
#define INC2 (VEC_DATA_TYPE(uint, 2))(0, 1)
#define INC3 (VEC_DATA_TYPE(uint, 3))(0, 1, 2)
#define INC4 (VEC_DATA_TYPE(uint, 4))(0, 1, 2, 3)
#define INC8 (VEC_DATA_TYPE(uint, 8))(0, 1, 2, 3, 4, 5, 6, 7)
#define INC16 (VEC_DATA_TYPE(uint, 16))(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
#define CONCAT_INC(K0) INC##K0
#define INC(K0) CONCAT_INC(K0)
#if(SRC_WIDTH % K0)
#define BOUNDARY_CONDITION_X(x, a) \
({ \
a = select(0, a, CONVERT(((x * (VEC_DATA_TYPE(uint, K0))K0 + INC(K0)) < (VEC_DATA_TYPE(uint, K0))SRC_WIDTH), VEC_DATA_TYPE(DATA_TYPE, K0))); \
})
#else // (SRC_WIDTH % K0)
#define BOUNDARY_CONDITION_X(x, a) \
({})
#endif // (SRC_WIDTH % K0)
#define LOAD_TENSOR_BOUNDARY_AWARE_M0XK0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin) \
({ \
if(y * M0 + M0 >= SRC_HEIGHT && PARTIAL_LOAD_M0 != 0) \
{ \
if(x * K0 + K0 >= SRC_WIDTH && (PARTIAL_LOAD_K0 != 0)) \
{ \
LOAD_TENSOR_M0XN0(PARTIAL_LOAD_M0, PARTIAL_LOAD_K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
} \
else \
{ \
LOAD_TENSOR_M0XN0(PARTIAL_LOAD_M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
} \
} \
else \
{ \
if(x * K0 + K0 >= SRC_WIDTH && (PARTIAL_LOAD_K0 != 0)) \
{ \
LOAD_TENSOR_M0XN0(M0, PARTIAL_LOAD_K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
} \
else \
{ \
LOAD_TENSOR_M0XN0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin); \
} \
} \
})
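// In words: work-items covering the bottom (partial) rows load only PARTIAL_LOAD_M0 rows,
// work-items covering the rightmost (partial) columns load only PARTIAL_LOAD_K0 columns,
// and all other work-items load a full M0 x K0 block.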
/** This OpenCL kernel reshapes the lhs input matrix. The kernel splits the input matrix into blocks of size M0xK0 and stores each one (not transposed) in
* the output matrix, unrolling the values.
*
* @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
* @note The width of the input tensor must be passed at compile time using -DSRC_WIDTH (e.g. -DSRC_WIDTH=16)
* @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
* @note The block's dimensions (M0 and K0) must be passed at compile time using -DM0 and -DK0 (e.g. -DM0=2, -DK0=2).
* @note The number of M0xK0 vertical blocks to store on the same output row must be passed at compile time using -DV0 (e.g. -DV0=2)
* @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_LOAD_M0 (e.g. -DPARTIAL_LOAD_M0=1)
* @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_LOAD_K0 (e.g. -DPARTIAL_LOAD_K0=1)
* @note Only the following values for M0, K0 and V0 are supported:
* M0: 2,3,4,5,6,7,8
* K0: 2,3,4,8,16
* V0: greater than 0
* @note In case the input has to be reinterpreted as a 3D tensor (e.g. input of convolution layer 1x1), the following information must be passed at compile time:
* -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
* -# HEIGHT_GEMM3D: The height of the input in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the input in case it has to be reinterpreted as a 3D tensor
* (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
* @note If the M0xK0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
*
* @param[in] src_ptr Pointer to the source LHS tensor. Supported data types: All
* @param[in] src_stride_x Stride of the source LHS tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source LHS tensor in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_stride_z Stride of the source LHS tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source LHS tensor
* @param[out] dst_ptr Pointer to the destination matrix. Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
* @param[in] cross_plane_pad (Optional) Bottom paddings in units of elements (only if REINTERPRET_INPUT_AS_3D is defined)
*/
__kernel void gemm_reshape_lhs_matrix_nt(TENSOR3D_DECLARATION(src),
TENSOR3D_DECLARATION(dst)
#if defined(REINTERPRET_INPUT_AS_3D)
,
uint cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
)
{
// Block size
#define BLOCK_SIZE ((M0) * (K0))
// Output offset X
#if defined(INTERLEAVE)
#define OUTPUT_OFFSET_X (K0)
#else // defined(INTERLEAVE)
#define OUTPUT_OFFSET_X (BLOCK_SIZE)
#endif // defined(INTERLEAVE)
// Output step X
#if defined(INTERLEAVE)
#define OUTPUT_STEP_X ((K0) * (V0))
#else // Do not interleave
#define OUTPUT_STEP_X (K0)
#endif // defined(INTERLEAVE)
// Compute source and destination addresses
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
// ------------------ Compute input/output addresses ---------------------------
// Compute the input address
__global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)K0 * sizeof(DATA_TYPE) + y * (uint)M0 * src_stride_y;
// Compute the output address
__global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)BLOCK_SIZE * (uint)V0 * sizeof(DATA_TYPE)) + ((y / (uint)V0) * (uint)dst_stride_y) + ((y % V0) *
(uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE));
// Create variables: uint zin0=0, zin1=0, zin2=0...zin(M0-1)=0;
REPEAT_VAR_INIT_TO_CONST(M0, uint, zin, 0);
#if defined(REINTERPRET_INPUT_AS_3D)
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply src_stride_z by DEPTH_GEMM3D
input_ptr += z * (uint)src_stride_z * DEPTH_GEMM3D;
// The plane (zin) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zin, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, cross_plane_pad, src_stride_y);
#else // defined(REINTERPRET_INPUT_AS_3D)
input_ptr += z * (uint)src_stride_z;
#endif // defined(REINTERPRET_INPUT_AS_3D)
// Add offset for batched GEMM
output_ptr += z * (uint)dst_stride_z;
// ---------------------------Load input values --------------------------------
// Load values from the LHS matrix
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, K0), a, 0);
LOAD_TENSOR_BOUNDARY_AWARE_M0XK0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin);
// ---------------------------Store output values ------------------------------
REPEAT_VAR_INIT_TO_CONST(16, uint, zout, 0);
STORE_BLOCK(M0, K0, DATA_TYPE, a, output_ptr, OUTPUT_STEP_X * sizeof(DATA_TYPE), zout);
#undef BLOCK_SIZE
#undef OUTPUT_OFFSET_X
#undef OUTPUT_STEP_X
}
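// Example build options for gemm_reshape_lhs_matrix_nt (illustrative values):
//   -DDATA_TYPE=float -DSRC_WIDTH=17 -DSRC_HEIGHT=13 -DM0=4 -DK0=4 -DV0=2
//   -DPARTIAL_LOAD_M0=1 -DPARTIAL_LOAD_K0=1
// Here PARTIAL_LOAD_M0 matches SRC_HEIGHT % M0 (13 % 4) and PARTIAL_LOAD_K0 matches SRC_WIDTH % K0 (17 % 4).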
#if M0 == 2
#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
({ \
VEC_DATA_TYPE(DATA_TYPE, M0) \
res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i); \
VSTORE(M0) \
(res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
})
#elif M0 == 3 // M0 == 3
#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
({ \
VEC_DATA_TYPE(DATA_TYPE, M0) \
res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i, a2.s##i); \
VSTORE(M0) \
(res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
})
#elif M0 == 4 // M0 == 4
#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
({ \
VEC_DATA_TYPE(DATA_TYPE, M0) \
res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
VSTORE(M0) \
(res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
})
#elif M0 == 5 // M0 == 5
#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
({ \
VEC_DATA_TYPE(DATA_TYPE, 4) \
res0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
DATA_TYPE res1 = a4.s##i; \
VSTORE(4) \
(res0, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
*((__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE)) + 4) = res1; \
})
#elif M0 == 6 // M0 == 6
#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
({ \
VEC_DATA_TYPE(DATA_TYPE, 4) \
res0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
VEC_DATA_TYPE(DATA_TYPE, 2) \
res1 = (VEC_DATA_TYPE(DATA_TYPE, 2))(a4.s##i, a5.s##i); \
VSTORE(4) \
(res0, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
VSTORE(2) \
(res1, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE)) + 4); \
})
#elif M0 == 7 // M0 == 7
#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
({ \
VEC_DATA_TYPE(DATA_TYPE, 4) \
res0 = (VEC_DATA_TYPE(DATA_TYPE, 4))(a0.s##i, a1.s##i, a2.s##i, a3.s##i); \
VEC_DATA_TYPE(DATA_TYPE, 3) \
res1 = (VEC_DATA_TYPE(DATA_TYPE, 3))(a4.s##i, a5.s##i, a6.s##i); \
VSTORE(4) \
(res0, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
VSTORE(3) \
(res1, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE)) + 4); \
})
#elif M0 == 8 // M0 == 8
#define TRANSPOSE_COLUMN_AND_STORE(output_ptr, output_step_x, i) \
({ \
VEC_DATA_TYPE(DATA_TYPE, M0) \
res = (VEC_DATA_TYPE(DATA_TYPE, M0))(a0.s##i, a1.s##i, a2.s##i, a3.s##i, a4.s##i, a5.s##i, a6.s##i, a7.s##i); \
VSTORE(M0) \
(res, 0, (__global DATA_TYPE *)(output_ptr + 0x##i * output_step_x * sizeof(DATA_TYPE))); \
})
#else // M0 not supported
#error "M0 value not supported"
#endif // M0 conditions
/** This OpenCL kernel reshapes the lhs input matrix. The kernel splits the input matrix into blocks of size M0xK0 and stores each one (transposed) in
* the output matrix, unrolling the values.
*
* @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
* @note The width of the input tensor must be passed at compile time using -DSRC_WIDTH (e.g. -DSRC_WIDTH=16)
* @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
* @note The block's dimensions (M0 and K0) must be passed at compile time using -DM0 and -DK0 (e.g. -DM0=2, -DK0=2).
* @note The number of M0xK0 vertical blocks to store on the same output row must be passed at compile time using -DV0 (e.g. -DV0=2)
* @note The size of the partial load block in y must be passed at compile time using -DPARTIAL_LOAD_M0 (e.g. -DPARTIAL_LOAD_M0=1)
* @note The size of the partial load block in x must be passed at compile time using -DPARTIAL_LOAD_K0 (e.g. -DPARTIAL_LOAD_K0=1)
* @note Only the following values for M0, K0 and V0 are supported:
* M0: 2,3,4,5,6,7,8
* K0: 2,3,4,8,16
* V0: greater than 0
* @note In case the input has to be reinterpreted as a 3D tensor (e.g. input of convolution layer 1x1), the following information must be passed at compile time:
* -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
* -# HEIGHT_GEMM3D: The height of the input in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the input in case it has to be reinterpreted as a 3D tensor
* (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns matrix A NOT reshaped
* @note If the M0xK0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
*
* @param[in] src_ptr Pointer to the source LHS tensor. Supported data types: All
* @param[in] src_stride_x Stride of the source LHS tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source LHS tensor in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_stride_z Stride of the source LHS tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source LHS tensor
* @param[out] dst_ptr Pointer to the destination matrix. Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
* @param[in] cross_plane_pad (Optional) Bottom paddings in units of elements (only if REINTERPRET_INPUT_AS_3D is defined)
*/
__kernel void gemm_reshape_lhs_matrix_t(TENSOR3D_DECLARATION(src),
TENSOR3D_DECLARATION(dst)
#if defined(REINTERPRET_INPUT_AS_3D)
,
uint cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
)
{
// Block size
#define BLOCK_SIZE ((M0) * (K0))
// Output offset X
#if defined(INTERLEAVE)
#define OUTPUT_OFFSET_X (M0)
#else // defined(INTERLEAVE)
#define OUTPUT_OFFSET_X (BLOCK_SIZE)
#endif // defined(INTERLEAVE)
// Output step X
#if defined(INTERLEAVE)
#define OUTPUT_STEP_X ((M0) * (V0))
#else // Do not interleave
#define OUTPUT_STEP_X (M0)
#endif // defined(INTERLEAVE)
// Compute source and destination addresses
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
// ------------------ Compute input/output addresses ---------------------------
// Compute the input address
__global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)K0 * sizeof(DATA_TYPE) + y * (uint)M0 * src_stride_y;
// Compute the output address
__global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)BLOCK_SIZE * (uint)V0 * sizeof(DATA_TYPE)) + ((y / (uint)V0) * (uint)dst_stride_y) + ((y % V0) *
(uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE));
// Create variables: uint zin0=0, zin1=0, zin2=0...zin(M0-1)=0;
REPEAT_VAR_INIT_TO_CONST(M0, uint, zin, 0);
#if defined(REINTERPRET_INPUT_AS_3D)
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply src_stride_z by DEPTH_GEMM3D
input_ptr += z * (uint)src_stride_z * DEPTH_GEMM3D;
// The plane (zin) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zin, y, HEIGHT_GEMM3D, DEPTH_GEMM3D, cross_plane_pad, src_stride_y);
#else // defined(REINTERPRET_INPUT_AS_3D)
input_ptr += z * (uint)src_stride_z;
#endif // defined(REINTERPRET_INPUT_AS_3D)
// Add offset for batched GEMM
output_ptr += z * (uint)dst_stride_z;
// ---------------------------Load input values --------------------------------
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, K0), a, 0);
LOAD_TENSOR_BOUNDARY_AWARE_M0XK0(M0, K0, DATA_TYPE, a, input_ptr, src_stride_y, zin);
// ---------------------------Transpose and store block -----------------------
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 0);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 1);
#if K0 > 2
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 2);
#endif // K0 > 2
#if K0 > 3
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 3);
#endif // K0 > 3
#if K0 > 4
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 4);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 5);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 6);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 7);
#endif // K0 > 4
#if K0 > 8
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 8);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, 9);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, A);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, B);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, C);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, D);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, E);
TRANSPOSE_COLUMN_AND_STORE(output_ptr, OUTPUT_STEP_X, F);
#endif // K0 > 8
#undef BLOCK_SIZE
#undef OUTPUT_OFFSET_X
#undef OUTPUT_STEP_X
}
#endif // defined(M0) && defined(K0) && defined(V0) && defined(DATA_TYPE) && defined(SRC_WIDTH) && defined(SRC_HEIGHT) && defined(PARTIAL_LOAD_M0) && defined(PARTIAL_LOAD_K0)
#if defined(K0) && defined(N0) && defined(H0) && defined(DATA_TYPE) && defined(SRC_HEIGHT)
/** This OpenCL kernel reshapes the rhs input matrix. The kernel splits the input matrix into blocks of size K0xN0 and stores each one (not transposed) in
* the output matrix, unrolling the values.
*
* @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
* @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
* @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (e.g. -DK0=2, -DN0=2).
* @note The number of K0xN0 vertical blocks to store on the same output row must be passed at compile time using -DH0 (e.g. -DH0=2)
* @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
* @note Only the following values for K0, N0 and H0 are supported:
* N0: 2,3,4,8,16
* K0: 1,2,3,4,8,16
* H0: greater than 0
*
* @param[in] src_ptr Pointer to the source RHS tensor. Supported data types: All
* @param[in] src_stride_x Stride of the source RHS tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source RHS tensor in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_stride_z Stride of the source RHS tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source RHS tensor
* @param[out] dst_ptr Pointer to the destination matrix. Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
__kernel void gemm_reshape_rhs_matrix_nt(TENSOR3D_DECLARATION(src),
TENSOR3D_DECLARATION(dst))
{
// Block size
#define BLOCK_SIZE ((K0) * (N0))
// Output offset X
#if defined(INTERLEAVE)
#define OUTPUT_OFFSET_X (N0)
#else // defined(INTERLEAVE)
#define OUTPUT_OFFSET_X (BLOCK_SIZE)
#endif // defined(INTERLEAVE)
// Output step X
#if defined(INTERLEAVE)
#define OUTPUT_STEP_X ((N0) * (H0))
#else // Do not interleave
#define OUTPUT_STEP_X (N0)
#endif // defined(INTERLEAVE)
// Compute source and destination addresses
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
// ------------------ Compute input/output addresses ---------------------------
// Compute the input address
__global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)N0 * sizeof(DATA_TYPE) + y * (uint)K0 * src_stride_y + z * (uint)src_stride_z;
// Compute the output address
__global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (y * (uint)BLOCK_SIZE * (uint)H0 * sizeof(DATA_TYPE)) + ((x % (uint)H0) * (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE)) + ((
x / (uint)H0)
* (uint)dst_stride_y)
+ z * (uint)dst_stride_z;
// ---------------------------Load input values --------------------------------
    REPEAT_VAR_INIT_TO_CONST(K0, VEC_DATA_TYPE(DATA_TYPE, N0), a, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) a0=0, a1=0, ... a(K0-1)=0;
// Load values from the RHS matrix
a0 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y));
#if K0 > 1
if(y * (uint)K0 + 1 < SRC_HEIGHT)
{
a1 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y));
}
#endif // K0 > 1
#if K0 > 2
if(y * (uint)K0 + 2 < SRC_HEIGHT)
{
a2 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 2 * src_stride_y));
}
#endif // K0 > 2
#if K0 > 3
if(y * (uint)K0 + 3 < SRC_HEIGHT)
{
a3 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 3 * src_stride_y));
}
#endif // K0 > 3
#if K0 > 4
if(y * (uint)K0 + 4 < SRC_HEIGHT)
{
a4 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 4 * src_stride_y));
}
if(y * (uint)K0 + 5 < SRC_HEIGHT)
{
a5 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 5 * src_stride_y));
}
if(y * (uint)K0 + 6 < SRC_HEIGHT)
{
a6 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 6 * src_stride_y));
}
if(y * (uint)K0 + 7 < SRC_HEIGHT)
{
a7 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 7 * src_stride_y));
}
#endif // K0 > 4
#if K0 > 8
if(y * (uint)K0 + 8 < SRC_HEIGHT)
{
a8 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 8 * src_stride_y));
}
if(y * (uint)K0 + 9 < SRC_HEIGHT)
{
a9 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 9 * src_stride_y));
}
if(y * (uint)K0 + 10 < SRC_HEIGHT)
{
aA = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 10 * src_stride_y));
}
if(y * (uint)K0 + 11 < SRC_HEIGHT)
{
aB = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 11 * src_stride_y));
}
if(y * (uint)K0 + 12 < SRC_HEIGHT)
{
aC = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 12 * src_stride_y));
}
if(y * (uint)K0 + 13 < SRC_HEIGHT)
{
aD = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 13 * src_stride_y));
}
if(y * (uint)K0 + 14 < SRC_HEIGHT)
{
aE = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 14 * src_stride_y));
}
if(y * (uint)K0 + 15 < SRC_HEIGHT)
{
aF = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 15 * src_stride_y));
}
#endif // K0 > 8
// ---------------------------Store output values ------------------------------
REPEAT_VAR_INIT_TO_CONST(16, uint, zout, 0);
STORE_BLOCK(K0, N0, DATA_TYPE, a, output_ptr, OUTPUT_STEP_X * sizeof(DATA_TYPE), zout);
#undef BLOCK_SIZE
#undef OUTPUT_OFFSET_X
#undef OUTPUT_STEP_X
}
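/* Worked layout example for gemm_reshape_rhs_matrix_nt (illustrative values:
 * K0=2, N0=2, H0=2, 4x4 source). All four 2x2 blocks land on one output row:
 *
 *       | a b c d |
 * src = | e f g h |   without -DINTERLEAVE: a b e f c d g h i j m n k l o p
 *       | i j k l |   with    -DINTERLEAVE: a b c d e f g h i j k l m n o p
 *       | m n o p |
 */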
#if defined(TRANSPOSE)
/** This OpenCL kernel reshapes the RHS input matrix. The kernel splits the input matrix into blocks of size K0xN0 and stores each one (transposed) in
 * the output matrix, unrolling the values.
*
* @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
* @note The height of the input tensor must be passed at compile time using -DSRC_HEIGHT (e.g. -DSRC_HEIGHT=16)
* @note The block's dimensions (K0 and N0) must be passed at compile time using -DK0 and -DN0 (e.g. -DK0=2, -DN0=2).
* @note The number of K0xN0 vertical blocks to store on the same output row must be passed at compile time using -DH0 (e.g. -DH0=2)
 * @note If the K0xN0 blocks have to be interleaved, the option -DINTERLEAVE must be passed at compile time.
 * @note The option -DTRANSPOSE must be passed at compile time.
* @note Only the following values for K0, N0 and H0 are supported:
* N0: 2,3,4,8,16
* K0: 2,3,4,8,16
* H0: greater than 0
*
* @param[in] src_ptr Pointer to the source RHS tensor. Supported data types: All
* @param[in] src_stride_x Stride of the source RHS tensor in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source RHS tensor in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] src_stride_z Stride of the source RHS tensor in Z dimension (in bytes)
* @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] src_offset_first_element_in_bytes The offset of the first element in the source RHS tensor
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data types: same as @p src_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
__kernel void gemm_reshape_rhs_matrix_t(TENSOR3D_DECLARATION(src),
TENSOR3D_DECLARATION(dst))
{
// Block size
#define BLOCK_SIZE ((K0) * (N0))
// Output offset X
#if defined(INTERLEAVE)
#define OUTPUT_OFFSET_X (K0)
#else // defined(INTERLEAVE)
#define OUTPUT_OFFSET_X (BLOCK_SIZE)
#endif // defined(INTERLEAVE)
// Output step X
#if defined(INTERLEAVE)
#define OUTPUT_STEP_X (K0) * (H0)
#else // Do not interleave
#define OUTPUT_STEP_X (K0)
#endif // defined(INTERLEAVE)
// Compute source and destination addresses
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
// ------------------ Compute input/output addresses ---------------------------
// Compute the input address
__global uchar *input_ptr = src_ptr + src_offset_first_element_in_bytes + x * (uint)N0 * sizeof(DATA_TYPE) + y * (uint)K0 * src_stride_y + z * (uint)src_stride_z;
// Compute the output address
__global uchar *output_ptr = dst_ptr + dst_offset_first_element_in_bytes + (y * (uint)BLOCK_SIZE * (uint)H0 * sizeof(DATA_TYPE)) + ((x % H0) * (uint)OUTPUT_OFFSET_X * sizeof(DATA_TYPE)) + ((x /
(uint)H0) * (uint)dst_stride_y) + z * (uint)dst_stride_z;
// ---------------------------Load input values --------------------------------
REPEAT_VAR_INIT_TO_CONST(K0, VEC_DATA_TYPE(DATA_TYPE, N0), a, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) a0=0, a1=0, ... a(K0-1)=0;
// Load values from the RHS matrix
a0 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 0 * src_stride_y));
if(y * (uint)K0 + 1 < SRC_HEIGHT)
{
a1 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 1 * src_stride_y));
}
#if K0 > 2
if(y * (uint)K0 + 2 < SRC_HEIGHT)
{
a2 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 2 * src_stride_y));
}
#endif // K0 > 2
#if K0 > 3
if(y * (uint)K0 + 3 < SRC_HEIGHT)
{
a3 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 3 * src_stride_y));
}
#endif // K0 > 3
#if K0 > 4
if(y * (uint)K0 + 4 < SRC_HEIGHT)
{
a4 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 4 * src_stride_y));
}
if(y * (uint)K0 + 5 < SRC_HEIGHT)
{
a5 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 5 * src_stride_y));
}
if(y * (uint)K0 + 6 < SRC_HEIGHT)
{
a6 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 6 * src_stride_y));
}
if(y * (uint)K0 + 7 < SRC_HEIGHT)
{
a7 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 7 * src_stride_y));
}
#endif // K0 > 4
#if K0 > 8
if(y * (uint)K0 + 8 < SRC_HEIGHT)
{
a8 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 8 * src_stride_y));
}
if(y * (uint)K0 + 9 < SRC_HEIGHT)
{
a9 = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 9 * src_stride_y));
}
if(y * (uint)K0 + 10 < SRC_HEIGHT)
{
aA = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 10 * src_stride_y));
}
if(y * (uint)K0 + 11 < SRC_HEIGHT)
{
aB = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 11 * src_stride_y));
}
if(y * (uint)K0 + 12 < SRC_HEIGHT)
{
aC = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 12 * src_stride_y));
}
if(y * (uint)K0 + 13 < SRC_HEIGHT)
{
aD = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 13 * src_stride_y));
}
if(y * (uint)K0 + 14 < SRC_HEIGHT)
{
aE = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 14 * src_stride_y));
}
if(y * (uint)K0 + 15 < SRC_HEIGHT)
{
aF = VLOAD(N0)(0, (__global DATA_TYPE *)(input_ptr + 15 * src_stride_y));
}
#endif // K0 > 8
// ---------------------------Transpose the block ------------------------------
REPEAT_VAR_INIT_TO_CONST(N0, VEC_DATA_TYPE(DATA_TYPE, K0), res, 0); //VEC_DATA_TYPE(DATA_TYPE, K0) res0=0, res1=0, res2=0,... res(N0-1)=0;
#if K0 == 2
// This part computes the following transpositions:
// 2x2 -> 2x2
// 2x4 -> 4x2
// 2x8 -> 8x2
// 2x16 -> 16x2
res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0);
res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1);
#if N0 > 2
res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2);
#endif // N0 > 2
#if N0 > 3
res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3);
#endif // N0 > 3
#if N0 > 4
res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4);
res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5);
res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6);
res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7);
#endif // N0 > 4
#if N0 > 8
res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8);
res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9);
resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA);
resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB);
resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC);
resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD);
resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE);
resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF);
#endif // N0 > 8
#elif K0 == 3 // K0 == 3
// This part computes the following transpositions:
// 3x2 -> 2x3
// 3x4 -> 4x3
// 3x8 -> 8x3
// 3x16 -> 16x3
res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0);
res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1);
#if N0 > 2
res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2);
#endif // N0 > 2
#if N0 > 3
res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3);
#endif // N0 > 3
#if N0 > 4
res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4);
res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5);
res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6);
res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7);
#endif // N0 > 4
#if N0 > 8
res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8);
res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9);
resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA);
resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB);
resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC);
resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD);
resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE);
resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF);
#endif // N0 > 8
#elif K0 == 4 // K0 == 4
// This part computes the following transpositions:
// 4x2 -> 2x4
// 4x4 -> 4x4
// 4x8 -> 8x4
// 4x16 -> 16x4
res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0, a3.s0);
res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1, a3.s1);
#if N0 > 2
res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2, a3.s2);
#endif // N0 > 2
#if N0 > 3
res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3, a3.s3);
#endif // N0 > 3
#if N0 > 4
res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4, a3.s4);
res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5, a3.s5);
res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6, a3.s6);
res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7, a3.s7);
#endif // N0 > 4
#if N0 > 8
res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8, a3.s8);
res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9, a3.s9);
resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA, a3.sA);
resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB, a3.sB);
resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC, a3.sC);
resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD, a3.sD);
resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE, a3.sE);
resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF, a3.sF);
#endif // N0 > 8
#elif K0 == 8 // K0 == 8
// This part computes the following transpositions:
// 8x2 -> 2x8
// 8x4 -> 4x8
// 8x8 -> 8x8
// 8x16 -> 16x8
res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0, a3.s0, a4.s0, a5.s0, a6.s0, a7.s0);
res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1, a3.s1, a4.s1, a5.s1, a6.s1, a7.s1);
#if N0 > 2
res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2, a3.s2, a4.s2, a5.s2, a6.s2, a7.s2);
#endif // N0 > 2
#if N0 > 3
res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3, a3.s3, a4.s3, a5.s3, a6.s3, a7.s3);
#endif // N0 > 3
#if N0 > 4
res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4, a3.s4, a4.s4, a5.s4, a6.s4, a7.s4);
res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5, a3.s5, a4.s5, a5.s5, a6.s5, a7.s5);
res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6, a3.s6, a4.s6, a5.s6, a6.s6, a7.s6);
res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7, a3.s7, a4.s7, a5.s7, a6.s7, a7.s7);
#endif // N0 > 4
#if N0 > 8
res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8, a3.s8, a4.s8, a5.s8, a6.s8, a7.s8);
res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9, a3.s9, a4.s9, a5.s9, a6.s9, a7.s9);
resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA, a3.sA, a4.sA, a5.sA, a6.sA, a7.sA);
resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB, a3.sB, a4.sB, a5.sB, a6.sB, a7.sB);
resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC, a3.sC, a4.sC, a5.sC, a6.sC, a7.sC);
resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD, a3.sD, a4.sD, a5.sD, a6.sD, a7.sD);
resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE, a3.sE, a4.sE, a5.sE, a6.sE, a7.sE);
resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF, a3.sF, a4.sF, a5.sF, a6.sF, a7.sF);
#endif // N0 > 8
#elif K0 == 16 // K0 == 16
// This part computes the following transpositions:
// 16x2 -> 2x16
// 16x4 -> 4x16
// 16x8 -> 8x16
// 16x16 -> 16x16
res0 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s0, a1.s0, a2.s0, a3.s0, a4.s0, a5.s0, a6.s0, a7.s0,
a8.s0, a9.s0, aA.s0, aB.s0, aC.s0, aD.s0, aE.s0, aF.s0);
res1 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s1, a1.s1, a2.s1, a3.s1, a4.s1, a5.s1, a6.s1, a7.s1,
a8.s1, a9.s1, aA.s1, aB.s1, aC.s1, aD.s1, aE.s1, aF.s1);
#if N0 > 2
res2 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s2, a1.s2, a2.s2, a3.s2, a4.s2, a5.s2, a6.s2, a7.s2,
a8.s2, a9.s2, aA.s2, aB.s2, aC.s2, aD.s2, aE.s2, aF.s2);
#endif // N0 > 2
#if N0 > 3
res3 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s3, a1.s3, a2.s3, a3.s3, a4.s3, a5.s3, a6.s3, a7.s3,
a8.s3, a9.s3, aA.s3, aB.s3, aC.s3, aD.s3, aE.s3, aF.s3);
#endif // N0 > 3
#if N0 > 4
res4 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s4, a1.s4, a2.s4, a3.s4, a4.s4, a5.s4, a6.s4, a7.s4,
a8.s4, a9.s4, aA.s4, aB.s4, aC.s4, aD.s4, aE.s4, aF.s4);
res5 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s5, a1.s5, a2.s5, a3.s5, a4.s5, a5.s5, a6.s5, a7.s5,
a8.s5, a9.s5, aA.s5, aB.s5, aC.s5, aD.s5, aE.s5, aF.s5);
res6 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s6, a1.s6, a2.s6, a3.s6, a4.s6, a5.s6, a6.s6, a7.s6,
a8.s6, a9.s6, aA.s6, aB.s6, aC.s6, aD.s6, aE.s6, aF.s6);
res7 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s7, a1.s7, a2.s7, a3.s7, a4.s7, a5.s7, a6.s7, a7.s7,
a8.s7, a9.s7, aA.s7, aB.s7, aC.s7, aD.s7, aE.s7, aF.s7);
#endif // N0 > 4
#if N0 > 8
res8 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s8, a1.s8, a2.s8, a3.s8, a4.s8, a5.s8, a6.s8, a7.s8,
a8.s8, a9.s8, aA.s8, aB.s8, aC.s8, aD.s8, aE.s8, aF.s8);
res9 = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.s9, a1.s9, a2.s9, a3.s9, a4.s9, a5.s9, a6.s9, a7.s9,
a8.s9, a9.s9, aA.s9, aB.s9, aC.s9, aD.s9, aE.s9, aF.s9);
resA = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sA, a1.sA, a2.sA, a3.sA, a4.sA, a5.sA, a6.sA, a7.sA,
a8.sA, a9.sA, aA.sA, aB.sA, aC.sA, aD.sA, aE.sA, aF.sA);
resB = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sB, a1.sB, a2.sB, a3.sB, a4.sB, a5.sB, a6.sB, a7.sB,
a8.sB, a9.sB, aA.sB, aB.sB, aC.sB, aD.sB, aE.sB, aF.sB);
resC = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sC, a1.sC, a2.sC, a3.sC, a4.sC, a5.sC, a6.sC, a7.sC,
a8.sC, a9.sC, aA.sC, aB.sC, aC.sC, aD.sC, aE.sC, aF.sC);
resD = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sD, a1.sD, a2.sD, a3.sD, a4.sD, a5.sD, a6.sD, a7.sD,
a8.sD, a9.sD, aA.sD, aB.sD, aC.sD, aD.sD, aE.sD, aF.sD);
resE = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sE, a1.sE, a2.sE, a3.sE, a4.sE, a5.sE, a6.sE, a7.sE,
a8.sE, a9.sE, aA.sE, aB.sE, aC.sE, aD.sE, aE.sE, aF.sE);
resF = (VEC_DATA_TYPE(DATA_TYPE, K0))(a0.sF, a1.sF, a2.sF, a3.sF, a4.sF, a5.sF, a6.sF, a7.sF,
a8.sF, a9.sF, aA.sF, aB.sF, aC.sF, aD.sF, aE.sF, aF.sF);
#endif // N0 > 8
#else // K0 not supported
#error "K0 value not supported"
#endif // K0 conditions
// ---------------------------Store the output values ------------------------------
REPEAT_VAR_INIT_TO_CONST(16, uint, zout, 0);
STORE_BLOCK(N0, K0, DATA_TYPE, res, output_ptr, OUTPUT_STEP_X * sizeof(DATA_TYPE), zout);
#undef BLOCK_SIZE
#undef OUTPUT_OFFSET_X
#undef OUTPUT_STEP_X
}
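/* Worked layout example for gemm_reshape_rhs_matrix_t (illustrative values:
 * K0=2, N0=2, H0=2, 4x4 source). Each 2x2 block is transposed before being
 * unrolled on the single output row:
 *
 *       | a b c d |
 * src = | e f g h |   without -DINTERLEAVE: a e b f c g d h i m j n k o l p
 *       | i j k l |   with    -DINTERLEAVE: a e c g b f d h i m k o j n l p
 *       | m n o p |
 */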
#endif // defined(TRANSPOSE)
#endif // defined(K0) && defined(N0) && defined(H0) && defined(DATA_TYPE) && defined(SRC_HEIGHT)
#if defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(DATA_TYPE) && defined(M) && defined(N) && defined(K)
#define CONCAT(a, b) a##b
#define ARM_DOT1(a, b, c) \
({ \
c = fma(a, b, c); \
})
#define ARM_DOT2(a, b, c) \
({ \
c = fma(a.s0, b.s0, c); \
c = fma(a.s1, b.s1, c); \
})
#define ARM_DOT3(a, b, c) \
({ \
ARM_DOT2(a, b, c); \
c = fma((a.s2), (b.s2), c); \
})
#define ARM_DOT4(a, b, c) \
({ \
ARM_DOT3(a, b, c); \
c = fma((a.s3), (b.s3), c); \
})
#define ARM_DOT8(a, b, c) \
({ \
ARM_DOT4((a.lo), (b.lo), c); \
ARM_DOT4((a.hi), (b.hi), c); \
})
#define ARM_DOT16(a, b, c) \
({ \
ARM_DOT8((a.lo), (b.lo), c); \
ARM_DOT8((a.hi), (b.hi), c); \
})
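// For reference, ARM_DOT4(a, b, c) expands (through ARM_DOT3 and ARM_DOT2) to:
//   c = fma(a.s0, b.s0, c);
//   c = fma(a.s1, b.s1, c);
//   c = fma(a.s2, b.s2, c);
//   c = fma(a.s3, b.s3, c);
// i.e. c accumulates the dot product of a and b with fused multiply-adds.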
#if N0 == 2
#define ARM_DOT_K0XN0(k0, a, b, c) \
({ \
CONCAT(ARM_DOT, k0) \
((a), (b##0), (c.s0)); \
CONCAT(ARM_DOT, k0) \
((a), (b##1), (c.s1)); \
})
#elif N0 == 3 // N0 == 3
#define ARM_DOT_K0XN0(k0, a, b, c) \
({ \
CONCAT(ARM_DOT, k0) \
((a), (b##0), (c.s0)); \
CONCAT(ARM_DOT, k0) \
((a), (b##1), (c.s1)); \
CONCAT(ARM_DOT, k0) \
((a), (b##2), (c.s2)); \
})
#elif N0 == 4 // N0 == 4
#define ARM_DOT_K0XN0(k0, a, b, c) \
({ \
CONCAT(ARM_DOT, k0) \
((a), (b##0), (c.s0)); \
CONCAT(ARM_DOT, k0) \
((a), (b##1), (c.s1)); \
CONCAT(ARM_DOT, k0) \
((a), (b##2), (c.s2)); \
CONCAT(ARM_DOT, k0) \
((a), (b##3), (c.s3)); \
})
#elif N0 == 8 // N0 == 8
#define ARM_DOT_K0XN0(k0, a, b, c) \
({ \
CONCAT(ARM_DOT, k0) \
((a), (b##0), (c.s0)); \
CONCAT(ARM_DOT, k0) \
((a), (b##1), (c.s1)); \
CONCAT(ARM_DOT, k0) \
((a), (b##2), (c.s2)); \
CONCAT(ARM_DOT, k0) \
((a), (b##3), (c.s3)); \
CONCAT(ARM_DOT, k0) \
((a), (b##4), (c.s4)); \
CONCAT(ARM_DOT, k0) \
((a), (b##5), (c.s5)); \
CONCAT(ARM_DOT, k0) \
((a), (b##6), (c.s6)); \
CONCAT(ARM_DOT, k0) \
((a), (b##7), (c.s7)); \
})
#elif N0 == 16 // N0 == 16
#define ARM_DOT_K0XN0(k0, a, b, c) \
({ \
CONCAT(ARM_DOT, k0) \
((a), (b##0), (c.s0)); \
CONCAT(ARM_DOT, k0) \
((a), (b##1), (c.s1)); \
CONCAT(ARM_DOT, k0) \
((a), (b##2), (c.s2)); \
CONCAT(ARM_DOT, k0) \
((a), (b##3), (c.s3)); \
CONCAT(ARM_DOT, k0) \
((a), (b##4), (c.s4)); \
CONCAT(ARM_DOT, k0) \
((a), (b##5), (c.s5)); \
CONCAT(ARM_DOT, k0) \
((a), (b##6), (c.s6)); \
CONCAT(ARM_DOT, k0) \
((a), (b##7), (c.s7)); \
CONCAT(ARM_DOT, k0) \
((a), (b##8), (c.s8)); \
CONCAT(ARM_DOT, k0) \
((a), (b##9), (c.s9)); \
CONCAT(ARM_DOT, k0) \
((a), (b##A), (c.sA)); \
CONCAT(ARM_DOT, k0) \
((a), (b##B), (c.sB)); \
CONCAT(ARM_DOT, k0) \
((a), (b##C), (c.sC)); \
CONCAT(ARM_DOT, k0) \
((a), (b##D), (c.sD)); \
CONCAT(ARM_DOT, k0) \
((a), (b##E), (c.sE)); \
CONCAT(ARM_DOT, k0) \
((a), (b##F), (c.sF)); \
})
#else // N0 not supported
#error "N0 value not supported"
#endif // N0 conditions
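// For reference, ARM_DOT_K0XN0(4, a, b, c) with N0 == 2 expands to:
//   ARM_DOT4((a), (b0), (c.s0));
//   ARM_DOT4((a), (b1), (c.s1));
// i.e. one K0-long dot product per accumulator lane of c.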
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix is NOT reshaped
* The RHS is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
*
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
 * @note The GEMM's dimensions (M,N and K) must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=30 and -DK=90)
* @note The number of columns of LHS matrix must be passed at compile time using -DK (e.g. -DK=64)
* @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (e.g. -DN0=8, -DK0=4).
* @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
* @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
* - M0 = 1, 2, 3, 4, 5, 6, 7, 8
* - N0 = 2, 3, 4, 8, 16
* - K0 = 2, 3, 4, 8, 16
* - H0 >= 1
*
 * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must be passed at compile time as well, using -DA_VAL= and -DB_VAL= respectively.
 * The activation function is performed after the bias addition
* @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
* -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
* -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
* -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
* (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
*
* @param[in] lhs_ptr Pointer to the LHS matrix. Supported data type: F16/F32
* @param[in] lhs_stride_x Stride of the LHS matrix in X dimension (in bytes)
 * @param[in] lhs_step_x lhs_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] lhs_stride_y Stride of the LHS matrix in Y dimension (in bytes)
 * @param[in] lhs_step_y lhs_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
* @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
* @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in] rhs_step_x rhs_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in] rhs_step_y rhs_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
* @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
* @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
* @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
* @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
* @param[in] lhs_stride_z Stride of the LHS matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
*/
__kernel void gemm_mm_reshaped_only_rhs_t(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(rhs),
#if defined(BETA)
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
uint bias_stride_z,
#endif //defined(BETA)
uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
,
uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
)
{
// Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))
// RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X ((K0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (K0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)
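    // Note: with -DRHS_INTERLEAVE the K0-element rows of the H0 blocks sharing a
    // reshaped row alternate, so consecutive rows of one block are RHS_STEP_X = K0 * H0
    // elements apart; without it each block is stored contiguously (RHS_STEP_X = K0).
    // In both cases the per-iteration advance N0 * RHS_STEP_X * RHS_STEP_LOOP steps
    // past all H0 blocks packed on the same reshaped row.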
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
// Compute RHS reshaped matrix address
uint rhs_offset = rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X * sizeof(DATA_TYPE) + (x / (uint)H0) * rhs_stride_y;
#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if matrix B has only 3 dimensions while matrix A has more than 3
rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
rhs_offset += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)
REPEAT_VAR_INIT_TO_CONST(8, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0);
#if defined(REINTERPRET_INPUT_AS_3D)
    // The plane (zlhs) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply lhs_stride_z by DEPTH_GEMM3D
lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_INPUT_AS_3D)
// Add offset for batched GEMM
lhs_offset += z * lhs_stride_z;
#endif // defined(REINTERPRET_INPUT_AS_3D)
// Initialize the accumulators
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;
int i = 0;
for(; i <= (K - K0); i += K0)
{
// Supported cases (M0, K0):
// 1,2 - 1,3 - 1,4 - 1,8 - 1,16
// 2,2 - 2,3 - 2,4 - 2,8 - 2,16
// 3,2 - 3,3 - 3,4 - 3,8 - 3,16
// 4,2 - 4,3 - 4,4 - 4,8 - 4,16
// 5,2 - 5,3 - 5,4 - 5,8 - 5,16
// 6,2 - 6,3 - 6,4 - 6,8 - 6,16
// 7,2 - 7,3 - 7,4 - 7,8 - 7,16
// 8,2 - 8,3 - 8,4 - 8,8 - 8,16
// Load values from LHS matrix
LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
// Load values from RHS reshaped matrix
LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X * sizeof(DATA_TYPE), zero);
// Accumulate
ARM_DOT_K0XN0(K0, a0, b, c0);
#if M0 > 1
ARM_DOT_K0XN0(K0, a1, b, c1);
#endif // M0 > 1
#if M0 > 2
ARM_DOT_K0XN0(K0, a2, b, c2);
#endif // M0 > 2
#if M0 > 3
ARM_DOT_K0XN0(K0, a3, b, c3);
#endif // M0 > 3
#if M0 > 4
ARM_DOT_K0XN0(K0, a4, b, c4);
#endif // M0 > 4
#if M0 > 5
ARM_DOT_K0XN0(K0, a5, b, c5);
#endif // M0 > 5
#if M0 > 6
ARM_DOT_K0XN0(K0, a6, b, c6);
#endif // M0 > 6
#if M0 > 7
ARM_DOT_K0XN0(K0, a7, b, c7);
#endif // M0 > 7
lhs_offset += K0 * sizeof(DATA_TYPE);
rhs_offset += (N0 * RHS_STEP_X * RHS_STEP_LOOP) * sizeof(DATA_TYPE);
}
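    // After each iteration the accumulators hold the partial results
    //   c<m>.s<n> += dot(a<m>, b<n>)
    // i.e. an M0 x N0 tile of LHS rows against RHS columns (stored transposed).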
// Left-over accumulations
for(; i < K; ++i)
{
// Load values from LHS matrix
LOAD_BLOCK(M0, 1, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
// Load values from RHS reshaped matrix
LOAD_BLOCK(N0, 1, DATA_TYPE, b, rhs_ptr, rhs_offset, RHS_STEP_X * sizeof(DATA_TYPE), zero);
// Accumulate
ARM_DOT_K0XN0(1, a0, b, c0);
#if M0 > 1
ARM_DOT_K0XN0(1, a1, b, c1);
#endif // M0 > 1
#if M0 > 2
ARM_DOT_K0XN0(1, a2, b, c2);
#endif // M0 > 2
#if M0 > 3
ARM_DOT_K0XN0(1, a3, b, c3);
#endif // M0 > 3
#if M0 > 4
ARM_DOT_K0XN0(1, a4, b, c4);
#endif // M0 > 4
#if M0 > 5
ARM_DOT_K0XN0(1, a5, b, c5);
#endif // M0 > 5
#if M0 > 6
ARM_DOT_K0XN0(1, a6, b, c6);
#endif // M0 > 6
#if M0 > 7
ARM_DOT_K0XN0(1, a7, b, c7);
#endif // M0 > 7
lhs_offset += sizeof(DATA_TYPE);
rhs_offset += sizeof(DATA_TYPE);
}
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply dst_stride_z by DEPTH_GEMM3D
dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_OUTPUT_AS_3D)
// Add offset for batched GEMM
dst_addr += z * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
    // Multiply by the weight of the matrix-matrix product and store the result
#if defined(ALPHA)
SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
#endif // defined(ALPHA)
// Add beta*bias
#if defined(BETA)
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias[broadcasted]
ADD_BLOCK_BROADCAST(M0, c, bias0);
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias
ADD_BLOCK(M0, c, bias);
#endif // defined(BROADCAST_BIAS)
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = y == 0;
const bool cond_x = ((x + 1) * N0 >= N);
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
#undef RHS_STEP_LOOP
}
#if defined(OPENCL_IMAGE_SUPPORT)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices. The RHS matrix is stored in an OpenCL image
* The LHS matrix is NOT reshaped
* The RHS is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is transposed
*
* @note -DOPENCL_IMAGE_SUPPORT must be passed at compile time in order to compile this OpenCL kernel
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
 * @note The GEMM's dimensions (M,N and K) must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=30 and -DK=90)
* @note The height of the RHS matrix, defined before creating the OpenCL image object from the OpenCL buffer, should be passed at compile time using -DRHS_HEIGHT=<value> (e.g. -DRHS_HEIGHT=32)
 * Since we cannot create a 3D image from a buffer, the third dimension may be collapsed into the second dimension, so RHS_HEIGHT
 * can differ from the value returned by get_image_height(rhs_img).
* @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (e.g. -DN0=8, -DK0=4).
* @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
* @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
* - M0 = 1, 2, 3, 4, 5, 6, 7, 8
* - N0 = 4, 8, 16
* - K0 = 4, 8, 16
* - H0 >= 1
*
 * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must be passed at compile time as well, using -DA_VAL= and -DB_VAL= respectively.
 * The activation function is performed after the bias addition
* @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
* -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
* -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
* -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
* (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
*
* @param[in] lhs_ptr Pointer to the LHS matrix. Supported data type: F32
* @param[in] lhs_stride_x Stride of the LHS matrix in X dimension (in bytes)
 * @param[in] lhs_step_x lhs_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] lhs_stride_y Stride of the LHS matrix in Y dimension (in bytes)
 * @param[in] lhs_step_y lhs_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
 * @param[in] rhs_img The RHS reshaped matrix as an OpenCL image object. Supported data type: same as @p lhs_ptr
* @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
* @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
* @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
* @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
* @param[in] lhs_stride_z Stride of the LHS matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
*/
__kernel void gemm_mm_reshaped_only_rhs_t_texture(IMAGE_DECLARATION(lhs),
__read_only image2d_t rhs_img,
#if defined(BETA)
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
uint bias_stride_z,
#endif //defined(BETA)
uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
,
uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
)
{
// Pixel unit
#define PIXEL_UNIT CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(K0)
#define LEFTOVER_K (K % K0)
// Block size
#define RHS_BLOCK_SIZE (PIXEL_UNIT * (N0))
// RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (PIXEL_UNIT)
#define RHS_STEP_X (PIXEL_UNIT * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X PIXEL_UNIT
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)
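    // Note: PIXEL_UNIT is K0 expressed in image pixels, since each image read
    // returns one 4-channel pixel; e.g. assuming F32 data packed 4 elements per
    // pixel, K0 = 8 gives PIXEL_UNIT = 2 reads per K0 chunk of a reshaped row.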
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if matrix B has only 3 dimensions while matrix A has more than 3
const uint z_rhs = (get_global_id(2) % MATRIX_B_DEPTH);
#else // defined(MATRIX_B_DEPTH)
const uint z_rhs = get_global_id(2);
#endif // defined(MATRIX_B_DEPTH)
// Compute RHS matrix coordinates
uint x_rhs = (get_global_id(0) % H0) * (uint)RHS_OFFSET_X;
const uint y_rhs = (get_global_id(0) / (uint)H0) + z_rhs * RHS_HEIGHT;
REPEAT_VAR_INIT_TO_CONST(M0, uint, zlhs, 0);
REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0);
#if defined(REINTERPRET_INPUT_AS_3D)
    // The plane (zlhs) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply lhs_stride_z by DEPTH_GEMM3D
lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_INPUT_AS_3D)
// Add offset for batched GEMM
lhs_offset += z * lhs_stride_z;
#endif // defined(REINTERPRET_INPUT_AS_3D)
// Initialize the accumulators
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c, 0);
int i = 0;
for(; i <= (K - K0); i += K0)
{
// Load values from LHS matrix
LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
// Load values from RHS matrix stored in a cl_image
REPEAT_VAR_INIT_TO_CONST(N0, VEC_DATA_TYPE(DATA_TYPE, K0), b, 0);
LOAD_TEXTURE2D(N0, PIXEL_UNIT, DATA_TYPE, b, rhs_img, x_rhs, y_rhs, RHS_STEP_X, 0);
// Accumulate
ARM_DOT_K0XN0(K0, a0, b, c0);
#if M0 > 1
ARM_DOT_K0XN0(K0, a1, b, c1);
#endif // M0 > 1
#if M0 > 2
ARM_DOT_K0XN0(K0, a2, b, c2);
#endif // M0 > 2
#if M0 > 3
ARM_DOT_K0XN0(K0, a3, b, c3);
#endif // M0 > 3
#if M0 > 4
ARM_DOT_K0XN0(K0, a4, b, c4);
#endif // M0 > 4
#if M0 > 5
ARM_DOT_K0XN0(K0, a5, b, c5);
#endif // M0 > 5
#if M0 > 6
ARM_DOT_K0XN0(K0, a6, b, c6);
#endif // M0 > 6
#if M0 > 7
ARM_DOT_K0XN0(K0, a7, b, c7);
#endif // M0 > 7
lhs_offset += K0 * sizeof(DATA_TYPE);
x_rhs += N0 * RHS_STEP_X * RHS_STEP_LOOP;
}
#if LEFTOVER_K != 0
    // Note: We cannot read out-of-bound elements from the RHS matrix because
    // the RHS width is always a multiple of K0. This is not true for the LHS matrix.
union UNION_VEC_TYPE
{
DATA_TYPE s[K0];
VEC_DATA_TYPE(DATA_TYPE, K0)
v;
};
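    // The union lets the leftover columns be filled one scalar at a time through
    // .s[k] while the full K0-wide vector .v (zero-initialized below) remains
    // usable by ARM_DOT_K0XN0; the unwritten tail lanes stay zero.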
union UNION_VEC_TYPE a0 = {.v = 0 };
#if M0 > 1
union UNION_VEC_TYPE a1 = {.v = 0 };
#endif // M0 > 1
#if M0 > 2
union UNION_VEC_TYPE a2 = {.v = 0 };
#endif // M0 > 2
#if M0 > 3
union UNION_VEC_TYPE a3 = {.v = 0 };
#endif // M0 > 3
#if M0 > 4
union UNION_VEC_TYPE a4 = {.v = 0 };
#endif // M0 > 4
#if M0 > 5
union UNION_VEC_TYPE a5 = {.v = 0 };
#endif // M0 > 5
#if M0 > 6
union UNION_VEC_TYPE a6 = {.v = 0 };
#endif // M0 > 6
#if M0 > 7
union UNION_VEC_TYPE a7 = {.v = 0 };
#endif // M0 > 7
REPEAT_VAR_INIT_TO_CONST(N0, VEC_DATA_TYPE(DATA_TYPE, K0), b, 0);
// Load from RHS matrix
LOAD_TEXTURE2D(N0, PIXEL_UNIT, DATA_TYPE, b, rhs_img, x_rhs, y_rhs, RHS_STEP_X, 0);
// Load from LHS matrix
for(int k = 0; k < LEFTOVER_K; ++k)
{
a0.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 0 * lhs_stride_y + zlhs0);
#if M0 > 1
a1.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 1 * lhs_stride_y + zlhs1);
#endif // M0 > 1
#if M0 > 2
a2.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 2 * lhs_stride_y + zlhs2);
#endif // M0 > 2
#if M0 > 3
a3.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 3 * lhs_stride_y + zlhs3);
#endif // M0 > 3
#if M0 > 4
a4.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 4 * lhs_stride_y + zlhs4);
#endif // M0 > 4
#if M0 > 5
a5.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 5 * lhs_stride_y + zlhs5);
#endif // M0 > 5
#if M0 > 6
a6.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 6 * lhs_stride_y + zlhs6);
#endif // M0 > 6
#if M0 > 7
a7.s[k] = *(__global DATA_TYPE *)(lhs_ptr + lhs_offset + 7 * lhs_stride_y + zlhs7);
#endif // M0 > 7
lhs_offset += sizeof(DATA_TYPE);
}
// Accumulate
ARM_DOT_K0XN0(K0, a0.v, b, c0);
#if M0 > 1
ARM_DOT_K0XN0(K0, a1.v, b, c1);
#endif // M0 > 1
#if M0 > 2
ARM_DOT_K0XN0(K0, a2.v, b, c2);
#endif // M0 > 2
#if M0 > 3
ARM_DOT_K0XN0(K0, a3.v, b, c3);
#endif // M0 > 3
#if M0 > 4
ARM_DOT_K0XN0(K0, a4.v, b, c4);
#endif // M0 > 4
#if M0 > 5
ARM_DOT_K0XN0(K0, a5.v, b, c5);
#endif // M0 > 5
#if M0 > 6
ARM_DOT_K0XN0(K0, a6.v, b, c6);
#endif // M0 > 6
#if M0 > 7
ARM_DOT_K0XN0(K0, a7.v, b, c7);
#endif // M0 > 7
#endif // LEFTOVER_K != 0
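    // Note: the full-width dot products above remain correct for the leftover
    // columns because the tail lanes of each a<m>.v stay zero, so the extra fma
    // terms contribute nothing (assuming the RHS padding holds finite values).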
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply dst_stride_z by DEPTH_GEMM3D
dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_OUTPUT_AS_3D)
// Add offset for batched GEMM
dst_addr += z * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
    // Multiply by the weight of the matrix-matrix product and store the result
#if defined(ALPHA)
SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
#endif // defined(ALPHA)
// Add beta*bias
#if defined(BETA)
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias[broadcasted]
ADD_BLOCK_BROADCAST(M0, c, bias0);
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias
ADD_BLOCK(M0, c, bias);
#endif // defined(BROADCAST_BIAS)
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = y == 0;
const bool cond_x = ((x + 1) * N0 >= N);
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
#undef RHS_STEP_LOOP
#undef LEFTOVER_K
#undef PIXEL_UNIT
}
#endif // defined(OPENCL_IMAGE_SUPPORT)
#define VFMA(a, b, c) \
({ \
c = fma(a, b, c); \
})
#if M0 == 1
#define VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
})
#elif M0 == 2 // M0 == 2
#define VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
})
#elif M0 == 3 // M0 == 3
#define VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
})
#elif M0 == 4 // M0 == 4
#define VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
})
#elif M0 == 5 // M0 == 5
#define VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
})
#elif M0 == 6 // M0 == 6
#define VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
})
#elif M0 == 7 // M0 == 7
#define VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
})
#elif M0 == 8 // M0 == 8
#define VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##7).s##i), b, (c##7)); \
})
#else // M0 not supported
#error "M0 not supported"
#endif // M0 conditions
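// For reference, VFMA_M0xN0(i, a, b, c) performs a rank-1 update: it broadcasts
// element i of each LHS row vector across an N0-wide vector and accumulates it
// against the RHS row b. With M0 == 2, VFMA_M0xN0(0, a, b0, c) expands to:
//   c0 = fma((VEC_DATA_TYPE(DATA_TYPE, N0))(a0.s0), b0, c0);
//   c1 = fma((VEC_DATA_TYPE(DATA_TYPE, N0))(a1.s0), b0, c1);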
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix is NOT reshaped
* The RHS is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is NOT transposed
*
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
 * @note The GEMM's dimensions (M,N and K) must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=30 and -DK=90).
* @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (e.g. -DN0=8, -DK0=4).
* @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
* @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
* - M0 = 1, 2, 3, 4, 5, 6, 7, 8
* - N0 = 2, 3, 4, 8, 16
* - K0 = 2, 3, 4, 8, 16
* - H0 >= 1
*
 * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions must be passed at compile time as well, using -DA_VAL= and -DB_VAL= respectively.
 * The activation function is performed after the bias addition
* @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
* -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
* -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
* -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
* (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns LHS matrix
*
* @param[in] lhs_ptr Pointer to the LHS matrix. Supported data type: F16/F32
* @param[in] lhs_stride_x Stride of the LHS matrix in X dimension (in bytes)
 * @param[in] lhs_step_x lhs_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] lhs_stride_y Stride of the LHS matrix in Y dimension (in bytes)
 * @param[in] lhs_step_y lhs_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
* @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
* @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
 * @param[in] rhs_step_x rhs_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
 * @param[in] rhs_step_y rhs_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
* @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
* @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
* @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
* @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
 * @param[out] dst_ptr Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
* @param[in] lhs_stride_z Stride of the LHS matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
*/
__kernel void gemm_mm_reshaped_only_rhs_nt(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(rhs),
#if defined(BETA)
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
uint bias_stride_z,
#endif //defined(BETA)
uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
,
uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
)
{
// Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))
// RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (N0)
#define RHS_STEP_X ((N0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (N0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
// Compute RHS reshaped matrix address
uint rhs_offset = rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X * sizeof(DATA_TYPE) + (x / (uint)H0) * rhs_stride_y;
#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if matrix B has only 3 dimensions while matrix A has more than 3
rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
rhs_offset += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)
REPEAT_VAR_INIT_TO_CONST(8, uint, zin, 0); //uint zin0=0,zin1=0,zin2=0,... zin7=0;
    REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0); //uint zero0=0,zero1=0,zero2=0,... zeroF=0;
#if defined(REINTERPRET_INPUT_AS_3D)
    // The plane (zin) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zin, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply lhs_stride_z by DEPTH_GEMM3D
lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_INPUT_AS_3D)
// Add offset for batched GEMM
lhs_offset += z * lhs_stride_z;
#endif // defined(REINTERPRET_INPUT_AS_3D)
// Initialize the accumulators
    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;
int i = 0;
for(; i <= (K - K0); i += K0)
{
// Supported cases (M0, K0):
// 1,2 - 1,3 - 1,4 - 1,8 - 1,16
// 2,2 - 2,3 - 2,4 - 2,8 - 2,16
// 3,2 - 3,3 - 3,4 - 3,8 - 3,16
// 4,2 - 4,3 - 4,4 - 4,8 - 4,16
// 5,2 - 5,3 - 5,4 - 5,8 - 5,16
// 6,2 - 6,3 - 6,4 - 6,8 - 6,16
// 7,2 - 7,3 - 7,4 - 7,8 - 7,16
// 8,2 - 8,3 - 8,4 - 8,8 - 8,16
// Load values from LHS matrix
LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zin);
VEC_DATA_TYPE(DATA_TYPE, N0)
b0;
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 0 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(0, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 1 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(1, a, b0, c);
#if K0 > 2
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 2 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(2, a, b0, c);
#endif // K0 > 2
#if K0 > 3
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 3 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(3, a, b0, c);
#endif // K0 > 3
#if K0 > 4
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 4 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(4, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 5 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(5, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 6 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(6, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 7 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(7, a, b0, c);
#endif // K0 > 4
#if K0 > 8
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 8 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(8, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 9 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(9, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 10 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(A, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 11 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(B, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 12 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(C, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 13 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(D, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 14 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(E, a, b0, c);
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 15 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(F, a, b0, c);
#endif // K0 > 8
lhs_offset += K0 * sizeof(DATA_TYPE);
rhs_offset += K0 * RHS_STEP_X * RHS_STEP_LOOP * sizeof(DATA_TYPE);
}
// Left-over accumulations
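// (e.g. with K = 20 and K0 = 8, the main loop above covers k = 0..15 and this loop
// performs the remaining 4 accumulations one k at a time)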
for(; i < K; ++i)
{
// Load values from LHS matrix
VEC_DATA_TYPE(DATA_TYPE, 2)
a0 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 0 * lhs_stride_y + zin0));
#if M0 > 1
VEC_DATA_TYPE(DATA_TYPE, 2)
a1 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 1 * lhs_stride_y + zin1));
#endif // M0 > 1
#if M0 > 2
VEC_DATA_TYPE(DATA_TYPE, 2)
a2 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 2 * lhs_stride_y + zin2));
#endif // M0 > 2
#if M0 > 3
VEC_DATA_TYPE(DATA_TYPE, 2)
a3 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 3 * lhs_stride_y + zin3));
#endif // M0 > 3
#if M0 > 4
VEC_DATA_TYPE(DATA_TYPE, 2)
a4 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 4 * lhs_stride_y + zin4));
#endif // M0 > 4
#if M0 > 5
VEC_DATA_TYPE(DATA_TYPE, 2)
a5 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 5 * lhs_stride_y + zin5));
#endif // M0 > 5
#if M0 > 6
VEC_DATA_TYPE(DATA_TYPE, 2)
a6 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 6 * lhs_stride_y + zin6));
#endif // M0 > 6
#if M0 > 7
VEC_DATA_TYPE(DATA_TYPE, 2)
a7 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 7 * lhs_stride_y + zin7));
#endif // M0 > 7
VEC_DATA_TYPE(DATA_TYPE, N0)
b0;
b0 = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 0 * RHS_STEP_X * sizeof(DATA_TYPE)));
VFMA_M0xN0(0, a, b0, c);
lhs_offset += sizeof(DATA_TYPE);
rhs_offset += RHS_STEP_X * sizeof(DATA_TYPE);
}
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
#if defined(REINTERPRET_OUTPUT_AS_3D)
// The plane (zout) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply dst_stride_z by DEPTH_GEMM3D
dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_OUTPUT_AS_3D)
// Add offset for batched GEMM
dst_addr += z * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
// Multiply by the weight of matrix-matrix product and store the result
#if defined(ALPHA)
SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
#endif // defined(ALPHA)
// Add beta*bias
#if defined(BETA)
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias[broadcasted]
ADD_BLOCK_BROADCAST(M0, c, bias0);
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias
ADD_BLOCK(M0, c, bias);
#endif // defined(BROADCAST_BIAS)
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = y == 0;
const bool cond_x = ((x + 1) * N0 >= N);
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
#undef RHS_STEP_LOOP
}
#if defined(OPENCL_IMAGE_SUPPORT)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix is NOT reshaped
* The RHS is reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the block K0xN0 is NOT transposed
*
* @note -DOPENCL_IMAGE_SUPPORT must be passed at compile time in order to compile this OpenCL kernel
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
* @note The GEMM's dimensions (M,N and K) must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=30 and -DK=90).
* @note The height of the RHS matrix, defined before creating the OpenCL image object from the OpenCL buffer, should be passed at compile time using -DRHS_HEIGHT=<value> (e.g. -DRHS_HEIGHT=32)
* Since a 3D image cannot be created from a buffer, the third dimension may have been collapsed into the second, so RHS_HEIGHT
* can differ from the value returned by get_image_height(rhs_img).
* @note The block's dimensions used for reshaping the RHS matrix (N0 and K0) must be passed at compile time using -DN0 and -DK0 (e.g. -DN0=8, -DK0=4).
* @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
* @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
* @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
* - M0 = 1, 2, 3, 4, 5, 6, 7, 8
* - N0 = 4, 8, 16
* - K0 = 4, 8, 16
* - H0 >= 1
*
* @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
* The activation function is performed after the bias addition
* @note In case the input or output have to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
* -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
* -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
* -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
* (HEIGHT_GEMM3D * DEPTH_GEMM3D) = rows of the LHS matrix
*
* @param[in] lhs_ptr Pointer to the LHS matrix. Supported data type: F32
* @param[in] lhs_stride_x Stride of the LHS matrix in X dimension (in bytes)
* @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] lhs_stride_y Stride of the LHS matrix in Y dimension (in bytes)
* @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
* @param[in] rhs_img The RHS reshaped matrix as OpenCL image object. Supported data type: same as @p lhs_ptr
* @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
* @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
* @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
* @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
* @param[out] dst_ptr Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
* @param[in] lhs_stride_z Stride of the LHS matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
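* @note Example build options (for illustration only; the values must match the actual GEMM configuration):
*       "-DOPENCL_IMAGE_SUPPORT -DDATA_TYPE=float -DM=64 -DN=64 -DK=32 -DM0=4 -DN0=4 -DK0=4 -DH0=2 -DRHS_HEIGHT=32 -DPARTIAL_STORE_M0=0 -DPARTIAL_STORE_N0=0"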
*/
__kernel void gemm_mm_reshaped_only_rhs_nt_texture(IMAGE_DECLARATION(lhs),
__read_only image2d_t rhs_img,
#if defined(BETA)
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
uint bias_stride_z,
#endif // defined(BETA)
uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
,
uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
)
{
// Pixel unit
#define PIXEL_UNIT CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(N0)
// Block size
#define RHS_BLOCK_SIZE ((K0) * (PIXEL_UNIT))
// RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (PIXEL_UNIT)
#define RHS_STEP_X ((PIXEL_UNIT) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (PIXEL_UNIT)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)
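// Example (illustrative): PIXEL_UNIT converts N0 elements into 4-channel texels,
// e.g. N0 = 8 with float data gives PIXEL_UNIT = 2 (two float4 pixels per read).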
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
#if defined(MATRIX_B_DEPTH)
// Do not slide matrix B if matrix B has 3 dimensions and matrix A has more than 3
const uint z_rhs = (z % MATRIX_B_DEPTH);
#else // defined(MATRIX_B_DEPTH)
const uint z_rhs = z;
#endif // defined(MATRIX_B_DEPTH)
// Compute RHS matrix coordinates
uint x_rhs = (x % H0) * (uint)RHS_OFFSET_X;
const uint y_rhs = (x / (uint)H0) + z_rhs * RHS_HEIGHT;
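// Example (illustrative): with H0 = 4, x = 6 and z = 0, x_rhs starts at
// (6 % 4) * RHS_OFFSET_X = 2 * RHS_OFFSET_X and y_rhs = 6 / 4 = 1, i.e. block
// slot 2 on image row 1.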
REPEAT_VAR_INIT_TO_CONST(8, uint, zin, 0);
REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0);
#if defined(REINTERPRET_INPUT_AS_3D)
// The plane (zin) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zin, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply lhs_stride_z by DEPTH_GEMM3D
lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_INPUT_AS_3D)
// Add offset for batched GEMM
lhs_offset += z * lhs_stride_z;
#endif // defined(REINTERPRET_INPUT_AS_3D)
// Initialize the accumulators
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c, 0);
int i = 0;
for(; i <= (K - K0); i += K0)
{
// Load values from LHS matrix
LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zin);
VEC_DATA_TYPE(DATA_TYPE, N0)
b0;
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 0 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(0, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 1 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(1, a, b0, c);
#if K0 > 2
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 2 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(2, a, b0, c);
#endif // K0 > 2
#if K0 > 3
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 3 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(3, a, b0, c);
#endif // K0 > 3
#if K0 > 4
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 4 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(4, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 5 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(5, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 6 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(6, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 7 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(7, a, b0, c);
#endif // K0 > 4
#if K0 > 8
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 8 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(8, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 9 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(9, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 10 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(A, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 11 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(B, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 12 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(C, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 13 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(D, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 14 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(E, a, b0, c);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 15 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(F, a, b0, c);
#endif // K0 > 8
lhs_offset += K0 * sizeof(DATA_TYPE);
x_rhs += K0 * RHS_STEP_X * RHS_STEP_LOOP;
}
// Left-over accumulations
for(; i < K; ++i)
{
// Load values from LHS matrix
VEC_DATA_TYPE(DATA_TYPE, 2)
a0 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 0 * lhs_stride_y + zin0));
#if M0 > 1
VEC_DATA_TYPE(DATA_TYPE, 2)
a1 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 1 * lhs_stride_y + zin1));
#endif // M0 > 1
#if M0 > 2
VEC_DATA_TYPE(DATA_TYPE, 2)
a2 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 2 * lhs_stride_y + zin2));
#endif // M0 > 2
#if M0 > 3
VEC_DATA_TYPE(DATA_TYPE, 2)
a3 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 3 * lhs_stride_y + zin3));
#endif // M0 > 3
#if M0 > 4
VEC_DATA_TYPE(DATA_TYPE, 2)
a4 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 4 * lhs_stride_y + zin4));
#endif // M0 > 4
#if M0 > 5
VEC_DATA_TYPE(DATA_TYPE, 2)
a5 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 5 * lhs_stride_y + zin5));
#endif // M0 > 5
#if M0 > 6
VEC_DATA_TYPE(DATA_TYPE, 2)
a6 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 6 * lhs_stride_y + zin6));
#endif // M0 > 6
#if M0 > 7
VEC_DATA_TYPE(DATA_TYPE, 2)
a7 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 7 * lhs_stride_y + zin7));
#endif // M0 > 7
VEC_DATA_TYPE(DATA_TYPE, N0)
b0;
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 0 * RHS_STEP_X), (y_rhs));
VFMA_M0xN0(0, a, b0, c);
lhs_offset += sizeof(DATA_TYPE);
x_rhs += RHS_STEP_X;
}
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
REPEAT_VAR_INIT_TO_CONST(8, uint, zout, 0); //uint zout0=0,zout1=0,zout2=0,... zout7=0;
#if defined(REINTERPRET_OUTPUT_AS_3D)
// The plane (zout) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply dst_stride_z by DEPTH_GEMM3D
dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_OUTPUT_AS_3D)
// Add offset for batched GEMM
dst_addr += z * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
// Multiply by the weight of matrix-matrix product and store the result
#if defined(ALPHA)
SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
#endif // defined(ALPHA)
// Add beta*bias
#if defined(BETA)
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias[broadcasted]
ADD_BLOCK_BROADCAST(M0, c, bias0);
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias
ADD_BLOCK(M0, c, bias);
#endif // defined(BROADCAST_BIAS)
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = y == 0;
const bool cond_x = ((x + 1) * N0 >= N);
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
#undef RHS_STEP_LOOP
#undef PIXEL_UNIT
}
#endif // defined(OPENCL_IMAGE_SUPPORT)
#endif // defined(M0) && defined(N0) && defined(K0) && defined(H0) && defined(DATA_TYPE) && defined(M) && defined(N) && defined(K)
#if defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(DATA_TYPE) && defined(DATA_TYPE_ACCUMULATOR) && defined(M) && defined(N)
#if defined(MIXED_PRECISION)
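// Note: in mixed precision the products below are computed in DATA_TYPE (e.g. half)
// and accumulated into c, whose type is the wider DATA_TYPE_ACCUMULATOR (e.g. float),
// so only the accumulation gains precision.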
#if K0 == 2
#define ARM_DOT_K0(a, b, c) \
({ \
c += a.s0 * b.s0; \
c += a.s1 * b.s1; \
})
#elif K0 == 3 // K0 == 3
#define ARM_DOT_K0(a, b, c) \
({ \
c += a.s0 * b.s0; \
c += a.s1 * b.s1; \
c += a.s2 * b.s2; \
})
#elif K0 == 4 // K0 == 4
#define ARM_DOT_K0(a, b, c) \
({ \
c += a.s0 * b.s0; \
c += a.s1 * b.s1; \
c += a.s2 * b.s2; \
c += a.s3 * b.s3; \
})
#elif K0 == 8 // K0 == 8
#define ARM_DOT_K0(a, b, c) \
({ \
c += a.s0 * b.s0; \
c += a.s1 * b.s1; \
c += a.s2 * b.s2; \
c += a.s3 * b.s3; \
c += a.s4 * b.s4; \
c += a.s5 * b.s5; \
c += a.s6 * b.s6; \
c += a.s7 * b.s7; \
})
#elif K0 == 16 // K0 == 16
#define ARM_DOT_K0(a, b, c) \
({ \
c += a.s0 * b.s0; \
c += a.s1 * b.s1; \
c += a.s2 * b.s2; \
c += a.s3 * b.s3; \
c += a.s4 * b.s4; \
c += a.s5 * b.s5; \
c += a.s6 * b.s6; \
c += a.s7 * b.s7; \
c += a.s8 * b.s8; \
c += a.s9 * b.s9; \
c += a.sA * b.sA; \
c += a.sB * b.sB; \
c += a.sC * b.sC; \
c += a.sD * b.sD; \
c += a.sE * b.sE; \
c += a.sF * b.sF; \
})
#else // K0 not supported
#error "K0 value not supported"
#endif // K0 conditions
#else // defined(MIXED_PRECISION)
#if K0 == 2
#define ARM_DOT_K0(a, b, c) \
({ \
c = fma(a.s0, b.s0, c); \
c = fma(a.s1, b.s1, c); \
})
#elif K0 == 3 // K0 == 3
#define ARM_DOT_K0(a, b, c) \
({ \
c = fma(a.s0, b.s0, c); \
c = fma(a.s1, b.s1, c); \
c = fma(a.s2, b.s2, c); \
})
#elif K0 == 4 // K0 == 4
#define ARM_DOT_K0(a, b, c) \
({ \
c = fma(a.s0, b.s0, c); \
c = fma(a.s1, b.s1, c); \
c = fma(a.s2, b.s2, c); \
c = fma(a.s3, b.s3, c); \
})
#elif K0 == 8 // K0 == 8
#define ARM_DOT_K0(a, b, c) \
({ \
c = fma(a.s0, b.s0, c); \
c = fma(a.s1, b.s1, c); \
c = fma(a.s2, b.s2, c); \
c = fma(a.s3, b.s3, c); \
c = fma(a.s4, b.s4, c); \
c = fma(a.s5, b.s5, c); \
c = fma(a.s6, b.s6, c); \
c = fma(a.s7, b.s7, c); \
})
#elif K0 == 16 // K0 == 16
#define ARM_DOT_K0(a, b, c) \
({ \
c = fma(a.s0, b.s0, c); \
c = fma(a.s1, b.s1, c); \
c = fma(a.s2, b.s2, c); \
c = fma(a.s3, b.s3, c); \
c = fma(a.s4, b.s4, c); \
c = fma(a.s5, b.s5, c); \
c = fma(a.s6, b.s6, c); \
c = fma(a.s7, b.s7, c); \
c = fma(a.s8, b.s8, c); \
c = fma(a.s9, b.s9, c); \
c = fma(a.sA, b.sA, c); \
c = fma(a.sB, b.sB, c); \
c = fma(a.sC, b.sC, c); \
c = fma(a.sD, b.sD, c); \
c = fma(a.sE, b.sE, c); \
c = fma(a.sF, b.sF, c); \
})
#else // K0 not supported
#error "K0 value not supported"
#endif // K0 conditions
#endif // defined(MIXED_PRECISION)
#if N0 == 2
#define ARM_DOT_K0XN0(a, b, c) \
({ \
ARM_DOT_K0((a), (b##0), (c.s0)); \
ARM_DOT_K0((a), (b##1), (c.s1)); \
})
#elif N0 == 3 // N0 == 3
#define ARM_DOT_K0XN0(a, b, c) \
({ \
ARM_DOT_K0((a), (b##0), (c.s0)); \
ARM_DOT_K0((a), (b##1), (c.s1)); \
ARM_DOT_K0((a), (b##2), (c.s2)); \
})
#elif N0 == 4 // N0 == 4
#define ARM_DOT_K0XN0(a, b, c) \
({ \
ARM_DOT_K0((a), (b##0), (c.s0)); \
ARM_DOT_K0((a), (b##1), (c.s1)); \
ARM_DOT_K0((a), (b##2), (c.s2)); \
ARM_DOT_K0((a), (b##3), (c.s3)); \
})
#elif N0 == 8 // N0 == 8
#define ARM_DOT_K0XN0(a, b, c) \
({ \
ARM_DOT_K0((a), (b##0), (c.s0)); \
ARM_DOT_K0((a), (b##1), (c.s1)); \
ARM_DOT_K0((a), (b##2), (c.s2)); \
ARM_DOT_K0((a), (b##3), (c.s3)); \
ARM_DOT_K0((a), (b##4), (c.s4)); \
ARM_DOT_K0((a), (b##5), (c.s5)); \
ARM_DOT_K0((a), (b##6), (c.s6)); \
ARM_DOT_K0((a), (b##7), (c.s7)); \
})
#elif N0 == 16 // N0 == 16
#define ARM_DOT_K0XN0(a, b, c) \
({ \
ARM_DOT_K0((a), (b##0), (c.s0)); \
ARM_DOT_K0((a), (b##1), (c.s1)); \
ARM_DOT_K0((a), (b##2), (c.s2)); \
ARM_DOT_K0((a), (b##3), (c.s3)); \
ARM_DOT_K0((a), (b##4), (c.s4)); \
ARM_DOT_K0((a), (b##5), (c.s5)); \
ARM_DOT_K0((a), (b##6), (c.s6)); \
ARM_DOT_K0((a), (b##7), (c.s7)); \
ARM_DOT_K0((a), (b##8), (c.s8)); \
ARM_DOT_K0((a), (b##9), (c.s9)); \
ARM_DOT_K0((a), (b##A), (c.sA)); \
ARM_DOT_K0((a), (b##B), (c.sB)); \
ARM_DOT_K0((a), (b##C), (c.sC)); \
ARM_DOT_K0((a), (b##D), (c.sD)); \
ARM_DOT_K0((a), (b##E), (c.sE)); \
ARM_DOT_K0((a), (b##F), (c.sF)); \
})
#else // N0 not supported
#error "N0 value not supported"
#endif // N0 conditions
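// Example (illustrative): with N0 = 2 and K0 = 2 (no MIXED_PRECISION),
// ARM_DOT_K0XN0(a0, b, c0) expands to:
//   c0.s0 = fma(a0.s0, b0.s0, c0.s0); c0.s0 = fma(a0.s1, b0.s1, c0.s0);
//   c0.s1 = fma(a0.s0, b1.s0, c0.s1); c0.s1 = fma(a0.s1, b1.s1, c0.s1);
// i.e. accumulator lane n receives the dot product of a0 with b##n.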
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 blocks must NOT be transposed
* The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 blocks must be transposed
*
* @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
* @note The data type used for the accumulators must be passed at compile time using -DDATA_TYPE_ACCUMULATOR (e.g. -DDATA_TYPE_ACCUMULATOR=float)
* @note The F16 computation also supports mixed precision through the option -DMIXED_PRECISION passed at compile time. If enabled, DATA_TYPE_ACCUMULATOR should be set to float
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
* @note The GEMM's dimensions M, N and K must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=90 and -DK=24).
* @note The block's dimensions used for reshaping the LHS matrix and the RHS matrix (M0, N0 and K0) must be passed at compile time using -DM0, -DN0 and -DK0 (e.g. -DM0=4, -DN0=8, -DK0=4).
* @note The number of M0xK0 vertical blocks stored on the same output row of the reshaped LHS matrix must be passed at compile time using -DV0 (e.g. -DV0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
* @note If the M0xK0 blocks in the reshaped LHS matrix have been interleaved, the option -DLHS_INTERLEAVE must be passed at compile time.
* @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
* @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
* - M0 = 2, 3, 4, 5, 6, 7, 8
* - N0 = 2, 3, 4, 8, 16
* - K0 = 2, 3, 4, 8, 16
* - V0 >= 1
* - H0 >= 1
*
* @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
* The activation function is performed after the bias addition
* @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
* -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
* -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
* (HEIGHT_GEMM3D * DEPTH_GEMM3D) = rows of the LHS matrix NOT reshaped
*
* @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: F16/F32
* @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
* @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
* @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
* @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
* @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
* @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
* @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
* @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
* @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
* @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
* @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
* @param[out] dst_ptr Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
* @param[in] k Number of columns in the LHS matrix and rows in the RHS matrix, before reshaping.
* @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
*/
__kernel void gemm_mm_reshaped_lhs_nt_rhs_t(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(rhs),
#if defined(BETA)
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
uint k,
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
uint bias_stride_z,
#endif // defined(BETA)
uint dst_stride_z
#if defined(REINTERPRET_OUTPUT_AS_3D)
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
)
{
// Block size
#define LHS_BLOCK_SIZE ((K0) * (M0))
#if defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (K0)
#define LHS_STEP_X ((K0) * (V0))
#define LHS_STEP_LOOP (1)
#else // defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (LHS_BLOCK_SIZE)
#define LHS_STEP_X (K0)
#define LHS_STEP_LOOP (V0)
#endif // defined(LHS_INTERLEAVE)
// Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))
// RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (K0)
#define RHS_STEP_X ((K0) * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (K0)
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)
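// Example (illustrative values only): with M0 = 4, N0 = 4, K0 = 4, V0 = 2 and H0 = 2:
//   LHS_INTERLEAVE defined:     LHS_OFFSET_X = 4,  LHS_STEP_X = 8, LHS_STEP_LOOP = 1
//   LHS_INTERLEAVE not defined: LHS_OFFSET_X = 16, LHS_STEP_X = 4, LHS_STEP_LOOP = 2
// and likewise for the RHS macros with H0 in place of V0.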
#if defined(DUMMY_WORK_ITEMS)
if((get_global_id(0) * N0 >= N) || (get_global_id(1) * M0 >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
__global uchar *lhs_addr = lhs_ptr + lhs_offset_first_element_in_bytes + (get_global_id(1) % V0) * (uint)LHS_OFFSET_X * sizeof(DATA_TYPE) + (get_global_id(1) / V0) * (uint)lhs_stride_y +
(get_global_id(2) * lhs_stride_z);
// Compute RHS matrix address
__global uchar *rhs_addr = rhs_ptr + rhs_offset_first_element_in_bytes + (get_global_id(0) % H0) * (uint)RHS_OFFSET_X * sizeof(DATA_TYPE) + (get_global_id(0) / (uint)H0) * rhs_stride_y;
#if defined(MATRIX_B_DEPTH)
// Do not slide matrix B if matrix B has 3 dimensions and matrix A has more than 3
rhs_addr += (get_global_id(2) % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
rhs_addr += get_global_id(2) * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)
// Initialize the accumulators
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE_ACCUMULATOR, N0), c, 0);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0);
for(int i = 0; i < k; i += K0)
{
// Supported cases (M0, K0):
// 1,2 - 1,3 - 1,4 - 1,8 - 1,16
// 2,2 - 2,3 - 2,4 - 2,8 - 2,16
// 3,2 - 3,3 - 3,4 - 3,8 - 3,16
// 4,2 - 4,3 - 4,4 - 4,8 - 4,16
// 5,2 - 5,3 - 5,4 - 5,8 - 5,16
// 6,2 - 6,3 - 6,4 - 6,8 - 6,16
// 7,2 - 7,3 - 7,4 - 7,8 - 7,16
// 8,2 - 8,3 - 8,4 - 8,8 - 8,16
// Load values from LHS matrix
LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_addr, 0, LHS_STEP_X * sizeof(DATA_TYPE), zlhs);
// Load values from RHS matrix
LOAD_BLOCK(N0, K0, DATA_TYPE, b, rhs_addr, 0, RHS_STEP_X * sizeof(DATA_TYPE), zero);
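// Illustrative expansion of the LOAD_BLOCK above (N0 = 2, K0 = 4), roughly:
//   b0 = vload4(0, (__global DATA_TYPE *)(rhs_addr + 0 * RHS_STEP_X * sizeof(DATA_TYPE)));
//   b1 = vload4(0, (__global DATA_TYPE *)(rhs_addr + 1 * RHS_STEP_X * sizeof(DATA_TYPE)));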
// Accumulate
ARM_DOT_K0XN0(a0, b, c0);
#if M0 > 1
ARM_DOT_K0XN0(a1, b, c1);
#endif // M0 > 1
#if M0 > 2
ARM_DOT_K0XN0(a2, b, c2);
#endif // M0 > 2
#if M0 > 3
ARM_DOT_K0XN0(a3, b, c3);
#endif // M0 > 3
#if M0 > 4
ARM_DOT_K0XN0(a4, b, c4);
#endif // M0 > 4
#if M0 > 5
ARM_DOT_K0XN0(a5, b, c5);
#endif // M0 > 5
#if M0 > 6
ARM_DOT_K0XN0(a6, b, c6);
#endif // M0 > 6
#if M0 > 7
ARM_DOT_K0XN0(a7, b, c7);
#endif // M0 > 7
lhs_addr += (M0 * LHS_STEP_X * LHS_STEP_LOOP) * sizeof(DATA_TYPE);
rhs_addr += (N0 * RHS_STEP_X * RHS_STEP_LOOP) * sizeof(DATA_TYPE);
}
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (get_global_id(1) * (uint)M0 * dst_stride_y);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0);
#if defined(REINTERPRET_OUTPUT_AS_3D)
// The plane (zout) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zout, get_global_id(1) * (uint)M0, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply dst_stride_z by DEPTH_GEMM3D
dst_addr += get_global_id(2) * dst_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_OUTPUT_AS_3D)
// Add offset for batched GEMM
dst_addr += get_global_id(2) * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
// Multiply by the weight of matrix-matrix product and store the result
#if defined(ALPHA)
SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
#endif // defined(ALPHA)
// Add beta*bias
#if defined(BETA)
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias[broadcasted]
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(1, N0, DATA_TYPE_ACCUMULATOR, bias, bias_hp);
ADD_BLOCK_BROADCAST(M0, c, bias_hp0);
#else // defined(MIXED_PRECISION)
ADD_BLOCK_BROADCAST(M0, c, bias0);
#endif // defined(MIXED_PRECISION)
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (get_global_id(1) * (uint)M0 * bias_stride_y) + get_global_id(
2) * bias_stride_z;
LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE_ACCUMULATOR, bias, bias_hp);
ADD_BLOCK(M0, c, bias_hp);
#else // defined(MIXED_PRECISION)
ADD_BLOCK(M0, c, bias);
#endif // defined(MIXED_PRECISION)
#endif // defined(BROADCAST_BIAS)
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
#if defined(MIXED_PRECISION)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, VEC_SIZE, c, A_VAL, B_VAL);
#else // defined(MIXED_PRECISION)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
#endif // defined(MIXED_PRECISION)
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
// Store output block
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE, c, c_lp);
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#else // defined(MIXED_PRECISION)
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#endif // defined(MIXED_PRECISION)
#undef LHS_BLOCK_SIZE
#undef LHS_OFFSET_X
#undef LHS_STEP_X
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
#undef LHS_STEP_LOOP
#undef RHS_STEP_LOOP
}
#if defined(OPENCL_IMAGE_SUPPORT)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices. The RHS matrix is stored in OpenCL image object.
* The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 blocks must NOT be transposed
* The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 blocks must be transposed
*
* @note -DOPENCL_IMAGE_SUPPORT must be passed at compile time in order to compile this OpenCL kernel
* @note The data type must be passed at compile time using -DDATA_TYPE (e.g. -DDATA_TYPE=float)
* @note The data type used for the accumulators must be passed at compile time using -DDATA_TYPE_ACCUMULATOR (e.g. -DDATA_TYPE_ACCUMULATOR=float)
* @note The F16 computation also supports mixed precision through the option -DMIXED_PRECISION passed at compile time. If enabled, DATA_TYPE_ACCUMULATOR should be set to float
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
* @note The GEMM's dimensions M, N and K must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=90 and -DK=24).
* @note The height of the RHS matrix, defined before creating the OpenCL image object from the OpenCL buffer, should be passed at compile time using -DRHS_HEIGHT=<value> (e.g. -DRHS_HEIGHT=32)
* Since a 3D image cannot be created from a buffer, the third dimension may have been collapsed into the second, so RHS_HEIGHT
* can differ from the value returned by get_image_height(rhs_img).
* @note The block's dimensions used for reshaping the LHS matrix and the RHS matrix (M0, N0 and K0) must be passed at compile time using -DM0, -DN0 and -DK0 (e.g. -DM0=4, -DN0=8, -DK0=4).
* @note The number of M0xK0 vertical blocks stored on the same output row of the reshaped LHS matrix must be passed at compile time using -DV0 (e.g. -DV0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
* @note If the M0xK0 blocks in the reshaped LHS matrix have been interleaved, the option -DLHS_INTERLEAVE must be passed at compile time.
* @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
* @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
* - M0 = 2, 3, 4, 5, 6, 7, 8
* - N0 = 4, 8, 16
* - K0 = 4, 8, 16
* - V0 >= 1
* - H0 >= 1
*
* @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
* The activation function is performed after the bias addition
* @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
* -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
* -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
* (HEIGHT_GEMM3D * DEPTH_GEMM3D) = rows of the LHS matrix NOT reshaped
*
* @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: F32
* @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
* @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
* @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
* @param[in] rhs_img The RHS reshaped matrix as OpenCL image object. Supported data type: same as @p lhs_ptr
* @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
* @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
* @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
* @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
* @param[out] dst_ptr Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
* @param[in] k Number of columns in the LHS matrix and rows in the RHS matrix, before reshaping.
* @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
*/
__kernel void gemm_mm_reshaped_lhs_nt_rhs_t_texture(IMAGE_DECLARATION(lhs),
__read_only image2d_t rhs_img,
#if defined(BETA)
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
uint k,
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
uint bias_stride_z,
#endif // defined(BETA)
uint dst_stride_z
#if defined(REINTERPRET_OUTPUT_AS_3D)
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
)
{
// Pixel unit
#define PIXEL_UNIT CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(K0)
// Block size
#define LHS_BLOCK_SIZE ((K0) * (M0))
#if defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (K0)
#define LHS_STEP_X ((K0) * (V0))
#define LHS_STEP_LOOP (1)
#else // defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (LHS_BLOCK_SIZE)
#define LHS_STEP_X (K0)
#define LHS_STEP_LOOP (V0)
#endif // defined(LHS_INTERLEAVE)
// Block size
#define RHS_BLOCK_SIZE (PIXEL_UNIT * (N0))
// RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (PIXEL_UNIT)
#define RHS_STEP_X (PIXEL_UNIT * (H0))
#define RHS_STEP_LOOP (1)
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X PIXEL_UNIT
#define RHS_STEP_LOOP (H0)
#endif // defined(RHS_INTERLEAVE)
#if defined(DUMMY_WORK_ITEMS)
if((get_global_id(0) * N0 >= N) || (get_global_id(1) * M0 >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
__global uchar *lhs_addr = lhs_ptr + lhs_offset_first_element_in_bytes + (get_global_id(1) % V0) * (uint)LHS_OFFSET_X * sizeof(DATA_TYPE) + (get_global_id(1) / V0) * (uint)lhs_stride_y +
(get_global_id(2) * lhs_stride_z);
#if defined(MATRIX_B_DEPTH)
// Do not slide matrix B if matrix B has 3 dimensions and matrix A has more than 3
const uint z_rhs = (get_global_id(2) % MATRIX_B_DEPTH);
#else // defined(MATRIX_B_DEPTH)
const uint z_rhs = get_global_id(2);
#endif // defined(MATRIX_B_DEPTH)
// Compute RHS matrix coordinates
uint x_rhs = (get_global_id(0) % H0) * (uint)RHS_OFFSET_X;
const uint y_rhs = (get_global_id(0) / (uint)H0) + z_rhs * RHS_HEIGHT;
// Initialize the accumulators
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE_ACCUMULATOR, N0), c, 0);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zlhs, 0); //uint zlhs0=0,zlhs1=0,zlhs2=0,... zlhs7=0;
REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0);
for(int i = 0; i < K; i += K0)
{
// Load values from LHS matrix
LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_addr, 0, LHS_STEP_X * sizeof(DATA_TYPE), zlhs);
// Load values from RHS matrix stored in a cl_image
REPEAT_VAR_INIT_TO_CONST(N0, VEC_DATA_TYPE(DATA_TYPE, K0), b, 0);
LOAD_TEXTURE2D(N0, PIXEL_UNIT, DATA_TYPE, b, rhs_img, x_rhs, y_rhs, RHS_STEP_X, 0);
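// Illustrative: the LOAD_TEXTURE2D above fills b0..b(N0-1); each bi reads PIXEL_UNIT
// texels starting at (x_rhs + i * RHS_STEP_X, y_rhs), i.e. one K0-wide row of the
// transposed RHS block.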
// Accumulate
ARM_DOT_K0XN0(a0, b, c0);
#if M0 > 1
ARM_DOT_K0XN0(a1, b, c1);
#endif // M0 > 1
#if M0 > 2
ARM_DOT_K0XN0(a2, b, c2);
#endif // M0 > 2
#if M0 > 3
ARM_DOT_K0XN0(a3, b, c3);
#endif // M0 > 3
#if M0 > 4
ARM_DOT_K0XN0(a4, b, c4);
#endif // M0 > 4
#if M0 > 5
ARM_DOT_K0XN0(a5, b, c5);
#endif // M0 > 5
#if M0 > 6
ARM_DOT_K0XN0(a6, b, c6);
#endif // M0 > 6
#if M0 > 7
ARM_DOT_K0XN0(a7, b, c7);
#endif // M0 > 7
lhs_addr += (M0 * LHS_STEP_X * LHS_STEP_LOOP) * sizeof(DATA_TYPE);
x_rhs += N0 * RHS_STEP_X * RHS_STEP_LOOP;
}
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (get_global_id(1) * (uint)M0 * dst_stride_y);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0);
#if defined(REINTERPRET_OUTPUT_AS_3D)
// The plane (zout) is calculated by dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zout, get_global_id(1) * (uint)M0, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply dst_stride_z by DEPTH_GEMM3D
dst_addr += get_global_id(2) * dst_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_OUTPUT_AS_3D)
// Add offset for batched GEMM
dst_addr += get_global_id(2) * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
// Multiply by the weight of matrix-matrix product and store the result
#if defined(ALPHA)
SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
#endif // defined(ALPHA)
// Add beta*bias
#if defined(BETA)
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias[broadcasted]
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(1, N0, DATA_TYPE_ACCUMULATOR, bias, bias_hp);
ADD_BLOCK_BROADCAST(M0, c, bias_hp0);
#else // defined(MIXED_PRECISION)
ADD_BLOCK_BROADCAST(M0, c, bias0);
#endif // defined(MIXED_PRECISION)
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (get_global_id(1) * (uint)M0 * bias_stride_y) + get_global_id(
2) * bias_stride_z;
LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE_ACCUMULATOR, bias, bias_hp);
ADD_BLOCK(M0, c, bias_hp);
#else // defined(MIXED_PRECISION)
ADD_BLOCK(M0, c, bias);
#endif // defined(MIXED_PRECISION)
#endif // defined(BROADCAST_BIAS)
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
#if defined(MIXED_PRECISION)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, VEC_SIZE, c, A_VAL, B_VAL);
#else // defined(MIXED_PRECISION)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
#endif // defined(MIXED_PRECISION)
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
// Store output block
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE, c, c_lp);
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#else // defined(MIXED_PRECISION)
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#endif // defined(MIXED_PRECISION)
#undef LHS_BLOCK_SIZE
#undef LHS_OFFSET_X
#undef LHS_STEP_X
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
#undef PIXEL_UNIT
#undef LHS_STEP_LOOP
#undef RHS_STEP_LOOP
}
#endif // defined(OPENCL_IMAGE_SUPPORT)
#if defined(LHS_TRANSPOSE)
#define VTYPE(TYPE, SIZE) VEC_DATA_TYPE(TYPE, SIZE)
#if defined(MIXED_PRECISION)
#if(GPU_ARCH == GPU_ARCH_MIDGARD)
#define ARM_VFMA(N0, a, b, c) c += (CONVERT(a, VEC_DATA_TYPE(DATA_TYPE_ACCUMULATOR, N0))) * (CONVERT(b, VEC_DATA_TYPE(DATA_TYPE_ACCUMULATOR, N0)));
#else // GPU_ARCH == GPU_ARCH_MIDGARD
#define ARM_VFMA(N0, a, b, c) c = fma((CONVERT(a, VEC_DATA_TYPE(DATA_TYPE_ACCUMULATOR, N0))), (CONVERT(b, VEC_DATA_TYPE(DATA_TYPE_ACCUMULATOR, N0))), (c));
#endif // GPU_ARCH == GPU_ARCH_MIDGARD
#else // defined(MIXED_PRECISION)
#if(GPU_ARCH == GPU_ARCH_MIDGARD)
#define ARM_VFMA(N0, a, b, c) c += (a) * (b);
#else // GPU_ARCH == GPU_ARCH_MIDGARD
#define ARM_VFMA(N0, a, b, c) c = fma((a), (b), (c));
#endif // GPU_ARCH == GPU_ARCH_MIDGARD
#endif // defined(MIXED_PRECISION)
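// ARM_VFMA therefore lowers to a plain multiply-accumulate on Midgard GPUs and to the
// fma() builtin elsewhere; in mixed precision both operands are first converted to
// DATA_TYPE_ACCUMULATOR.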
#define ARM_VVM_T_NT_1xN0x1(N0, TYPE, a, b, C) \
({ \
ARM_VFMA(N0, (VTYPE(TYPE, N0))(a), b, (C##0)); \
})
#define ARM_VVM_T_NT_2xN0x1(N0, TYPE, a, b, C) \
({ \
ARM_VFMA(N0, (VTYPE(TYPE, N0))(a.s0), b, (C##0)); \
ARM_VFMA(N0, (VTYPE(TYPE, N0))(a.s1), b, (C##1)); \
})
#define ARM_VVM_T_NT_3xN0x1(N0, TYPE, a, b, C) \
({ \
ARM_VVM_T_NT_2xN0x1(N0, TYPE, a, b, C); \
ARM_VFMA(N0, (VTYPE(TYPE, N0))(a.s2), b, (C##2)); \
})
#define ARM_VVM_T_NT_4xN0x1(N0, TYPE, a, b, C) \
({ \
ARM_VVM_T_NT_3xN0x1(N0, TYPE, a, b, C); \
ARM_VFMA(N0, (VTYPE(TYPE, N0))(a.s3), b, (C##3)); \
})
#define ARM_VVM_T_NT_8xN0x1(N0, TYPE, a, b, C) \
({ \
ARM_VVM_T_NT_4xN0x1(N0, TYPE, a, b, C); \
ARM_VFMA(N0, (VTYPE(TYPE, N0))(a.s4), b, (C##4)); \
ARM_VFMA(N0, (VTYPE(TYPE, N0))(a.s5), b, (C##5)); \
ARM_VFMA(N0, (VTYPE(TYPE, N0))(a.s6), b, (C##6)); \
ARM_VFMA(N0, (VTYPE(TYPE, N0))(a.s7), b, (C##7)); \
})
// Factory macro for the column-vector (transposed) by row-vector (not transposed) multiplication. K0 = 1
// a is the column-vector (transposed)
// b is the row-vector (not transposed)
// C is the output matrix
// Lower case is a vector (a, b)
// Upper case is a matrix (C)
#define ARM_VVM_T_NT_M0xN0x1(M0, N0, TYPE, a, b, C) ARM_VVM_T_NT_##M0##xN0x1(N0, TYPE, a, b, C)
#define ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, A, B, C) \
({ \
ARM_VVM_T_NT_M0xN0x1(M0, N0, TYPE, (A##0), (B##0), C); \
})
#define ARM_MM_T_NT_M0xN0x2(M0, N0, TYPE, A, B, C) \
({ \
ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, A, B, C); \
ARM_VVM_T_NT_M0xN0x1(M0, N0, TYPE, (A##1), (B##1), C); \
})
#define ARM_MM_T_NT_M0xN0x3(M0, N0, TYPE, A, B, C) \
({ \
ARM_MM_T_NT_M0xN0x2(M0, N0, TYPE, A, B, C); \
ARM_VVM_T_NT_M0xN0x1(M0, N0, TYPE, (A##2), (B##2), C); \
})
#define ARM_MM_T_NT_M0xN0x4(M0, N0, TYPE, A, B, C) \
({ \
ARM_MM_T_NT_M0xN0x3(M0, N0, TYPE, A, B, C); \
ARM_VVM_T_NT_M0xN0x1(M0, N0, TYPE, (A##3), (B##3), C); \
})
#define ARM_MM_T_NT_M0xN0x8(M0, N0, TYPE, A, B, C) \
({ \
ARM_MM_T_NT_M0xN0x4(M0, N0, TYPE, A, B, C); \
ARM_VVM_T_NT_M0xN0x1(M0, N0, TYPE, (A##4), (B##4), C); \
ARM_VVM_T_NT_M0xN0x1(M0, N0, TYPE, (A##5), (B##5), C); \
ARM_VVM_T_NT_M0xN0x1(M0, N0, TYPE, (A##6), (B##6), C); \
ARM_VVM_T_NT_M0xN0x1(M0, N0, TYPE, (A##7), (B##7), C); \
})
#define ARM_MM_T_NT_M0xN0x16(M0, N0, TYPE, A, B, C) \
({ \
ARM_MM_T_NT_M0xN0x8(M0, N0, TYPE, A, B, C); \
ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, (A##8), (B##8), C); \
ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, (A##9), (B##9), C); \
ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, (A##A), (B##A), C); \
ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, (A##B), (B##B), C); \
ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, (A##C), (B##C), C); \
ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, (A##D), (B##D), C); \
ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, (A##E), (B##E), C); \
ARM_MM_T_NT_M0xN0x1(M0, N0, TYPE, (A##F), (B##F), C); \
})
// Factory macro for the matrix (transposed) by matrix (not transposed) multiplication.
// The dimensions for this matrix multiplications are defined through M0, N0 and K0
// The dimensions supported are:
// M0: 1, 2, 3, 4, 8
// N0: 1, 2, 3, 4, 8, 16
// K0: 1, 2, 3, 4, 8, 16
// This macro calls the vector-by-matrix macro K0 times
// A, B and C are matrices
#define ARM_MM_T_NT(M0, N0, K0, TYPE, A, B, C) \
CONCAT(ARM_MM_T_NT_M0xN0x, K0) \
(M0, N0, TYPE, A, B, C)
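// Example (illustrative): ARM_MM_T_NT(4, 8, 2, float, a, b, c) expands to
// ARM_MM_T_NT_M0xN0x2(4, 8, float, a, b, c), i.e. two rank-1 updates:
//   ARM_VVM_T_NT_4xN0x1(8, float, a0, b0, c); // c0..c3 += a0.s0..s3 * b0
//   ARM_VVM_T_NT_4xN0x1(8, float, a1, b1, c); // c0..c3 += a1.s0..s3 * b1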
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 blocks must be transposed
* The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 blocks must NOT be transposed
*
* @note LHS_TRANSPOSE should be passed at compile time in order to compile this OpenCL kernel (e.g. -DLHS_TRANSPOSE).
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
* @note The GEMM's dimensions M, N and K must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=90 and -DK=24).
* @note The block's dimensions used for reshaping the LHS matrix and the RHS matrix (M0, N0 and K0) must be passed at compile time using -DM0, -DN0 and -DK0 (e.g. -DM0=4, -DN0=8, -DK0=4).
* @note The number of M0xK0 vertical blocks stored on the same output row of the reshaped LHS matrix must be passed at compile time using -DV0 (e.g. -DV0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
* @note If the M0xK0 blocks in the reshaped LHS matrix have been interleaved, the option -DLHS_INTERLEAVE must be passed at compile time.
* @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
* @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
* - M0 = 2, 3, 4, 8
* - N0 = 2, 3, 4, 8, 16
* - K0 = 2, 3, 4, 8, 16
* - V0 >= 1
* - H0 >= 1
*
* @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
* The activation function is performed after the bias addition
* @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
* -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
* -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
* (HEIGHT_GEMM3D * DEPTH_GEMM3D) = rows of the LHS matrix NOT reshaped
*
* @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: F16/F32
* @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
* @param[in] lhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
* @param[in] lhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
* @param[in] rhs_ptr Pointer to the RHS reshaped matrix. Supported data type: same as @p lhs_ptr
* @param[in] rhs_stride_x Stride of the RHS reshaped matrix in X dimension (in bytes)
* @param[in] rhs_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] rhs_stride_y Stride of the RHS reshaped matrix in Y dimension (in bytes)
* @param[in] rhs_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS reshaped matrix
* @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
* @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
* @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
* @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
* @param[out] dst_ptr Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in]  k                                 Number of columns in the LHS matrix and number of rows in the RHS matrix, before reshaping.
* @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
*/
__kernel void gemm_mm_reshaped_lhs_t_rhs_nt(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(rhs),
#if defined(BETA)
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
uint k,
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
uint bias_stride_z,
#endif //defined(BETA)
uint dst_stride_z
#if defined(REINTERPRET_OUTPUT_AS_3D)
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
)
{
// Block size
#define LHS_BLOCK_SIZE ((K0) * (M0))
#if defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (M0)
#define LHS_STEP_X ((M0) * (V0))
#define LHS_STEP_LOOP (1)
#else // defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (LHS_BLOCK_SIZE)
#define LHS_STEP_X (M0)
#define LHS_STEP_LOOP (V0)
#endif // defined(LHS_INTERLEAVE)
// Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))
// RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (N0)
#define RHS_STEP_X ((N0) * (H0))
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (N0)
#endif // defined(RHS_INTERLEAVE)
const uint x = get_global_id(0);
const uint y = get_global_id(1);
const uint z = get_global_id(2);
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
__global uchar *lhs_addr = lhs_ptr + lhs_offset_first_element_in_bytes + (y % V0) * (uint)LHS_OFFSET_X * sizeof(DATA_TYPE) + (y / V0) * (uint)lhs_stride_y + (z * lhs_stride_z);
// Compute RHS matrix address
__global uchar *rhs_addr = rhs_ptr + rhs_offset_first_element_in_bytes + (x % H0) * (uint)RHS_OFFSET_X * sizeof(DATA_TYPE) + (x / (uint)H0) * rhs_stride_y;
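    // Editor's note: (y % V0) selects one of the V0 LHS blocks stored on the same reshaped row and
    // (y / V0) selects the reshaped row itself; the RHS address is formed the same way with H0.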
#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if matrix B has 3 dimensions and matrix A has more than 3
rhs_addr += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
rhs_addr += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)
// Initialize the accumulators
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE_ACCUMULATOR, N0), c, 0);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
__global DATA_TYPE *lhs = (__global DATA_TYPE *)(lhs_addr);
__global DATA_TYPE *rhs = (__global DATA_TYPE *)(rhs_addr);
for(int i = 0; i < k; i += K0)
{
VEC_DATA_TYPE(DATA_TYPE, M0)
a0;
VEC_DATA_TYPE(DATA_TYPE, N0)
b0;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
#if K0 > 1
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
#endif // K0 > 1
#if K0 > 2
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
#endif // K0 > 2
#if K0 > 3
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
#endif // K0 > 3
#if K0 > 4
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
#endif // K0 > 4
#if K0 > 8
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = VLOAD(N0)(0, rhs);
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
rhs += RHS_STEP_X;
#endif // K0 > 8
#ifndef LHS_INTERLEAVE
lhs += (M0 * K0 * (V0 - 1));
#endif // LHS_INTERLEAVE
#ifndef RHS_INTERLEAVE
rhs += (N0 * K0 * (H0 - 1));
#endif // RHS_INTERLEAVE
}
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (y * (uint)M0 * dst_stride_y);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0);
#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zout, y * (uint)M0, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply dst_stride_z by DEPTH_GEMM3D
dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_OUTPUT_AS_3D)
// Add offset for batched GEMM
dst_addr += z * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
    // Multiply by the weight (alpha) of the matrix-matrix product
#if defined(ALPHA)
SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
#endif // defined(ALPHA)
// Add beta*bias
#if defined(BETA)
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE));
LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias[broadcasted]
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(1, N0, DATA_TYPE_ACCUMULATOR, bias, bias_hp);
ADD_BLOCK_BROADCAST(M0, c, bias_hp0);
#else // defined(MIXED_PRECISION)
ADD_BLOCK_BROADCAST(M0, c, bias0);
#endif // defined(MIXED_PRECISION)
#else // defined(BROADCAST_BIAS)
    __global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (y * (uint)M0 * bias_stride_y) + z * bias_stride_z;
LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE_ACCUMULATOR, bias, bias_hp);
ADD_BLOCK(M0, c, bias_hp);
#else // defined(MIXED_PRECISION)
ADD_BLOCK(M0, c, bias);
#endif // defined(MIXED_PRECISION)
#endif // defined(BROADCAST_BIAS)
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
#if defined(MIXED_PRECISION)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, VEC_SIZE, c, A_VAL, B_VAL);
#else // defined(MIXED_PRECISION)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
#endif // defined(MIXED_PRECISION)
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
// Store output block
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE, c, c_lp);
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#else // defined(MIXED_PRECISION)
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#endif // defined(MIXED_PRECISION)
#undef LHS_BLOCK_SIZE
#undef LHS_OFFSET_X
#undef LHS_STEP_X
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
#if defined(OPENCL_IMAGE_SUPPORT)
/** This OpenCL kernel computes the matrix multiplication between 2 matrices. The RHS matrix is stored in an OpenCL image object.
 *  The LHS matrix must be reshaped with @ref CLGEMMReshapeLHSMatrixKernel and the M0xK0 blocks must be transposed
 *  The RHS matrix must be reshaped with @ref CLGEMMReshapeRHSMatrixKernel and the K0xN0 blocks must NOT be transposed
*
* @note -DOPENCL_IMAGE_SUPPORT must be passed at compile time in order to compile this OpenCL kernel
* @note LHS_TRANSPOSE should be passed at compile time in order to compile this OpenCL kernel (e.g. -DLHS_TRANSPOSE).
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
* @note The GEMM's dimensions M, N and K must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=90 and -DK=24).
* @note The height of the RHS matrix, defined before creating the OpenCL image object from the OpenCL buffer, should be passed at compile time using -DRHS_HEIGHT=<value> (e.g. -DRHS_HEIGHT=32)
 *       Since we cannot create a 3D image from a buffer, the third dimension could be collapsed with the second dimension, so RHS_HEIGHT
* could be different from the value returned by get_image_height(rhs_img).
* @note The block's dimensions used for reshaping the LHS matrix and the RHS matrix (M0, N0 and K0) must be passed at compile time using -DM0, -DN0 and -DK0 (e.g. -DM0=4, -DN0=8, -DK0=4).
* @note The number of M0xK0 vertical blocks stored on the same output row of the reshaped LHS matrix must be passed at compile time using -DV0 (e.g. -DV0=2)
* @note The number of K0xN0 horizontal blocks stored on the same output row of the reshaped RHS matrix must be passed at compile time using -DH0 (e.g. -DH0=2)
 * @note If the M0xK0 blocks in the reshaped LHS matrix have been interleaved, the option -DLHS_INTERLEAVE must be passed at compile time.
 * @note If the K0xN0 blocks in the reshaped RHS matrix have been interleaved, the option -DRHS_INTERLEAVE must be passed at compile time.
* @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
* - M0 = 2, 3, 4, 8
* - N0 = 4, 8, 16
* - K0 = 4, 8, 16
* - V0 >= 1
* - H0 >= 1
*
 * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
 *       The activation function is performed after the bias addition
* @note In case the output has to be reinterpreted as a 3D tensor (e.g. output of convolution layer), the following information must be passed at compile time:
* -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
* -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *       (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns of the LHS matrix NOT reshaped
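 * @note A possible set of build options (illustrative values chosen by the editor, not mandated by the kernel) could be:
 *
 *     -DOPENCL_IMAGE_SUPPORT -DLHS_TRANSPOSE -DDATA_TYPE=float -DDATA_TYPE_ACCUMULATOR=float -DM=52 -DN=90 -DK=24 -DM0=4 -DN0=8 -DK0=4 -DV0=2 -DH0=2 -DRHS_HEIGHT=32 -DPARTIAL_STORE_M0=0 -DPARTIAL_STORE_N0=2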
*
* @param[in] lhs_ptr Pointer to the LHS reshaped matrix. Supported data type: F32
* @param[in] lhs_stride_x Stride of the LHS reshaped matrix in X dimension (in bytes)
 * @param[in]  lhs_step_x                        lhs_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] lhs_stride_y Stride of the LHS reshaped matrix in Y dimension (in bytes)
 * @param[in]  lhs_step_y                        lhs_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS reshaped matrix
 * @param[in]  rhs_img                           The RHS reshaped matrix as a 2D OpenCL image object. Supported data type: same as @p lhs_ptr
* @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
* @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
* @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
* @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
 * @param[out] dst_ptr                           Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
 * @param[in]  k                                 Number of columns in the LHS matrix and number of rows in the RHS matrix, before reshaping.
* @param[in] lhs_stride_z Stride of the LHS reshaped matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS reshaped matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
*/
__kernel void gemm_mm_reshaped_lhs_t_rhs_nt_texture(IMAGE_DECLARATION(lhs),
__read_only image2d_t rhs_img,
#if defined(BETA)
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
uint k,
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
uint bias_stride_z,
#endif //defined(BETA)
uint dst_stride_z
#if defined(REINTERPRET_OUTPUT_AS_3D)
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
)
{
// Pixel unit
#define PIXEL_UNIT CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(N0)
// Block size
#define LHS_BLOCK_SIZE ((K0) * (M0))
#if defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (M0)
#define LHS_STEP_X ((M0) * (V0))
#define LHS_STEP_LOOP (1)
#else // defined(LHS_INTERLEAVE)
#define LHS_OFFSET_X (LHS_BLOCK_SIZE)
#define LHS_STEP_X (M0)
#define LHS_STEP_LOOP (V0)
#endif // defined(LHS_INTERLEAVE)
// Block size
#define RHS_BLOCK_SIZE ((K0) * (PIXEL_UNIT))
// RHS offset and step X
#if defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (PIXEL_UNIT)
#define RHS_STEP_X ((PIXEL_UNIT) * (H0))
#else // defined(RHS_INTERLEAVE)
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
#define RHS_STEP_X (PIXEL_UNIT)
#endif // defined(RHS_INTERLEAVE)
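    // Editor's note (assumption): CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT maps an N0-element vector to RGBA
    // pixels, i.e. PIXEL_UNIT = N0 / 4 for F32 data, so with N0 = 8 each K0 step reads 2 pixels. The
    // offset/step logic below mirrors the buffer variant above, expressed in pixels instead of elements.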
const uint x = get_global_id(0);
const uint y = get_global_id(1);
const uint z = get_global_id(2);
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
__global uchar *lhs_addr = lhs_ptr + lhs_offset_first_element_in_bytes + (y % V0) * (uint)LHS_OFFSET_X * sizeof(DATA_TYPE) + (y / V0) * (uint)lhs_stride_y + (z * lhs_stride_z);
#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if matrix B has 3 dimensions and matrix A has more than 3
const uint z_rhs = (z % MATRIX_B_DEPTH);
#else // defined(MATRIX_B_DEPTH)
const uint z_rhs = z;
#endif // defined(MATRIX_B_DEPTH)
// Compute RHS matrix coordinates
uint x_rhs = (x % H0) * (uint)RHS_OFFSET_X;
const uint y_rhs = (x / (uint)H0) + z_rhs * RHS_HEIGHT;
// Initialize the accumulators
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE_ACCUMULATOR, N0), c, 0);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zero, 0);
__global DATA_TYPE *lhs = (__global DATA_TYPE *)(lhs_addr);
for(int i = 0; i < K; i += K0)
{
VEC_DATA_TYPE(DATA_TYPE, M0)
a0;
VEC_DATA_TYPE(DATA_TYPE, N0)
b0;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 0 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
#if K0 > 1
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 1 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
#endif // K0 > 1
#if K0 > 2
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 2 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
#endif // K0 > 2
#if K0 > 3
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 3 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
#endif // K0 > 3
#if K0 > 4
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 4 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 5 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 6 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 7 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
#endif // K0 > 4
#if K0 > 8
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 8 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 9 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 10 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 11 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 12 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 13 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 14 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
a0 = VLOAD(M0)(0, lhs);
b0 = READ_IMAGE2D(DATA_TYPE, PIXEL_UNIT, rhs_img, (x_rhs + 15 * RHS_STEP_X), (y_rhs));
ARM_MM_T_NT(M0, N0, 1, DATA_TYPE, a, b, c);
lhs += LHS_STEP_X;
#endif // K0 > 8
#ifndef LHS_INTERLEAVE
lhs += (M0 * K0 * (V0 - 1));
#endif // LHS_INTERLEAVE
x_rhs += K0 * RHS_STEP_X;
#ifndef RHS_INTERLEAVE
x_rhs += (PIXEL_UNIT * K0 * (H0 - 1));
#endif // RHS_INTERLEAVE
}
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (y * (uint)M0 * dst_stride_y);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0);
#if defined(REINTERPRET_OUTPUT_AS_3D)
    // The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zout, y * (uint)M0, HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply dst_stride_z by DEPTH_GEMM3D
dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_OUTPUT_AS_3D)
// Add offset for batched GEMM
dst_addr += z * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
    // Multiply by the weight (alpha) of the matrix-matrix product
#if defined(ALPHA)
SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
#endif // defined(ALPHA)
// Add beta*bias
#if defined(BETA)
#if defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE));
LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias[broadcasted]
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(1, N0, DATA_TYPE_ACCUMULATOR, bias, bias_hp);
ADD_BLOCK_BROADCAST(M0, c, bias_hp0);
#else // defined(MIXED_PRECISION)
ADD_BLOCK_BROADCAST(M0, c, bias0);
#endif // defined(MIXED_PRECISION)
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (y * (uint)M0 * bias_stride_y) + z * bias_stride_z;
LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE_ACCUMULATOR, bias, bias_hp);
ADD_BLOCK(M0, c, bias_hp);
#else // defined(MIXED_PRECISION)
ADD_BLOCK(M0, c, bias);
#endif // defined(MIXED_PRECISION)
#endif // defined(BROADCAST_BIAS)
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
#if defined(MIXED_PRECISION)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE_ACCUMULATOR, VEC_SIZE, c, A_VAL, B_VAL);
#else // defined(MIXED_PRECISION)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
#endif // defined(MIXED_PRECISION)
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = ((get_global_id(1) + 1) * M0 >= M);
const bool cond_x = ((get_global_id(0) + 1) * N0 >= N);
// Store output block
#if defined(MIXED_PRECISION)
CONVERT_BLOCK(M0, N0, DATA_TYPE, c, c_lp);
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c_lp, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#else // defined(MIXED_PRECISION)
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#endif // defined(MIXED_PRECISION)
#undef LHS_BLOCK_SIZE
#undef LHS_OFFSET_X
#undef LHS_STEP_X
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
#undef PIXEL_UNIT
#undef LHS_STEP_LOOP
#undef RHS_STEP_LOOP
}
#endif // defined(OPENCL_IMAGE_SUPPORT)
#endif // defined(LHS_TRANSPOSE)
#endif // defined(M0) && defined(N0) && defined(K0) && defined(V0) && defined(H0) && defined(K) && defined(DATA_TYPE)
#if defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(DATA_TYPE)
#define VFMA(a, b, c) \
({ \
c = fma(a, b, c); \
})
#if M0 == 1
#define RHS_VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
})
#elif M0 == 2 // M0 == 2
#define RHS_VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
})
#elif M0 == 3 // M0 == 3
#define RHS_VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
})
#elif M0 == 4 // M0 == 4
#define RHS_VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
})
#elif M0 == 5 // M0 == 5
#define RHS_VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
})
#elif M0 == 6 // M0 == 6
#define RHS_VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
})
#elif M0 == 7 // M0 == 7
#define RHS_VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
})
#elif M0 == 8 // M0 == 8
#define RHS_VFMA_M0xN0(i, a, b, c) \
({ \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##7).s##i), b, (c##7)); \
})
#else // M0 not supported
#error "M0 not supported"
#endif // M0 not supported
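// Editor's note: for M0 == 2, a call such as RHS_VFMA_M0xN0(0, a, b0, c) expands (conceptually) to
//   c0 = fma((VEC_DATA_TYPE(DATA_TYPE, N0))(a0.s0), b0, c0);
//   c1 = fma((VEC_DATA_TYPE(DATA_TYPE, N0))(a1.s0), b0, c1);
// i.e. element i of each LHS row is broadcast to an N0-wide vector and multiply-accumulated with one RHS row.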
/** This OpenCL kernel computes the matrix multiplication between 2 matrices.
* The LHS matrix is NOT reshaped
* The RHS matrix is NOT reshaped
*
* @note If the first two dimensions of NDRange have been dispatched with "dummy_work_items" support, the option -DDUMMY_WORK_ITEMS must be passed at compile time.
 * @note The GEMM's dimensions (M, N and K) must be passed at compile time using -DM, -DN and -DK (e.g. -DM=52, -DN=30 and -DK=90)
 * @note The number of columns of the LHS matrix must be passed at compile time using -DK (e.g. -DK=64)
 * @note The number of M0 rows to process must be passed at compile time using -DM0 (e.g. -DM0=2)
 * @note The number of K0 partial accumulations must be passed at compile time using -DK0 (e.g. -DK0=2)
* @note The number of N0 columns to process must be passed at compile time using -DN0 (e.g. -DN0=2)
* @note The size of the partial store block in y must be passed at compile time using -DPARTIAL_STORE_M0 (e.g. -DPARTIAL_STORE_M0=1)
* @note The size of the partial store block in x must be passed at compile time using -DPARTIAL_STORE_N0 (e.g. -DPARTIAL_STORE_N0=1)
* @note Only the following configurations of M0, N0 and K0 are currently supported:
* - M0 = 1, 2, 3, 4, 5, 6, 7, 8
* - N0 = 2, 3, 4, 8, 16
* - K0 = 2, 3, 4, 8, 16
*
 * @note If the activation type is passed at compile time through -DACTIVATION_TYPE (e.g. -DACTIVATION_TYPE=RELU), the A and B variables required by some activation functions should be passed at compile time as well using -DA_VAL= and -DB_VAL= respectively.
 *       The activation function is performed after the bias addition
 * @note In case the input or output has to be reinterpreted as a 3D tensor, the following information must be passed at compile time:
* -# REINTERPRET_INPUT_AS_3D: To reinterpret the input as 3D
* -# REINTERPRET_OUTPUT_AS_3D: To reinterpret the output as 3D
* -# HEIGHT_GEMM3D: The height of the output in case it has to be reinterpreted as a 3D tensor.
* -# DEPTH_GEMM3D: The depth of the output in case it has to be reinterpreted as a 3D tensor
 *       (HEIGHT_GEMM3D * DEPTH_GEMM3D) = columns of the LHS matrix
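 * @note A possible set of build options for this kernel (illustrative values chosen by the editor, not mandated by the kernel) could be:
 *
 *     -DDATA_TYPE=float -DM=52 -DN=30 -DK=90 -DM0=4 -DN0=4 -DK0=4 -DPARTIAL_STORE_M0=0 -DPARTIAL_STORE_N0=2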
*
* @param[in] lhs_ptr Pointer to the LHS matrix. Supported data type: F16/F32
* @param[in] lhs_stride_x Stride of the LHS matrix in X dimension (in bytes)
* @param[in] lhs_step_x lhs_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] lhs_stride_y Stride of the LHS matrix in Y dimension (in bytes)
* @param[in] lhs_step_y lhs_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] lhs_offset_first_element_in_bytes The offset of the first element in the LHS matrix
* @param[in] rhs_ptr Pointer to the RHS matrix. Supported data type: same as @p lhs_ptr
* @param[in] rhs_stride_x Stride of the RHS matrix in X dimension (in bytes)
* @param[in] rhs_step_x rhs_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] rhs_stride_y Stride of the RHS matrix in Y dimension (in bytes)
* @param[in] rhs_step_y rhs_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] rhs_offset_first_element_in_bytes The offset of the first element in the RHS matrix
* @param[in] bias_ptr (Optional) Pointer to the bias matrix. Supported data type: same as @p lhs_ptr
* @param[in] bias_stride_x (Optional) Stride of the bias matrix in X dimension (in bytes)
* @param[in] bias_step_x (Optional) bias_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] bias_stride_y (Optional) Stride of the bias matrix in Y dimension (in bytes)
* @param[in] bias_step_y (Optional) bias_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] bias_offset_first_element_in_bytes (Optional) The offset of the first element in the bias matrix
 * @param[out] dst_ptr                           Pointer to the destination matrix. Supported data type: same as @p lhs_ptr
* @param[in] dst_stride_x Stride of the destination matrix in X dimension (in bytes)
* @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] dst_stride_y Stride of the destination matrix in Y dimension (in bytes)
* @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
* @param[in] lhs_stride_z Stride of the LHS matrix in Z dimension (in bytes)
* @param[in] rhs_stride_z Stride of the RHS matrix in Z dimension (in bytes)
* @param[in] bias_stride_z (Optional) Stride of the bias matrix in Z dimension (in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] lhs_cross_plane_pad (Optional) Bottom paddings for LHS matrix in unit of elements (only if defined REINTERPRET_INPUT_AS_3D)
* @param[in] dst_cross_plane_pad (Optional) Bottom paddings for the output matrix in unit of elements (only if defined REINTERPRET_OUTPUT_AS_3D)
*/
__kernel void gemm_mm_native(IMAGE_DECLARATION(lhs),
IMAGE_DECLARATION(rhs),
#if defined(BETA)
IMAGE_DECLARATION(bias),
#endif // defined(BETA)
IMAGE_DECLARATION(dst),
uint lhs_stride_z,
uint rhs_stride_z,
#if defined(BETA)
uint bias_stride_z,
#endif //defined(BETA)
uint dst_stride_z
#if defined(REINTERPRET_INPUT_AS_3D)
,
uint lhs_cross_plane_pad
#endif // REINTERPRET_INPUT_AS_3D
#if defined(REINTERPRET_OUTPUT_AS_3D)
,
uint dst_cross_plane_pad
#endif // REINTERPRET_OUTPUT_AS_3D
)
{
// Block size
#define RHS_BLOCK_SIZE ((K0) * (N0))
// RHS offset and step X
#define RHS_OFFSET_X (RHS_BLOCK_SIZE)
uint x = get_global_id(0);
uint y = get_global_id(1);
uint z = get_global_id(2);
#if defined(DUMMY_WORK_ITEMS)
if((x * N0 >= N) || (y * M0 >= M))
{
return;
}
#endif // defined(DUMMY_WORK_ITEMS)
// Compute LHS matrix address
uint lhs_offset = lhs_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * (uint)lhs_stride_y;
// Compute RHS matrix address
uint rhs_offset = rhs_offset_first_element_in_bytes + x * N0 * sizeof(DATA_TYPE);
#if defined(MATRIX_B_DEPTH)
    // Do not slide matrix B if matrix B has 3 dimensions and matrix A has more than 3
rhs_offset += (z % MATRIX_B_DEPTH) * rhs_stride_z;
#else // defined(MATRIX_B_DEPTH)
rhs_offset += z * rhs_stride_z;
#endif // defined(MATRIX_B_DEPTH)
REPEAT_VAR_INIT_TO_CONST(M0, uint, zlhs, 0);
REPEAT_VAR_INIT_TO_CONST(16, uint, zero, 0);
#if defined(REINTERPRET_INPUT_AS_3D)
// The plane (zlhs) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, lhs_cross_plane_pad, lhs_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply lhs_stride_z by DEPTH_GEMM3D
lhs_offset += z * lhs_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_INPUT_AS_3D)
// Add offset for batched GEMM
lhs_offset += z * lhs_stride_z;
#endif // defined(REINTERPRET_INPUT_AS_3D)
// Initialize the accumulators
REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), c, 0); //VEC_DATA_TYPE(DATA_TYPE, N0) c0=0,c1=0,c2=0,... c(M0-1)=0;
int i = 0;
for(; i <= (K - K0); i += K0)
{
// Supported cases (M0, K0):
// 1,2 - 1,3 - 1,4 - 1,8 - 1,16
// 2,2 - 2,3 - 2,4 - 2,8 - 2,16
// 3,2 - 3,3 - 3,4 - 3,8 - 3,16
// 4,2 - 4,3 - 4,4 - 4,8 - 4,16
// 5,2 - 5,3 - 5,4 - 5,8 - 5,16
// 6,2 - 6,3 - 6,4 - 6,8 - 6,16
// 7,2 - 7,3 - 7,4 - 7,8 - 7,16
// 8,2 - 8,3 - 8,4 - 8,8 - 8,16
// Load values from LHS matrix
LOAD_BLOCK(M0, K0, DATA_TYPE, a, lhs_ptr, lhs_offset, lhs_stride_y, zlhs);
// Load values from RHS matrix
LOAD_BLOCK(K0, N0, DATA_TYPE, b, rhs_ptr, rhs_offset, rhs_stride_y, zero);
RHS_VFMA_M0xN0(0, a, b0, c);
RHS_VFMA_M0xN0(1, a, b1, c);
#if K0 > 2
RHS_VFMA_M0xN0(2, a, b2, c);
#endif // K0 > 2
#if K0 > 3
RHS_VFMA_M0xN0(3, a, b3, c);
#endif // K0 > 3
#if K0 > 4
RHS_VFMA_M0xN0(4, a, b4, c);
RHS_VFMA_M0xN0(5, a, b5, c);
RHS_VFMA_M0xN0(6, a, b6, c);
RHS_VFMA_M0xN0(7, a, b7, c);
#endif // K0 > 4
#if K0 > 8
RHS_VFMA_M0xN0(8, a, b8, c);
RHS_VFMA_M0xN0(9, a, b9, c);
RHS_VFMA_M0xN0(A, a, bA, c);
RHS_VFMA_M0xN0(B, a, bB, c);
RHS_VFMA_M0xN0(C, a, bC, c);
RHS_VFMA_M0xN0(D, a, bD, c);
RHS_VFMA_M0xN0(E, a, bE, c);
RHS_VFMA_M0xN0(F, a, bF, c);
#endif // K0 > 8
lhs_offset += K0 * sizeof(DATA_TYPE);
rhs_offset += K0 * rhs_stride_y;
}
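    // Editor's note: when K is not a multiple of K0, the loop above stops at the last complete K0 block
    // and the scalar loop below accumulates the remaining K % K0 columns one at a time.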
// Left-over accumulations
for(; i < K; ++i)
{
// Load values from LHS matrix
VEC_DATA_TYPE(DATA_TYPE, 2)
a0 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 0 * lhs_stride_y + zlhs0));
#if M0 > 1
VEC_DATA_TYPE(DATA_TYPE, 2)
a1 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 1 * lhs_stride_y + zlhs1));
#endif // M0 > 1
#if M0 > 2
VEC_DATA_TYPE(DATA_TYPE, 2)
a2 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 2 * lhs_stride_y + zlhs2));
#endif // M0 > 2
#if M0 > 3
VEC_DATA_TYPE(DATA_TYPE, 2)
a3 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 3 * lhs_stride_y + zlhs3));
#endif // M0 > 3
#if M0 > 4
VEC_DATA_TYPE(DATA_TYPE, 2)
a4 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 4 * lhs_stride_y + zlhs4));
#endif // M0 > 4
#if M0 > 5
VEC_DATA_TYPE(DATA_TYPE, 2)
a5 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 5 * lhs_stride_y + zlhs5));
#endif // M0 > 5
#if M0 > 6
VEC_DATA_TYPE(DATA_TYPE, 2)
a6 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 6 * lhs_stride_y + zlhs6));
#endif // M0 > 6
#if M0 > 7
VEC_DATA_TYPE(DATA_TYPE, 2)
a7 = *((__global DATA_TYPE *)(lhs_ptr + lhs_offset + 7 * lhs_stride_y + zlhs7));
#endif // M0 > 7
VEC_DATA_TYPE(DATA_TYPE, N0)
b = VLOAD(N0)(0, (__global DATA_TYPE *)(rhs_ptr + rhs_offset + 0 * rhs_stride_y));
RHS_VFMA_M0xN0(0, a, b, c);
lhs_offset += sizeof(DATA_TYPE);
rhs_offset += rhs_stride_y;
}
__global uchar *dst_addr = dst_ptr + dst_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * dst_stride_y);
REPEAT_VAR_INIT_TO_CONST(M0, uint, zout, 0);
#if defined(REINTERPRET_OUTPUT_AS_3D)
// The plane (zout) is calculated dividing M (y * M0) by HEIGHT_GEMM3D
CALCULATE_Z_OFFSET(M0, uint, zout, COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, dst_cross_plane_pad, dst_stride_y);
// Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
// multiply dst_stride_z by DEPTH_GEMM3D
dst_addr += z * dst_stride_z * DEPTH_GEMM3D;
#else // defined(REINTERPRET_OUTPUT_AS_3D)
// Add offset for batched GEMM
dst_addr += z * dst_stride_z;
#endif // defined(REINTERPRET_OUTPUT_AS_3D)
    // Multiply by the weight (alpha) of the matrix-matrix product
#if defined(ALPHA)
SCALE_BLOCK(M0, DATA_TYPE, c, ALPHA);
#endif // defined(ALPHA)
// Add beta*bias
#if defined(BETA)
#if defined(BROADCAST_BIAS)
    __global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE));
LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias[broadcasted]
ADD_BLOCK_BROADCAST(M0, c, bias0);
#else // defined(BROADCAST_BIAS)
__global uchar *bias_addr = bias_ptr + bias_offset_first_element_in_bytes + (x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) * bias_stride_y) + z * bias_stride_z;
LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, bias_stride_y, zero);
#ifndef UNIT_BETA
SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
#endif // UNIT_BETA
// c = c + bias
ADD_BLOCK(M0, c, bias);
#endif // defined(BROADCAST_BIAS)
#endif // defined(BETA)
#if defined(ACTIVATION_TYPE)
ACTIVATION_BLOCK(M0, ACTIVATION_TYPE, DATA_TYPE, VEC_SIZE, c, A_VAL, B_VAL);
#endif // defined(ACTIVATION_TYPE)
const bool cond_y = y == 0;
const bool cond_x = ((x + 1) * N0 >= N);
// Store output block
STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, c, dst_addr, dst_stride_y, zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, cond_y, cond_x);
#undef RHS_BLOCK_SIZE
#undef RHS_OFFSET_X
#undef RHS_STEP_X
}
#endif // defined(M0) && defined(N0) && defined(K0) && defined(K) && defined(DATA_TYPE)
#if defined(BETA)
/** This OpenCL kernel performs the in-place matrix addition between 2 matrices, taking into account that the second matrix might be weighted by a scalar value beta.
 *
 * @note The value of beta needs to be passed at compile time using -DBETA
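 * @note Editor's note: each work item processes 4 consecutive F32 elements (vload4/vstore4), so the
 *       host side is assumed to dispatch the X dimension of the NDRange over the matrix width divided by 4.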
*
* @param[in] src_ptr Pointer to the source matrix. Supported data types: F32
* @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src_stride_z                      Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_step_z                        src_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source matrix
 * @param[out] dst_ptr                           Pointer to the destination matrix. Supported data types: same as @p src_ptr
 * @param[in]  dst_stride_x                      Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
__kernel void gemm_ma_f32(TENSOR3D_DECLARATION(src),
TENSOR3D_DECLARATION(dst))
{
// Compute source and destination addresses
Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
// Load values from A x B
float4 alpha_ab = vload4(0, (__global float *)dst.ptr);
// Load values from Matrix C
float4 c = vload4(0, (__global float *)src.ptr);
// Computes alpha * axb + beta * c
float4 out = alpha_ab + (float4)BETA * c;
// Store final result in axb matrix
vstore4(out, 0, (__global float *)dst.ptr);
}
#if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
/** This OpenCL kernel performs the in-place matrix addition between 2 matrices, taking into account that the second matrix might be weighted by a scalar value beta.
 *
 * @note The value of beta needs to be passed at compile time using -DBETA
*
* @param[in] src_ptr Pointer to the source matrix. Supported data types: F16
* @param[in] src_stride_x Stride of the source matrix in X dimension (in bytes)
* @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] src_stride_y Stride of the source matrix in Y dimension (in bytes)
* @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src_stride_z                      Stride of the source tensor in Z dimension (in bytes)
 * @param[in]  src_step_z                        src_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source matrix
 * @param[out] dst_ptr                           Pointer to the destination matrix. Supported data types: same as @p src_ptr
 * @param[in]  dst_stride_x                      Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                      Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)
* @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
__kernel void gemm_ma_f16(TENSOR3D_DECLARATION(src),
TENSOR3D_DECLARATION(dst))
{
// Compute source and destination addresses
Tensor3D src = CONVERT_TO_TENSOR3D_STRUCT(src);
Tensor3D dst = CONVERT_TO_TENSOR3D_STRUCT(dst);
// Load values from A x B
half8 alpha_ab = vload8(0, (__global half *)dst.ptr);
// Load values from Matrix C
half8 c = vload8(0, (__global half *)src.ptr);
// Computes alpha * axb + beta * c
half8 out = alpha_ab + (half8)BETA * c;
// Store final result in axb matrix
vstore8(out, 0, (__global half *)dst.ptr);
}
#endif // defined(ARM_COMPUTE_OPENCL_FP16_ENABLED)
#endif // defined(BETA)
#if defined(WIDTH_VECTOR_A)
/** This OpenCL kernel computes the vector-by-matrix multiplication between each row of A (src0) and matrix B (src1), used for the locally connected layer
 *
 * @note The width of A needs to be passed at compile time using -DWIDTH_VECTOR_A
 *
 * @note The input A and matrix B must not be reshaped
*
* @param[in] src0_ptr Pointer to the source matrix. Supported data types: F32
* @param[in] src0_stride_x Stride of the source matrix in X dimension (in bytes)
 * @param[in]  src0_step_x                        src0_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  src0_stride_y                      Stride of the source matrix in Y dimension (in bytes)
 * @param[in]  src0_step_y                        src0_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src0_offset_first_element_in_bytes The offset of the first element in the source matrix
 * @param[in]  src1_ptr                           Pointer to the source matrix. Supported data types: same as @p src0_ptr
 * @param[in]  src1_stride_x                      Stride of the source matrix in X dimension (in bytes)
 * @param[in]  src1_step_x                        src1_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  src1_stride_y                      Stride of the source matrix in Y dimension (in bytes)
 * @param[in]  src1_step_y                        src1_stride_y * number of elements along Y processed per workitem(in bytes)
 * @param[in]  src1_stride_z                      Stride of the source matrix in Z dimension (in bytes)
 * @param[in]  src1_step_z                        src1_stride_z * number of elements along Z processed per workitem(in bytes)
 * @param[in]  src1_offset_first_element_in_bytes The offset of the first element in the source matrix
 * @param[out] dst_ptr                            Pointer to the destination matrix. Supported data types: same as @p src0_ptr
 * @param[in]  dst_stride_x                       Stride of the destination matrix in X dimension (in bytes)
 * @param[in]  dst_step_x                         dst_stride_x * number of elements along X processed per workitem(in bytes)
 * @param[in]  dst_stride_y                       Stride of the destination matrix in Y dimension (in bytes)
 * @param[in]  dst_step_y                         dst_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination matrix
*/
__kernel void gemm_lc_vm_f32(IMAGE_DECLARATION(src0),
TENSOR3D_DECLARATION(src1),
IMAGE_DECLARATION(dst))
{
int idx = get_global_id(0) * 4;
int idy = get_global_id(1);
// Compute the address for the vector A and matrix B
int2 src_addr = ((int2)(src0_offset_first_element_in_bytes + src0_stride_y * idy, src1_offset_first_element_in_bytes + src1_stride_z * idy));
src_addr.s1 += idx * sizeof(float);
int end_row_vec_a = src_addr.s0 + (WIDTH_VECTOR_A * sizeof(float));
float4 acc = 0.0f;
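    // Editor's note: the main loop below consumes the row of A two elements at a time, performing two
    // rank-1 updates (4 output columns each) per iteration; the tail loop afterwards handles an odd
    // WIDTH_VECTOR_A one element at a time.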
for(; src_addr.s0 <= (end_row_vec_a - 2 * (int)sizeof(float)); src_addr += (int2)(2 * sizeof(float), 2 * src1_stride_y))
{
float2 a0 = vload2(0, (__global float *)(src0_ptr + src_addr.s0));
float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
float4 b1 = vload4(0, (__global float *)(src1_ptr + src_addr.s1 + src1_stride_y));
acc += b0 * (float4)a0.s0;
acc += b1 * (float4)a0.s1;
}
for(; src_addr.s0 < end_row_vec_a; src_addr += (int2)(sizeof(float), src1_stride_y))
{
float a0 = *((__global float *)(src0_ptr + src_addr.s0));
float4 b0 = vload4(0, (__global float *)(src1_ptr + src_addr.s1));
acc += b0 * (float4)a0;
}
// Compute destination address
Image dst = CONVERT_TO_IMAGE_STRUCT(dst);
vstore4(acc, 0, (__global float *)(offset(&dst, 0, 0)));
}
#endif // defined(WIDTH_VECTOR_A)
)"