<div class="header">
<div class="headertitle">
<div class="title">NEGEMMLowpOffsetContributionOutputStageKernel.cpp</div> </div>
</div><!--header-->
<div class="contents">
<a href="_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_8cpp.xhtml">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment"> * Copyright (c) 2019-2020 ARM Limited.</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment"> * SPDX-License-Identifier: MIT</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="comment"> * Permission is hereby granted, free of charge, to any person obtaining a copy</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="comment"> * of this software and associated documentation files (the &quot;Software&quot;), to</span></div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="comment"> * deal in the Software without restriction, including without limitation the</span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="comment"> * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="comment"> * sell copies of the Software, and to permit persons to whom the Software is</span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;<span class="comment"> * furnished to do so, subject to the following conditions:</span></div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160;<span class="comment"> * The above copyright notice and this permission notice shall be included in all</span></div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="comment"> * copies or substantial portions of the Software.</span></div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;<span class="comment"> * THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR</span></div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="comment"> * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,</span></div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160;<span class="comment"> * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE</span></div><div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160;<span class="comment"> * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER</span></div><div class="line"><a name="l00020"></a><span class="lineno"> 20</span>&#160;<span class="comment"> * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,</span></div><div class="line"><a name="l00021"></a><span class="lineno"> 21</span>&#160;<span class="comment"> * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE</span></div><div class="line"><a name="l00022"></a><span class="lineno"> 22</span>&#160;<span class="comment"> * SOFTWARE.</span></div><div class="line"><a name="l00023"></a><span class="lineno"> 23</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_8h.xhtml">arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h</a>&quot;</span></div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;</div><div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_access_window_static_8h.xhtml">arm_compute/core/AccessWindowStatic.h</a>&quot;</span></div><div class="line"><a name="l00027"></a><span class="lineno"> 27</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_error_8h.xhtml">arm_compute/core/Error.h</a>&quot;</span></div><div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="arm__compute_2core_2_helpers_8h.xhtml">arm_compute/core/Helpers.h</a>&quot;</span></div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_i_tensor_8h.xhtml">arm_compute/core/ITensor.h</a>&quot;</span></div><div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_asymm_8h.xhtml">arm_compute/core/NEON/NEAsymm.h</a>&quot;</span></div><div class="line"><a name="l00031"></a><span class="lineno"> 31</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="wrapper_8h.xhtml">arm_compute/core/NEON/wrapper/wrapper.h</a>&quot;</span></div><div class="line"><a name="l00032"></a><span class="lineno"> 32</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_tensor_info_8h.xhtml">arm_compute/core/TensorInfo.h</a>&quot;</span></div><div class="line"><a name="l00033"></a><span class="lineno"> 33</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="arm__compute_2core_2_types_8h.xhtml">arm_compute/core/Types.h</a>&quot;</span></div><div class="line"><a name="l00034"></a><span class="lineno"> 34</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="arm__compute_2core_2_utils_8h.xhtml">arm_compute/core/Utils.h</a>&quot;</span></div><div class="line"><a name="l00035"></a><span class="lineno"> 35</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_validate_8h.xhtml">arm_compute/core/Validate.h</a>&quot;</span></div><div class="line"><a name="l00036"></a><span class="lineno"> 36</span>&#160;<span class="preprocessor">#include &quot;<a class="code" 
href="_window_8h.xhtml">arm_compute/core/Window.h</a>&quot;</span></div><div class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160;</div><div class="line"><a name="l00038"></a><span class="lineno"> 38</span>&#160;<span class="preprocessor">#include &lt;arm_neon.h&gt;</span></div><div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160;<span class="preprocessor">#include &lt;cstddef&gt;</span></div><div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160;<span class="preprocessor">#include &lt;cstdint&gt;</span></div><div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160;<span class="preprocessor">#include &lt;map&gt;</span></div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160;</div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160;<span class="keyword">namespace </span><a class="code" href="namespacearm__compute.xhtml">arm_compute</a></div><div class="line"><a name="l00044"></a><span class="lineno"> 44</span>&#160;{</div><div class="line"><a name="l00045"></a><span class="lineno"> 45</span>&#160;<span class="keyword">class </span>Coordinates;</div><div class="line"><a name="l00046"></a><span class="lineno"> 46</span>&#160;</div><div class="line"><a name="l00047"></a><span class="lineno"> 47</span>&#160;<span class="keyword">namespace</span></div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160;{</div><div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160;<span class="keyword">inline</span> int32x4x4_t load_results_input(<span class="keyword">const</span> Iterator &amp;mm_result_it, int32_t x)</div><div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160;{</div><div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160; <span class="keywordflow">return</span></div><div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160; {</div><div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160; {</div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160; vld1q_s32(reinterpret_cast&lt;const int32_t *&gt;(mm_result_it.ptr()) + x + 0),</div><div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160; vld1q_s32(reinterpret_cast&lt;const int32_t *&gt;(mm_result_it.ptr()) + x + 4),</div><div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160; vld1q_s32(reinterpret_cast&lt;const int32_t *&gt;(mm_result_it.ptr()) + x + 8),</div><div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160; vld1q_s32(reinterpret_cast&lt;const int32_t *&gt;(mm_result_it.ptr()) + x + 12)</div><div class="line"><a name="l00058"></a><span class="lineno"> 58</span>&#160; }</div><div class="line"><a name="l00059"></a><span class="lineno"> 59</span>&#160; };</div><div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160;}</div><div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160;</div><div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160;<span class="keyword">inline</span> int32x4x4_t load(<span class="keyword">const</span> int32_t *ptr, int32_t x)</div><div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160;{</div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160; <span class="keywordflow">return</span></div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160; {</div><div 
class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160; {</div><div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160; vld1q_s32(ptr + x + 0),</div><div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160; vld1q_s32(ptr + x + 4),</div><div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160; vld1q_s32(ptr + x + 8),</div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160; vld1q_s32(ptr + x + 12)</div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160; }</div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160; };</div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160;}</div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160;</div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160;<span class="keyword">inline</span> int32x4x4_t add_s32(int32x4x4_t a, int32x4_t <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>)</div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160;{</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160; <span class="keywordflow">return</span></div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; {</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160; {</div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160; vaddq_s32(a.val[0], <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>),</div><div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160; vaddq_s32(a.val[1], <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>),</div><div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160; vaddq_s32(a.val[2], <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>),</div><div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160; vaddq_s32(a.val[3], <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>)</div><div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160; }</div><div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160; };</div><div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160;}</div><div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160;</div><div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160;<span class="keyword">inline</span> int32x4x4_t add_s32(int32x4x4_t a, int32x4x4_t <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>)</div><div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160;{</div><div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160; <span class="keywordflow">return</span></div><div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; {</div><div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160; {</div><div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160; vaddq_s32(a.val[0], <a class="code" 
href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>.val[0]),</div><div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160; vaddq_s32(a.val[1], <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>.val[1]),</div><div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160; vaddq_s32(a.val[2], <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>.val[2]),</div><div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160; vaddq_s32(a.val[3], <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">b</a>.val[3])</div><div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160; }</div><div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160; };</div><div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160;}</div><div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160;</div><div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160;<span class="keyword">inline</span> int32x4x4_t mul_s32(int32x4x4_t &amp;a, int32_t mul_scalar)</div><div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160;{</div><div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160; <span class="keywordflow">return</span></div><div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160; {</div><div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160; {</div><div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160; vmulq_n_s32(a.val[0], mul_scalar),</div><div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160; vmulq_n_s32(a.val[1], mul_scalar),</div><div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160; vmulq_n_s32(a.val[2], mul_scalar),</div><div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160; vmulq_n_s32(a.val[3], mul_scalar)</div><div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160; }</div><div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160; };</div><div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160;}</div><div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160;</div><div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160;<span class="keyword">inline</span> int32x4x4_t mul_s32(int32x4x4_t &amp;a, <span class="keyword">const</span> int32_t *multilpier)</div><div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160;{</div><div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160; <span class="keywordflow">return</span></div><div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160; {</div><div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160; {</div><div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160; vmulq_s32(a.val[0], vld1q_s32(multilpier)),</div><div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160; vmulq_s32(a.val[1], vld1q_s32(multilpier + 4)),</div><div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160; vmulq_s32(a.val[2], vld1q_s32(multilpier + 8)),</div><div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160; 
vmulq_s32(a.val[3], vld1q_s32(multilpier + 12))</div><div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160; }</div><div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160; };</div><div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160;}</div><div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160;</div><div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160;<span class="keyword">inline</span> int32x4x4_t get_a_offset(<span class="keyword">const</span> int32_t *vector_sum_col_ptr, int32_t a_offset, int32_t x)</div><div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160;{</div><div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160; int32x4x4_t a_offset_term_s32 = load(vector_sum_col_ptr, x);</div><div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160;</div><div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160; a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);</div><div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160; a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);</div><div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160; a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);</div><div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160; a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);</div><div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160; <span class="keywordflow">return</span> a_offset_term_s32;</div><div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160;}</div><div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160;</div><div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160;<span class="keyword">inline</span> int32x4_t get_b_offset(<span class="keyword">const</span> int32_t *vector_sum_row_ptr, int32_t b_offset)</div><div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160;{</div><div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; int32x4_t b_offset_term_s32 = vld1q_dup_s32(vector_sum_row_ptr);</div><div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; b_offset_term_s32 = vmulq_n_s32(b_offset_term_s32, b_offset);</div><div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; <span class="keywordflow">return</span> b_offset_term_s32;</div><div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160;}</div><div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160;</div><div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160;<span class="keyword">inline</span> int32x4x4_t get_k_offset(int32_t k_offset)</div><div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160;{</div><div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; <span class="keywordflow">return</span></div><div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; {</div><div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; {</div><div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; vdupq_n_s32(k_offset),</div><div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160; vdupq_n_s32(k_offset),</div><div 
class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160; vdupq_n_s32(k_offset),</div><div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160; vdupq_n_s32(k_offset)</div><div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160; }</div><div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160; };</div><div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160;}</div><div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160;</div><div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> is_bounded_relu&gt;</div><div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160;<span class="keyword">inline</span> uint8x16_t finalize_quantization_floating_point(int32x4x4_t &amp;in_s32, int32x4_t result_shift_s32, uint8x16_t min_u8, uint8x16_t max_u8)</div><div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160;{</div><div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160; <span class="keyword">const</span> <span class="keyword">static</span> int32x4_t zero_s32 = vdupq_n_s32(0);</div><div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160;</div><div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; <span class="comment">// Shift final result (negative value shift right)</span></div><div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);</div><div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);</div><div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);</div><div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160; in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);</div><div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160;</div><div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160; <span class="comment">// Saturate negative values</span></div><div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160; in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);</div><div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);</div><div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160; in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);</div><div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);</div><div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160;</div><div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; <span class="comment">// Convert S32 to S16</span></div><div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; <span class="keyword">const</span> int16x8x2_t in_s16 =</div><div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160; {</div><div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; {</div><div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160; vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),</div><div class="line"><a 
name="l00180"></a><span class="lineno"> 180</span>&#160; vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))</div><div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; }</div><div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160; };</div><div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160;</div><div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; <span class="comment">// Convert S16 to U8</span></div><div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160; uint8x16_t out_u8 = vcombine_u8(vqmovun_s16(in_s16.val[0]), vqmovun_s16(in_s16.val[1]));</div><div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160;</div><div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160; <span class="keywordflow">if</span>(is_bounded_relu)</div><div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160; {</div><div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160; out_u8 = vmaxq_u8(out_u8, min_u8);</div><div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; out_u8 = vminq_u8(out_u8, max_u8);</div><div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; }</div><div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160;</div><div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160; <span class="keywordflow">return</span> out_u8;</div><div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160;}</div><div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160;</div><div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> is_bounded_relu&gt;</div><div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160;<span class="keyword">inline</span> int8x16_t finalize_quantization_floating_point(int32x4x4_t &amp;in_s32, int32x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8)</div><div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160;{</div><div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160; <span class="keyword">const</span> <span class="keyword">static</span> int32x4_t zero_s32 = vdupq_n_s32(0);</div><div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160;</div><div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160; <span class="comment">// Shift final result (negative value shift right)</span></div><div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160; in_s32.val[0] = vshlq_s32(in_s32.val[0], result_shift_s32);</div><div class="line"><a name="l00203"></a><span class="lineno"> 203</span>&#160; in_s32.val[1] = vshlq_s32(in_s32.val[1], result_shift_s32);</div><div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160; in_s32.val[2] = vshlq_s32(in_s32.val[2], result_shift_s32);</div><div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160; in_s32.val[3] = vshlq_s32(in_s32.val[3], result_shift_s32);</div><div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160;</div><div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160; <span class="comment">// Saturate negative values</span></div><div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160; in_s32.val[0] = 
vmaxq_s32(in_s32.val[0], zero_s32);</div><div class="line"><a name="l00209"></a><span class="lineno"> 209</span>&#160; in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);</div><div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160; in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);</div><div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160; in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);</div><div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160;</div><div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160; <span class="comment">// Convert S32 to S16</span></div><div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160; <span class="keyword">const</span> int16x8x2_t in_s16 =</div><div class="line"><a name="l00215"></a><span class="lineno"> 215</span>&#160; {</div><div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160; {</div><div class="line"><a name="l00217"></a><span class="lineno"> 217</span>&#160; vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),</div><div class="line"><a name="l00218"></a><span class="lineno"> 218</span>&#160; vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))</div><div class="line"><a name="l00219"></a><span class="lineno"> 219</span>&#160; }</div><div class="line"><a name="l00220"></a><span class="lineno"> 220</span>&#160; };</div><div class="line"><a name="l00221"></a><span class="lineno"> 221</span>&#160;</div><div class="line"><a name="l00222"></a><span class="lineno"> 222</span>&#160; <span class="comment">// Convert S16 to S8</span></div><div class="line"><a name="l00223"></a><span class="lineno"> 223</span>&#160; int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));</div><div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160;</div><div class="line"><a name="l00225"></a><span class="lineno"> 225</span>&#160; <span class="keywordflow">if</span>(is_bounded_relu)</div><div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160; {</div><div class="line"><a name="l00227"></a><span class="lineno"> 227</span>&#160; out_s8 = vmaxq_s8(out_s8, min_s8);</div><div class="line"><a name="l00228"></a><span class="lineno"> 228</span>&#160; out_s8 = vminq_s8(out_s8, max_s8);</div><div class="line"><a name="l00229"></a><span class="lineno"> 229</span>&#160; }</div><div class="line"><a name="l00230"></a><span class="lineno"> 230</span>&#160;</div><div class="line"><a name="l00231"></a><span class="lineno"> 231</span>&#160; <span class="keywordflow">return</span> out_s8;</div><div class="line"><a name="l00232"></a><span class="lineno"> 232</span>&#160;}</div><div class="line"><a name="l00233"></a><span class="lineno"> 233</span>&#160;</div><div class="line"><a name="l00234"></a><span class="lineno"> 234</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> is_bounded_relu&gt;</div><div class="line"><a name="l00235"></a><span class="lineno"> 235</span>&#160;<span class="keyword">inline</span> int8x16_t finalize_quantization_floating_point(int32x4x4_t &amp;in_s32, int32x4x4_t result_shift_s32, int8x16_t min_s8, int8x16_t max_s8)</div><div class="line"><a name="l00236"></a><span class="lineno"> 236</span>&#160;{</div><div class="line"><a name="l00237"></a><span class="lineno"> 237</span>&#160; <span class="keyword">const</span> <span class="keyword">static</span> int32x4_t zero_s32 = vdupq_n_s32(0);</div><div 
class="line"><a name="l00238"></a><span class="lineno"> 238</span>&#160;</div><div class="line"><a name="l00239"></a><span class="lineno"> 239</span>&#160; <span class="comment">// Shift final result (negative value shift right)</span></div><div class="line"><a name="l00240"></a><span class="lineno"> 240</span>&#160; in_s32.val[0] = vshlq_s32(in_s32.val[0], vnegq_s32(result_shift_s32.val[0]));</div><div class="line"><a name="l00241"></a><span class="lineno"> 241</span>&#160; in_s32.val[1] = vshlq_s32(in_s32.val[1], vnegq_s32(result_shift_s32.val[1]));</div><div class="line"><a name="l00242"></a><span class="lineno"> 242</span>&#160; in_s32.val[2] = vshlq_s32(in_s32.val[2], vnegq_s32(result_shift_s32.val[2]));</div><div class="line"><a name="l00243"></a><span class="lineno"> 243</span>&#160; in_s32.val[3] = vshlq_s32(in_s32.val[3], vnegq_s32(result_shift_s32.val[3]));</div><div class="line"><a name="l00244"></a><span class="lineno"> 244</span>&#160;</div><div class="line"><a name="l00245"></a><span class="lineno"> 245</span>&#160; <span class="comment">// Saturate negative values</span></div><div class="line"><a name="l00246"></a><span class="lineno"> 246</span>&#160; in_s32.val[0] = vmaxq_s32(in_s32.val[0], zero_s32);</div><div class="line"><a name="l00247"></a><span class="lineno"> 247</span>&#160; in_s32.val[1] = vmaxq_s32(in_s32.val[1], zero_s32);</div><div class="line"><a name="l00248"></a><span class="lineno"> 248</span>&#160; in_s32.val[2] = vmaxq_s32(in_s32.val[2], zero_s32);</div><div class="line"><a name="l00249"></a><span class="lineno"> 249</span>&#160; in_s32.val[3] = vmaxq_s32(in_s32.val[3], zero_s32);</div><div class="line"><a name="l00250"></a><span class="lineno"> 250</span>&#160;</div><div class="line"><a name="l00251"></a><span class="lineno"> 251</span>&#160; <span class="comment">// Convert S32 to S16</span></div><div class="line"><a name="l00252"></a><span class="lineno"> 252</span>&#160; <span class="keyword">const</span> int16x8x2_t in_s16 =</div><div class="line"><a name="l00253"></a><span class="lineno"> 253</span>&#160; {</div><div class="line"><a name="l00254"></a><span class="lineno"> 254</span>&#160; {</div><div class="line"><a name="l00255"></a><span class="lineno"> 255</span>&#160; vcombine_s16(vqmovn_s32(in_s32.val[0]), vqmovn_s32(in_s32.val[1])),</div><div class="line"><a name="l00256"></a><span class="lineno"> 256</span>&#160; vcombine_s16(vqmovn_s32(in_s32.val[2]), vqmovn_s32(in_s32.val[3]))</div><div class="line"><a name="l00257"></a><span class="lineno"> 257</span>&#160; }</div><div class="line"><a name="l00258"></a><span class="lineno"> 258</span>&#160; };</div><div class="line"><a name="l00259"></a><span class="lineno"> 259</span>&#160;</div><div class="line"><a name="l00260"></a><span class="lineno"> 260</span>&#160; <span class="comment">// Convert S16 to S8</span></div><div class="line"><a name="l00261"></a><span class="lineno"> 261</span>&#160; int8x16_t out_s8 = vcombine_s8(vqmovn_s16(in_s16.val[0]), vqmovn_s16(in_s16.val[1]));</div><div class="line"><a name="l00262"></a><span class="lineno"> 262</span>&#160;</div><div class="line"><a name="l00263"></a><span class="lineno"> 263</span>&#160; <span class="keywordflow">if</span>(is_bounded_relu)</div><div class="line"><a name="l00264"></a><span class="lineno"> 264</span>&#160; {</div><div class="line"><a name="l00265"></a><span class="lineno"> 265</span>&#160; out_s8 = vmaxq_s8(out_s8, min_s8);</div><div class="line"><a name="l00266"></a><span class="lineno"> 266</span>&#160; out_s8 = 
vminq_s8(out_s8, max_s8);</div><div class="line"><a name="l00267"></a><span class="lineno"> 267</span>&#160; }</div><div class="line"><a name="l00268"></a><span class="lineno"> 268</span>&#160;</div><div class="line"><a name="l00269"></a><span class="lineno"> 269</span>&#160; <span class="keywordflow">return</span> out_s8;</div><div class="line"><a name="l00270"></a><span class="lineno"> 270</span>&#160;}</div><div class="line"><a name="l00271"></a><span class="lineno"> 271</span>&#160;</div><div class="line"><a name="l00272"></a><span class="lineno"> 272</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div><div class="line"><a name="l00273"></a><span class="lineno"> 273</span>&#160;<span class="keyword">struct </span>VectorTyper</div><div class="line"><a name="l00274"></a><span class="lineno"> 274</span>&#160;{</div><div class="line"><a name="l00275"></a><span class="lineno"> 275</span>&#160; <span class="keyword">using</span> stype = T;</div><div class="line"><a name="l00276"></a><span class="lineno"> 276</span>&#160; <span class="keyword">using</span> vtype = <span class="keyword">typename</span> wrapper::traits::neon_bitvector_t&lt;T, wrapper::traits::BitWidth::W128&gt;;</div><div class="line"><a name="l00277"></a><span class="lineno"> 277</span>&#160;};</div><div class="line"><a name="l00278"></a><span class="lineno"> 278</span>&#160;</div><div class="line"><a name="l00279"></a><span class="lineno"> 279</span>&#160;<span class="keyword">inline</span> Window get_win_vector_sum(<span class="keyword">const</span> Window &amp;window)</div><div class="line"><a name="l00280"></a><span class="lineno"> 280</span>&#160;{</div><div class="line"><a name="l00281"></a><span class="lineno"> 281</span>&#160; Window win_vector_sum(window);</div><div class="line"><a name="l00282"></a><span class="lineno"> 282</span>&#160; win_vector_sum.set(<a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>, Window::Dimension(0, 0, 0));</div><div class="line"><a name="l00283"></a><span class="lineno"> 283</span>&#160; win_vector_sum.set(<a class="code" href="classarm__compute_1_1_window.xhtml#a893d17b56b9abc4423ce26e9a24ac5dc">Window::DimZ</a>, Window::Dimension(0, 0, 0));</div><div class="line"><a name="l00284"></a><span class="lineno"> 284</span>&#160; <span class="keywordflow">return</span> win_vector_sum;</div><div class="line"><a name="l00285"></a><span class="lineno"> 285</span>&#160;}</div><div class="line"><a name="l00286"></a><span class="lineno"> 286</span>&#160;</div><div class="line"><a name="l00287"></a><span class="lineno"> 287</span>&#160;<span class="keyword">inline</span> Iterator get_vector_sum_col_it(<span class="keyword">const</span> Window &amp;window, <span class="keyword">const</span> ITensor *vector_sum_col)</div><div class="line"><a name="l00288"></a><span class="lineno"> 288</span>&#160;{</div><div class="line"><a name="l00289"></a><span class="lineno"> 289</span>&#160; Iterator vector_sum_col_it(vector_sum_col, get_win_vector_sum(window));</div><div class="line"><a name="l00290"></a><span class="lineno"> 290</span>&#160; <span class="keywordflow">return</span> vector_sum_col_it;</div><div class="line"><a name="l00291"></a><span class="lineno"> 291</span>&#160;}</div><div class="line"><a name="l00292"></a><span class="lineno"> 292</span>&#160;</div><div class="line"><a name="l00293"></a><span class="lineno"> 293</span>&#160;<span class="keyword">inline</span> Iterator 
get_vector_sum_row_it(<span class="keyword">const</span> Window &amp;window, <span class="keyword">const</span> ITensor *vector_sum_row)</div><div class="line"><a name="l00294"></a><span class="lineno"> 294</span>&#160;{</div><div class="line"><a name="l00295"></a><span class="lineno"> 295</span>&#160; Window win_vector_sum_row = get_win_vector_sum(window);</div><div class="line"><a name="l00296"></a><span class="lineno"> 296</span>&#160; win_vector_sum_row.set(<a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>, Window::Dimension(0, 0, 0));</div><div class="line"><a name="l00297"></a><span class="lineno"> 297</span>&#160; Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);</div><div class="line"><a name="l00298"></a><span class="lineno"> 298</span>&#160; <span class="keywordflow">return</span> vector_sum_row_it;</div><div class="line"><a name="l00299"></a><span class="lineno"> 299</span>&#160;}</div><div class="line"><a name="l00300"></a><span class="lineno"> 300</span>&#160;</div><div class="line"><a name="l00301"></a><span class="lineno"> 301</span>&#160;<span class="keyword">inline</span> Iterator get_bias_it(<span class="keyword">const</span> Window &amp;window, <span class="keyword">const</span> ITensor *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>)</div><div class="line"><a name="l00302"></a><span class="lineno"> 302</span>&#160;{</div><div class="line"><a name="l00303"></a><span class="lineno"> 303</span>&#160; Window win_bias(window);</div><div class="line"><a name="l00304"></a><span class="lineno"> 304</span>&#160; win_bias.set(<a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>, Window::Dimension(0, 1, 1));</div><div class="line"><a name="l00305"></a><span class="lineno"> 305</span>&#160; win_bias.set(<a class="code" href="classarm__compute_1_1_window.xhtml#a893d17b56b9abc4423ce26e9a24ac5dc">Window::DimZ</a>, Window::Dimension(0, 1, 1));</div><div class="line"><a name="l00306"></a><span class="lineno"> 306</span>&#160; Iterator bias_it(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>, win_bias);</div><div class="line"><a name="l00307"></a><span class="lineno"> 307</span>&#160; <span class="keywordflow">return</span> bias_it;</div><div class="line"><a name="l00308"></a><span class="lineno"> 308</span>&#160;}</div><div class="line"><a name="l00309"></a><span class="lineno"> 309</span>&#160;</div><div class="line"><a name="l00310"></a><span class="lineno"> 310</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> VT, <span class="keywordtype">bool</span> has_a_offset, <span class="keywordtype">bool</span> has_b_offset, <span class="keywordtype">bool</span> has_bias, <span class="keywordtype">bool</span> is_bounded_relu, <span class="keywordtype">bool</span> is_fixed_po<span class="keywordtype">int</span>&gt;</div><div class="line"><a name="l00311"></a><span class="lineno"> 311</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> run_offset_contribution_output_stage_window(<span class="keyword">const</span> int32_t *vector_sum_col_ptr, <span class="keyword">const</span> int32_t *vector_sum_row_ptr, <span class="keyword">const</span> int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,</div><div class="line"><a name="l00312"></a><span 
class="lineno"> 312</span>&#160; <span class="keyword">const</span> int32x4_t result_offset_s32, <span class="keyword">const</span> int32x4_t result_shift_s32,</div><div class="line"><a name="l00313"></a><span class="lineno"> 313</span>&#160; <span class="keyword">typename</span> VT::vtype min_vec, <span class="keyword">typename</span> VT::vtype max_vec,</div><div class="line"><a name="l00314"></a><span class="lineno"> 314</span>&#160; int32_t a_offset, int32_t b_offset, int32_t k_offset,</div><div class="line"><a name="l00315"></a><span class="lineno"> 315</span>&#160; int32_t multiplier, int32_t <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, int32_t <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, int32_t min_bound, int32_t max_bound,</div><div class="line"><a name="l00316"></a><span class="lineno"> 316</span>&#160; <span class="keywordtype">int</span> window_step_x, <span class="keywordtype">int</span> window_start_x, <span class="keywordtype">int</span> window_end_x)</div><div class="line"><a name="l00317"></a><span class="lineno"> 317</span>&#160;{</div><div class="line"><a name="l00318"></a><span class="lineno"> 318</span>&#160; int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };</div><div class="line"><a name="l00319"></a><span class="lineno"> 319</span>&#160; <span class="keywordflow">if</span>(!is_fixed_point)</div><div class="line"><a name="l00320"></a><span class="lineno"> 320</span>&#160; {</div><div class="line"><a name="l00321"></a><span class="lineno"> 321</span>&#160; <span class="comment">// Combine quantization offset with other offsets.</span></div><div class="line"><a name="l00322"></a><span class="lineno"> 322</span>&#160; offset_term_s32 = add_s32(offset_term_s32, result_offset_s32);</div><div class="line"><a name="l00323"></a><span class="lineno"> 323</span>&#160; }</div><div class="line"><a name="l00324"></a><span class="lineno"> 324</span>&#160; <span class="keywordflow">if</span>(has_a_offset &amp;&amp; has_b_offset)</div><div class="line"><a name="l00325"></a><span class="lineno"> 325</span>&#160; {</div><div class="line"><a name="l00326"></a><span class="lineno"> 326</span>&#160; offset_term_s32 = add_s32(offset_term_s32, get_k_offset(k_offset));</div><div class="line"><a name="l00327"></a><span class="lineno"> 327</span>&#160; }</div><div class="line"><a name="l00328"></a><span class="lineno"> 328</span>&#160; <span class="keywordflow">if</span>(has_b_offset)</div><div class="line"><a name="l00329"></a><span class="lineno"> 329</span>&#160; {</div><div class="line"><a name="l00330"></a><span class="lineno"> 330</span>&#160; offset_term_s32 = add_s32(offset_term_s32, get_b_offset(vector_sum_row_ptr, b_offset));</div><div class="line"><a name="l00331"></a><span class="lineno"> 331</span>&#160; }</div><div class="line"><a name="l00332"></a><span class="lineno"> 332</span>&#160;</div><div class="line"><a name="l00333"></a><span class="lineno"> 333</span>&#160; <span class="keywordtype">int</span> x = window_start_x;</div><div class="line"><a name="l00334"></a><span class="lineno"> 334</span>&#160; <span class="keywordflow">for</span>(; x &lt;= (window_end_x - window_step_x); x += window_step_x)</div><div class="line"><a name="l00335"></a><span class="lineno"> 335</span>&#160; {</div><div class="line"><a name="l00336"></a><span class="lineno"> 336</span>&#160; int32x4x4_t in_s32 = load_results_input(mm_result_it, x);</div><div 
class="line"><a name="l00337"></a><span class="lineno"> 337</span>&#160;</div><div class="line"><a name="l00338"></a><span class="lineno"> 338</span>&#160; <span class="keywordflow">if</span>(has_a_offset)</div><div class="line"><a name="l00339"></a><span class="lineno"> 339</span>&#160; {</div><div class="line"><a name="l00340"></a><span class="lineno"> 340</span>&#160; in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));</div><div class="line"><a name="l00341"></a><span class="lineno"> 341</span>&#160; }</div><div class="line"><a name="l00342"></a><span class="lineno"> 342</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a9aeced5a5128f60a31ea3e327a45ee21">has_bias</a>)</div><div class="line"><a name="l00343"></a><span class="lineno"> 343</span>&#160; {</div><div class="line"><a name="l00344"></a><span class="lineno"> 344</span>&#160; in_s32 = add_s32(in_s32, load(bias_ptr, x));</div><div class="line"><a name="l00345"></a><span class="lineno"> 345</span>&#160; }</div><div class="line"><a name="l00346"></a><span class="lineno"> 346</span>&#160; <span class="keywordflow">if</span>(!is_fixed_point || has_b_offset)</div><div class="line"><a name="l00347"></a><span class="lineno"> 347</span>&#160; {</div><div class="line"><a name="l00348"></a><span class="lineno"> 348</span>&#160; in_s32 = add_s32(in_s32, offset_term_s32);</div><div class="line"><a name="l00349"></a><span class="lineno"> 349</span>&#160; }</div><div class="line"><a name="l00350"></a><span class="lineno"> 350</span>&#160; <span class="keywordflow">if</span>(!is_fixed_point)</div><div class="line"><a name="l00351"></a><span class="lineno"> 351</span>&#160; {</div><div class="line"><a name="l00352"></a><span class="lineno"> 352</span>&#160; in_s32 = mul_s32(in_s32, multiplier);</div><div class="line"><a name="l00353"></a><span class="lineno"> 353</span>&#160; }</div><div class="line"><a name="l00354"></a><span class="lineno"> 354</span>&#160;</div><div class="line"><a name="l00355"></a><span class="lineno"> 355</span>&#160; <span class="keywordflow">if</span>(is_fixed_point)</div><div class="line"><a name="l00356"></a><span class="lineno"> 356</span>&#160; {</div><div class="line"><a name="l00357"></a><span class="lineno"> 357</span>&#160; <a class="code" href="namespacearm__compute_1_1wrapper.xhtml#ae7943ea9c1f74dc72c62d4cc3966a459">wrapper::vstore</a>(reinterpret_cast&lt;typename VT::stype *&gt;(out_it.ptr() + x),</div><div class="line"><a name="l00358"></a><span class="lineno"> 358</span>&#160; finalize_quantization&lt;is_bounded_relu&gt;(in_s32, multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, result_offset_s32, min_vec, max_vec));</div><div class="line"><a name="l00359"></a><span class="lineno"> 359</span>&#160; }</div><div class="line"><a name="l00360"></a><span class="lineno"> 360</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00361"></a><span class="lineno"> 361</span>&#160; {</div><div class="line"><a name="l00362"></a><span class="lineno"> 362</span>&#160; <a class="code" href="namespacearm__compute_1_1wrapper.xhtml#ae7943ea9c1f74dc72c62d4cc3966a459">wrapper::vstore</a>(reinterpret_cast&lt;typename VT::stype *&gt;(out_it.ptr() + x),</div><div class="line"><a name="l00363"></a><span class="lineno"> 363</span>&#160; finalize_quantization_floating_point&lt;is_bounded_relu&gt;(in_s32, result_shift_s32, min_vec, 
max_vec));</div><div class="line"><a name="l00364"></a><span class="lineno"> 364</span>&#160; }</div><div class="line"><a name="l00365"></a><span class="lineno"> 365</span>&#160; }</div><div class="line"><a name="l00366"></a><span class="lineno"> 366</span>&#160; <span class="comment">// Compute left-over elements</span></div><div class="line"><a name="l00367"></a><span class="lineno"> 367</span>&#160; <span class="keywordflow">for</span>(; x &lt; window_end_x; ++x)</div><div class="line"><a name="l00368"></a><span class="lineno"> 368</span>&#160; {</div><div class="line"><a name="l00369"></a><span class="lineno"> 369</span>&#160; int32_t in_value = *(reinterpret_cast&lt;const int32_t *&gt;(mm_result_it.ptr()) + x) + <a class="code" href="namespacearm__compute_1_1wrapper.xhtml#aa16ace001ab8287faa46d6962f369219">wrapper::vgetlane</a>(offset_term_s32.val[0], 0);</div><div class="line"><a name="l00370"></a><span class="lineno"> 370</span>&#160;</div><div class="line"><a name="l00371"></a><span class="lineno"> 371</span>&#160; <span class="keywordflow">if</span>(has_a_offset)</div><div class="line"><a name="l00372"></a><span class="lineno"> 372</span>&#160; {</div><div class="line"><a name="l00373"></a><span class="lineno"> 373</span>&#160; in_value += (*(vector_sum_col_ptr + x) * a_offset);</div><div class="line"><a name="l00374"></a><span class="lineno"> 374</span>&#160; }</div><div class="line"><a name="l00375"></a><span class="lineno"> 375</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a9aeced5a5128f60a31ea3e327a45ee21">has_bias</a>)</div><div class="line"><a name="l00376"></a><span class="lineno"> 376</span>&#160; {</div><div class="line"><a name="l00377"></a><span class="lineno"> 377</span>&#160; in_value += *(bias_ptr + x);</div><div class="line"><a name="l00378"></a><span class="lineno"> 378</span>&#160; }</div><div class="line"><a name="l00379"></a><span class="lineno"> 379</span>&#160;</div><div class="line"><a name="l00380"></a><span class="lineno"> 380</span>&#160; <span class="keywordflow">if</span>(is_fixed_point)</div><div class="line"><a name="l00381"></a><span class="lineno"> 381</span>&#160; {</div><div class="line"><a name="l00382"></a><span class="lineno"> 382</span>&#160; <span class="comment">// Finalize and store the result</span></div><div class="line"><a name="l00383"></a><span class="lineno"> 383</span>&#160; *reinterpret_cast&lt;typename VT::stype *&gt;(out_it.ptr() + x) = finalize_quantization&lt;is_bounded_relu&gt;(in_value, multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>,</div><div class="line"><a name="l00384"></a><span class="lineno"> 384</span>&#160; static_cast&lt;typename VT::stype&gt;(min_bound),</div><div class="line"><a name="l00385"></a><span class="lineno"> 385</span>&#160; static_cast&lt;typename VT::stype&gt;(max_bound));</div><div class="line"><a name="l00386"></a><span class="lineno"> 386</span>&#160; }</div><div class="line"><a name="l00387"></a><span class="lineno"> 387</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00388"></a><span class="lineno"> 388</span>&#160; {</div><div class="line"><a name="l00389"></a><span class="lineno"> 389</span>&#160; <span class="comment">// Finalize quantization</span></div><div class="line"><a 
name="l00390"></a><span class="lineno"> 390</span>&#160; in_value = (in_value * multiplier) &gt;&gt; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>;</div><div class="line"><a name="l00391"></a><span class="lineno"> 391</span>&#160;</div><div class="line"><a name="l00392"></a><span class="lineno"> 392</span>&#160; <span class="comment">// Bound and store the result</span></div><div class="line"><a name="l00393"></a><span class="lineno"> 393</span>&#160; <span class="keywordflow">if</span>(is_bounded_relu)</div><div class="line"><a name="l00394"></a><span class="lineno"> 394</span>&#160; {</div><div class="line"><a name="l00395"></a><span class="lineno"> 395</span>&#160; in_value = static_cast&lt;typename VT::stype&gt;(std::max&lt;int32_t&gt;(min_bound, std::min&lt;int32_t&gt;(max_bound, in_value)));</div><div class="line"><a name="l00396"></a><span class="lineno"> 396</span>&#160; }</div><div class="line"><a name="l00397"></a><span class="lineno"> 397</span>&#160; *reinterpret_cast&lt;typename VT::stype *&gt;(out_it.ptr() + x) = static_cast&lt;typename VT::stype&gt;(std::max&lt;int32_t&gt;(static_cast&lt;int32_t&gt;(<a class="code" href="namespacearm__compute_1_1support_1_1cpp11.xhtml#a73e352c61baaf9c1178da2d30105b04e">std::numeric_limits&lt;typename VT::stype&gt;::lowest</a>()),</div><div class="line"><a name="l00398"></a><span class="lineno"> 398</span>&#160; std::min&lt;int32_t&gt;(static_cast&lt;int32_t&gt;(std::numeric_limits&lt;typename VT::stype&gt;::max()), in_value)));</div><div class="line"><a name="l00399"></a><span class="lineno"> 399</span>&#160; }</div><div class="line"><a name="l00400"></a><span class="lineno"> 400</span>&#160; }</div><div class="line"><a name="l00401"></a><span class="lineno"> 401</span>&#160;}</div><div class="line"><a name="l00402"></a><span class="lineno"> 402</span>&#160;</div><div class="line"><a name="l00403"></a><span class="lineno"> 403</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> has_a_offset, <span class="keywordtype">bool</span> has_bias, <span class="keywordtype">bool</span> is_bounded_relu, <span class="keywordtype">bool</span> is_fixed_po<span class="keywordtype">int</span>&gt;</div><div class="line"><a name="l00404"></a><span class="lineno"> 404</span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> run_offset_contribution_output_stage_window_symm(<span class="keyword">const</span> int32_t *vector_sum_col_ptr, <span class="keyword">const</span> int32_t *bias_ptr, Iterator mm_result_it, Iterator out_it,</div><div class="line"><a name="l00405"></a><span class="lineno"> 405</span>&#160; <span class="keyword">const</span> int32_t *result_multipliers, <span class="keyword">const</span> int32_t *result_shifts,</div><div class="line"><a name="l00406"></a><span class="lineno"> 406</span>&#160; <span class="keyword">const</span> int32x4_t result_offset, int8x16_t min_s8, int8x16_t max_s8,</div><div class="line"><a name="l00407"></a><span class="lineno"> 407</span>&#160; int32_t a_offset, int32_t <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, int32_t min_bound, int32_t max_bound,</div><div class="line"><a name="l00408"></a><span class="lineno"> 408</span>&#160; <span class="keywordtype">int</span> window_step_x, <span class="keywordtype">int</span> window_start_x, <span class="keywordtype">int</span> window_end_x)</div><div class="line"><a 
name="l00409"></a><span class="lineno"> 409</span>&#160;{</div><div class="line"><a name="l00410"></a><span class="lineno"> 410</span>&#160; int32x4x4_t offset_term_s32 = { 0, 0, 0, 0 };</div><div class="line"><a name="l00411"></a><span class="lineno"> 411</span>&#160; <span class="keywordflow">if</span>(!is_fixed_point)</div><div class="line"><a name="l00412"></a><span class="lineno"> 412</span>&#160; {</div><div class="line"><a name="l00413"></a><span class="lineno"> 413</span>&#160; <span class="comment">// Combine quantization offset with other offsets.</span></div><div class="line"><a name="l00414"></a><span class="lineno"> 414</span>&#160; offset_term_s32 = add_s32(offset_term_s32, result_offset);</div><div class="line"><a name="l00415"></a><span class="lineno"> 415</span>&#160; }</div><div class="line"><a name="l00416"></a><span class="lineno"> 416</span>&#160;</div><div class="line"><a name="l00417"></a><span class="lineno"> 417</span>&#160; <span class="keywordtype">int</span> x = window_start_x;</div><div class="line"><a name="l00418"></a><span class="lineno"> 418</span>&#160; <span class="keywordflow">for</span>(; x &lt;= (window_end_x - window_step_x); x += window_step_x)</div><div class="line"><a name="l00419"></a><span class="lineno"> 419</span>&#160; {</div><div class="line"><a name="l00420"></a><span class="lineno"> 420</span>&#160; int32x4x4_t in_s32 = load_results_input(mm_result_it, x);</div><div class="line"><a name="l00421"></a><span class="lineno"> 421</span>&#160;</div><div class="line"><a name="l00422"></a><span class="lineno"> 422</span>&#160; <span class="keywordflow">if</span>(has_a_offset)</div><div class="line"><a name="l00423"></a><span class="lineno"> 423</span>&#160; {</div><div class="line"><a name="l00424"></a><span class="lineno"> 424</span>&#160; in_s32 = add_s32(in_s32, get_a_offset(vector_sum_col_ptr, a_offset, x));</div><div class="line"><a name="l00425"></a><span class="lineno"> 425</span>&#160; }</div><div class="line"><a name="l00426"></a><span class="lineno"> 426</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a9aeced5a5128f60a31ea3e327a45ee21">has_bias</a>)</div><div class="line"><a name="l00427"></a><span class="lineno"> 427</span>&#160; {</div><div class="line"><a name="l00428"></a><span class="lineno"> 428</span>&#160; in_s32 = add_s32(in_s32, load(bias_ptr, x));</div><div class="line"><a name="l00429"></a><span class="lineno"> 429</span>&#160; }</div><div class="line"><a name="l00430"></a><span class="lineno"> 430</span>&#160; <span class="keywordflow">if</span>(!is_fixed_point)</div><div class="line"><a name="l00431"></a><span class="lineno"> 431</span>&#160; {</div><div class="line"><a name="l00432"></a><span class="lineno"> 432</span>&#160; in_s32 = add_s32(in_s32, offset_term_s32);</div><div class="line"><a name="l00433"></a><span class="lineno"> 433</span>&#160; in_s32 = mul_s32(in_s32, result_multipliers + x);</div><div class="line"><a name="l00434"></a><span class="lineno"> 434</span>&#160; }</div><div class="line"><a name="l00435"></a><span class="lineno"> 435</span>&#160;</div><div class="line"><a name="l00436"></a><span class="lineno"> 436</span>&#160; <span class="keywordflow">if</span>(is_fixed_point)</div><div class="line"><a name="l00437"></a><span class="lineno"> 437</span>&#160; {</div><div class="line"><a name="l00438"></a><span class="lineno"> 438</span>&#160; vst1q_s8(reinterpret_cast&lt;int8_t *&gt;(out_it.ptr() + x), 
    // Compute left-over elements
    for(; x < window_end_x; ++x)
    {
        int32_t in_value = *(reinterpret_cast<const int32_t *>(mm_result_it.ptr()) + x) + wrapper::vgetlane(offset_term_s32.val[0], 0);

        if(has_a_offset)
        {
            in_value += (*(vector_sum_col_ptr + x) * a_offset);
        }
        if(has_bias)
        {
            in_value += *(bias_ptr + x);
        }

        if(is_fixed_point)
        {
            // Finalize and store the result
            *(out_it.ptr() + x) = finalize_quantization<is_bounded_relu>(in_value, result_multipliers[x], result_shifts[x], offset, static_cast<int8_t>(min_bound), static_cast<int8_t>(max_bound));
        }
        else
        {
            // Finalize quantization
            in_value = (in_value * result_multipliers[x]) >> (-result_shifts[x]);

            // Bound and store the result
            if(is_bounded_relu)
            {
                in_value = static_cast<int8_t>(std::max<int32_t>(min_bound, std::min<int32_t>(max_bound, in_value)));
            }
            *(out_it.ptr() + x) = static_cast<int8_t>(std::max<int32_t>(-128, std::min<int32_t>(127, in_value)));
        }
    }
}

template <typename T, bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point>
void run_offset_contribution_output_stage(const Window &window,
                                          const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                          int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                          GEMMLowpOutputStageInfo output_stage)
{
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
    using Typer        = VectorTyper<T>;

    const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
    const int depth_input  = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t multiplier = output_stage.gemmlowp_multiplier;
    const int32_t shift      = output_stage.gemmlowp_shift;
    const int32_t offset     = output_stage.gemmlowp_offset;
    const int32_t min_bound  = output_stage.gemmlowp_min_bound;
    const int32_t max_bound  = output_stage.gemmlowp_max_bound;

    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int32x4_t result_shift_s32  = vdupq_n_s32(is_fixed_point ? shift : -shift);
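    // Editor's note: the shift is negated for the QUANTIZE_DOWN path, presumably so the
    // vectorised helpers can apply it as a single vector shift, where a negative count acts as
    // an arithmetic shift right; the scalar left-over paths use ">> shift" directly instead.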
<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a> : -<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>);</div><div class="line"><a name="l00499"></a><span class="lineno"> 499</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> min_vec = <a class="code" href="namespacearm__compute_1_1wrapper.xhtml#a39e87435be178fba49b76f49426ef873">wrapper::vdup_n</a>(static_cast&lt;T&gt;(min_bound), ExactTagType{});</div><div class="line"><a name="l00500"></a><span class="lineno"> 500</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> max_vec = <a class="code" href="namespacearm__compute_1_1wrapper.xhtml#a39e87435be178fba49b76f49426ef873">wrapper::vdup_n</a>(static_cast&lt;T&gt;(max_bound), ExactTagType{});</div><div class="line"><a name="l00501"></a><span class="lineno"> 501</span>&#160;</div><div class="line"><a name="l00502"></a><span class="lineno"> 502</span>&#160; <span class="keyword">const</span> <span class="keywordtype">int</span> window_step_x = 16;</div><div class="line"><a name="l00503"></a><span class="lineno"> 503</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> window_start_x = static_cast&lt;int&gt;(window.x().start());</div><div class="line"><a name="l00504"></a><span class="lineno"> 504</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> window_end_x = static_cast&lt;int&gt;(window.x().end());</div><div class="line"><a name="l00505"></a><span class="lineno"> 505</span>&#160;</div><div class="line"><a name="l00506"></a><span class="lineno"> 506</span>&#160; Window win(window);</div><div class="line"><a name="l00507"></a><span class="lineno"> 507</span>&#160; win.set(<a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>, Window::Dimension(0, 1, 1));</div><div class="line"><a name="l00508"></a><span class="lineno"> 508</span>&#160;</div><div class="line"><a name="l00509"></a><span class="lineno"> 509</span>&#160; Window collapsed_window = win.collapse_if_possible(win, <a class="code" href="classarm__compute_1_1_window.xhtml#a893d17b56b9abc4423ce26e9a24ac5dc">Window::DimZ</a>);</div><div class="line"><a name="l00510"></a><span class="lineno"> 510</span>&#160;</div><div class="line"><a name="l00511"></a><span class="lineno"> 511</span>&#160; Iterator mm_result_it(mm_result, win);</div><div class="line"><a name="l00512"></a><span class="lineno"> 512</span>&#160; Iterator out_it(output, win);</div><div class="line"><a name="l00513"></a><span class="lineno"> 513</span>&#160;</div><div class="line"><a name="l00514"></a><span class="lineno"> 514</span>&#160; <span class="keywordflow">if</span>((a_offset != 0) &amp;&amp; (b_offset != 0))</div><div class="line"><a name="l00515"></a><span class="lineno"> 515</span>&#160; {</div><div class="line"><a name="l00516"></a><span class="lineno"> 516</span>&#160; <a class="code" href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a>(vector_sum_col);</div><div class="line"><a name="l00517"></a><span class="lineno"> 517</span>&#160; <a class="code" href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a>(vector_sum_row);</div><div class="line"><a name="l00518"></a><span class="lineno"> 518</span>&#160;</div><div class="line"><a name="l00519"></a><span class="lineno"> 
519</span>&#160; Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);</div><div class="line"><a name="l00520"></a><span class="lineno"> 520</span>&#160; Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);</div><div class="line"><a name="l00521"></a><span class="lineno"> 521</span>&#160;</div><div class="line"><a name="l00522"></a><span class="lineno"> 522</span>&#160; <span class="keyword">const</span> <span class="keywordtype">size_t</span> sum_row_stride_y = vector_sum_row-&gt;info()-&gt;strides_in_bytes().y();</div><div class="line"><a name="l00523"></a><span class="lineno"> 523</span>&#160;</div><div class="line"><a name="l00524"></a><span class="lineno"> 524</span>&#160; <span class="comment">// Offset in case vector_sum_col is batched</span></div><div class="line"><a name="l00525"></a><span class="lineno"> 525</span>&#160; <span class="keyword">const</span> <span class="keywordtype">int</span> vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col-&gt;info()-&gt;strides_in_bytes().z() : 0;</div><div class="line"><a name="l00526"></a><span class="lineno"> 526</span>&#160;</div><div class="line"><a name="l00527"></a><span class="lineno"> 527</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a> != <span class="keyword">nullptr</span>)</div><div class="line"><a name="l00528"></a><span class="lineno"> 528</span>&#160; {</div><div class="line"><a name="l00529"></a><span class="lineno"> 529</span>&#160; Iterator bias_it = get_bias_it(collapsed_window, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>);</div><div class="line"><a name="l00530"></a><span class="lineno"> 530</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a5002bf7ec46d52971f9526e94172cfee">execute_window_loop</a>(collapsed_window, [&amp;](<span class="keyword">const</span> Coordinates &amp; <span class="keywordtype">id</span>)</div><div class="line"><a name="l00531"></a><span class="lineno"> 531</span>&#160; {</div><div class="line"><a name="l00532"></a><span class="lineno"> 532</span>&#160; <span class="keyword">const</span> <span class="keywordtype">int</span> batch_id = <span class="keywordtype">id</span>.z() / depth_input;</div><div class="line"><a name="l00533"></a><span class="lineno"> 533</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> vector_sum_col_ptr = reinterpret_cast&lt;const int32_t *&gt;(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);</div><div class="line"><a name="l00534"></a><span class="lineno"> 534</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> vector_sum_row_ptr = reinterpret_cast&lt;const int32_t *&gt;(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)</div><div class="line"><a name="l00535"></a><span class="lineno"> 535</span>&#160; + <span class="keywordtype">id</span>.y() + (<span class="keywordtype">id</span>.z() % depth_input) * height_input;</div><div class="line"><a name="l00536"></a><span class="lineno"> 536</span>&#160; run_offset_contribution_output_stage_window&lt;Typer, true, true, true, is_bounded_relu, is_fixed_point&gt;(vector_sum_col_ptr, vector_sum_row_ptr, reinterpret_cast&lt;const int32_t *&gt;(bias_it.ptr()),</div><div class="line"><a name="l00537"></a><span class="lineno"> 537</span>&#160; mm_result_it,</div><div 
class="line"><a name="l00538"></a><span class="lineno"> 538</span>&#160; out_it,</div><div class="line"><a name="l00539"></a><span class="lineno"> 539</span>&#160; result_offset_s32, result_shift_s32,</div><div class="line"><a name="l00540"></a><span class="lineno"> 540</span>&#160; min_vec, max_vec, a_offset, b_offset, k_offset,</div><div class="line"><a name="l00541"></a><span class="lineno"> 541</span>&#160; multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, min_bound, max_bound,</div><div class="line"><a name="l00542"></a><span class="lineno"> 542</span>&#160; window_step_x, window_start_x, window_end_x);</div><div class="line"><a name="l00543"></a><span class="lineno"> 543</span>&#160; },</div><div class="line"><a name="l00544"></a><span class="lineno"> 544</span>&#160; vector_sum_col_it, vector_sum_row_it, bias_it, mm_result_it, out_it);</div><div class="line"><a name="l00545"></a><span class="lineno"> 545</span>&#160; }</div><div class="line"><a name="l00546"></a><span class="lineno"> 546</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00547"></a><span class="lineno"> 547</span>&#160; {</div><div class="line"><a name="l00548"></a><span class="lineno"> 548</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a5002bf7ec46d52971f9526e94172cfee">execute_window_loop</a>(collapsed_window, [&amp;](<span class="keyword">const</span> Coordinates &amp; <span class="keywordtype">id</span>)</div><div class="line"><a name="l00549"></a><span class="lineno"> 549</span>&#160; {</div><div class="line"><a name="l00550"></a><span class="lineno"> 550</span>&#160; <span class="keyword">const</span> <span class="keywordtype">int</span> batch_id = <span class="keywordtype">id</span>.z() / depth_input;</div><div class="line"><a name="l00551"></a><span class="lineno"> 551</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> vector_sum_col_ptr = reinterpret_cast&lt;const int32_t *&gt;(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);</div><div class="line"><a name="l00552"></a><span class="lineno"> 552</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> vector_sum_row_ptr = reinterpret_cast&lt;const int32_t *&gt;(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)</div><div class="line"><a name="l00553"></a><span class="lineno"> 553</span>&#160; + <span class="keywordtype">id</span>.y() + (<span class="keywordtype">id</span>.z() % depth_input) * height_input;</div><div class="line"><a name="l00554"></a><span class="lineno"> 554</span>&#160; run_offset_contribution_output_stage_window&lt;Typer, true, true, false, is_bounded_relu, is_fixed_point&gt;(vector_sum_col_ptr, vector_sum_row_ptr, <span class="keyword">nullptr</span>, mm_result_it, out_it,</div><div class="line"><a name="l00555"></a><span class="lineno"> 555</span>&#160; result_offset_s32, result_shift_s32,</div><div class="line"><a name="l00556"></a><span class="lineno"> 556</span>&#160; min_vec, max_vec, a_offset, b_offset, k_offset,</div><div class="line"><a name="l00557"></a><span class="lineno"> 557</span>&#160; multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, <a class="code" 
href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, min_bound, max_bound,</div><div class="line"><a name="l00558"></a><span class="lineno"> 558</span>&#160; window_step_x, window_start_x, window_end_x);</div><div class="line"><a name="l00559"></a><span class="lineno"> 559</span>&#160; },</div><div class="line"><a name="l00560"></a><span class="lineno"> 560</span>&#160; vector_sum_col_it, vector_sum_row_it, mm_result_it, out_it);</div><div class="line"><a name="l00561"></a><span class="lineno"> 561</span>&#160; }</div><div class="line"><a name="l00562"></a><span class="lineno"> 562</span>&#160; }</div><div class="line"><a name="l00563"></a><span class="lineno"> 563</span>&#160; <span class="keywordflow">else</span> <span class="keywordflow">if</span>((a_offset == 0) &amp;&amp; (b_offset != 0))</div><div class="line"><a name="l00564"></a><span class="lineno"> 564</span>&#160; {</div><div class="line"><a name="l00565"></a><span class="lineno"> 565</span>&#160; <a class="code" href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a>(vector_sum_row);</div><div class="line"><a name="l00566"></a><span class="lineno"> 566</span>&#160;</div><div class="line"><a name="l00567"></a><span class="lineno"> 567</span>&#160; Iterator vector_sum_row_it = get_vector_sum_row_it(collapsed_window, vector_sum_row);</div><div class="line"><a name="l00568"></a><span class="lineno"> 568</span>&#160;</div><div class="line"><a name="l00569"></a><span class="lineno"> 569</span>&#160; <span class="keyword">const</span> <span class="keywordtype">size_t</span> sum_row_stride_y = vector_sum_row-&gt;info()-&gt;strides_in_bytes().y();</div><div class="line"><a name="l00570"></a><span class="lineno"> 570</span>&#160;</div><div class="line"><a name="l00571"></a><span class="lineno"> 571</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a> != <span class="keyword">nullptr</span>)</div><div class="line"><a name="l00572"></a><span class="lineno"> 572</span>&#160; {</div><div class="line"><a name="l00573"></a><span class="lineno"> 573</span>&#160; Iterator bias_it = get_bias_it(collapsed_window, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>);</div><div class="line"><a name="l00574"></a><span class="lineno"> 574</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a5002bf7ec46d52971f9526e94172cfee">execute_window_loop</a>(collapsed_window, [&amp;](<span class="keyword">const</span> Coordinates &amp; <span class="keywordtype">id</span>)</div><div class="line"><a name="l00575"></a><span class="lineno"> 575</span>&#160; {</div><div class="line"><a name="l00576"></a><span class="lineno"> 576</span>&#160; <span class="keyword">const</span> <span class="keywordtype">int</span> batch_id = <span class="keywordtype">id</span>.z() / depth_input;</div><div class="line"><a name="l00577"></a><span class="lineno"> 577</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> vector_sum_row_ptr = reinterpret_cast&lt;const int32_t *&gt;(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)</div><div class="line"><a name="l00578"></a><span class="lineno"> 578</span>&#160; + <span class="keywordtype">id</span>.y() + (<span class="keywordtype">id</span>.z() % depth_input) * height_input;</div><div class="line"><a name="l00579"></a><span class="lineno"> 
579</span>&#160; run_offset_contribution_output_stage_window&lt;Typer, false, true, true, is_bounded_relu, is_fixed_point&gt;(<span class="keyword">nullptr</span>, vector_sum_row_ptr, reinterpret_cast&lt;const int32_t *&gt;(bias_it.ptr()), mm_result_it,</div><div class="line"><a name="l00580"></a><span class="lineno"> 580</span>&#160; out_it,</div><div class="line"><a name="l00581"></a><span class="lineno"> 581</span>&#160; result_offset_s32, result_shift_s32,</div><div class="line"><a name="l00582"></a><span class="lineno"> 582</span>&#160; min_vec, max_vec, a_offset, b_offset, k_offset,</div><div class="line"><a name="l00583"></a><span class="lineno"> 583</span>&#160; multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, min_bound, max_bound,</div><div class="line"><a name="l00584"></a><span class="lineno"> 584</span>&#160; window_step_x, window_start_x, window_end_x);</div><div class="line"><a name="l00585"></a><span class="lineno"> 585</span>&#160; },</div><div class="line"><a name="l00586"></a><span class="lineno"> 586</span>&#160; vector_sum_row_it, bias_it, mm_result_it, out_it);</div><div class="line"><a name="l00587"></a><span class="lineno"> 587</span>&#160; }</div><div class="line"><a name="l00588"></a><span class="lineno"> 588</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00589"></a><span class="lineno"> 589</span>&#160; {</div><div class="line"><a name="l00590"></a><span class="lineno"> 590</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a5002bf7ec46d52971f9526e94172cfee">execute_window_loop</a>(collapsed_window, [&amp;](<span class="keyword">const</span> Coordinates &amp; <span class="keywordtype">id</span>)</div><div class="line"><a name="l00591"></a><span class="lineno"> 591</span>&#160; {</div><div class="line"><a name="l00592"></a><span class="lineno"> 592</span>&#160; <span class="keyword">const</span> <span class="keywordtype">int</span> batch_id = <span class="keywordtype">id</span>.z() / depth_input;</div><div class="line"><a name="l00593"></a><span class="lineno"> 593</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> vector_sum_row_ptr = reinterpret_cast&lt;const int32_t *&gt;(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y)</div><div class="line"><a name="l00594"></a><span class="lineno"> 594</span>&#160; + <span class="keywordtype">id</span>.y() + (<span class="keywordtype">id</span>.z() % depth_input) * height_input;</div><div class="line"><a name="l00595"></a><span class="lineno"> 595</span>&#160; run_offset_contribution_output_stage_window&lt;Typer, false, true, false, is_bounded_relu, is_fixed_point&gt;(<span class="keyword">nullptr</span>, vector_sum_row_ptr, <span class="keyword">nullptr</span>, mm_result_it, out_it,</div><div class="line"><a name="l00596"></a><span class="lineno"> 596</span>&#160; result_offset_s32, result_shift_s32,</div><div class="line"><a name="l00597"></a><span class="lineno"> 597</span>&#160; min_vec, max_vec, a_offset, b_offset, k_offset,</div><div class="line"><a name="l00598"></a><span class="lineno"> 598</span>&#160; multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, <a class="code" 
href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, min_bound, max_bound,</div><div class="line"><a name="l00599"></a><span class="lineno"> 599</span>&#160; window_step_x, window_start_x, window_end_x);</div><div class="line"><a name="l00600"></a><span class="lineno"> 600</span>&#160; },</div><div class="line"><a name="l00601"></a><span class="lineno"> 601</span>&#160; vector_sum_row_it, mm_result_it, out_it);</div><div class="line"><a name="l00602"></a><span class="lineno"> 602</span>&#160; }</div><div class="line"><a name="l00603"></a><span class="lineno"> 603</span>&#160; }</div><div class="line"><a name="l00604"></a><span class="lineno"> 604</span>&#160; <span class="keywordflow">else</span> <span class="keywordflow">if</span>((a_offset != 0) &amp;&amp; (b_offset == 0))</div><div class="line"><a name="l00605"></a><span class="lineno"> 605</span>&#160; {</div><div class="line"><a name="l00606"></a><span class="lineno"> 606</span>&#160; <a class="code" href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a>(vector_sum_col);</div><div class="line"><a name="l00607"></a><span class="lineno"> 607</span>&#160;</div><div class="line"><a name="l00608"></a><span class="lineno"> 608</span>&#160; Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);</div><div class="line"><a name="l00609"></a><span class="lineno"> 609</span>&#160;</div><div class="line"><a name="l00610"></a><span class="lineno"> 610</span>&#160; <span class="comment">// Offset in case vector_sum_col is batched</span></div><div class="line"><a name="l00611"></a><span class="lineno"> 611</span>&#160; <span class="keyword">const</span> <span class="keywordtype">int</span> vector_sum_col_batch_offset = slide_vector_sum_col ? 
vector_sum_col-&gt;info()-&gt;strides_in_bytes().z() : 0;</div><div class="line"><a name="l00612"></a><span class="lineno"> 612</span>&#160;</div><div class="line"><a name="l00613"></a><span class="lineno"> 613</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a> != <span class="keyword">nullptr</span>)</div><div class="line"><a name="l00614"></a><span class="lineno"> 614</span>&#160; {</div><div class="line"><a name="l00615"></a><span class="lineno"> 615</span>&#160; Iterator bias_it = get_bias_it(collapsed_window, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>);</div><div class="line"><a name="l00616"></a><span class="lineno"> 616</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a5002bf7ec46d52971f9526e94172cfee">execute_window_loop</a>(collapsed_window, [&amp;](<span class="keyword">const</span> Coordinates &amp; <span class="keywordtype">id</span>)</div><div class="line"><a name="l00617"></a><span class="lineno"> 617</span>&#160; {</div><div class="line"><a name="l00618"></a><span class="lineno"> 618</span>&#160; <span class="keyword">const</span> <span class="keywordtype">int</span> batch_id = <span class="keywordtype">id</span>.z() / depth_input;</div><div class="line"><a name="l00619"></a><span class="lineno"> 619</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> vector_sum_col_ptr = reinterpret_cast&lt;const int32_t *&gt;(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);</div><div class="line"><a name="l00620"></a><span class="lineno"> 620</span>&#160; run_offset_contribution_output_stage_window&lt;Typer, true, false, true, is_bounded_relu, is_fixed_point&gt;(vector_sum_col_ptr, <span class="keyword">nullptr</span>, reinterpret_cast&lt;const int32_t *&gt;(bias_it.ptr()), mm_result_it,</div><div class="line"><a name="l00621"></a><span class="lineno"> 621</span>&#160; out_it,</div><div class="line"><a name="l00622"></a><span class="lineno"> 622</span>&#160; result_offset_s32, result_shift_s32,</div><div class="line"><a name="l00623"></a><span class="lineno"> 623</span>&#160; min_vec, max_vec, a_offset, b_offset, k_offset,</div><div class="line"><a name="l00624"></a><span class="lineno"> 624</span>&#160; multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, min_bound, max_bound,</div><div class="line"><a name="l00625"></a><span class="lineno"> 625</span>&#160; window_step_x, window_start_x, window_end_x);</div><div class="line"><a name="l00626"></a><span class="lineno"> 626</span>&#160; },</div><div class="line"><a name="l00627"></a><span class="lineno"> 627</span>&#160; vector_sum_col_it, bias_it, mm_result_it, out_it);</div><div class="line"><a name="l00628"></a><span class="lineno"> 628</span>&#160; }</div><div class="line"><a name="l00629"></a><span class="lineno"> 629</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00630"></a><span class="lineno"> 630</span>&#160; {</div><div class="line"><a name="l00631"></a><span class="lineno"> 631</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a5002bf7ec46d52971f9526e94172cfee">execute_window_loop</a>(collapsed_window, [&amp;](<span class="keyword">const</span> 
Coordinates &amp; <span class="keywordtype">id</span>)</div><div class="line"><a name="l00632"></a><span class="lineno"> 632</span>&#160; {</div><div class="line"><a name="l00633"></a><span class="lineno"> 633</span>&#160; <span class="keyword">const</span> <span class="keywordtype">int</span> batch_id = <span class="keywordtype">id</span>.z() / depth_input;</div><div class="line"><a name="l00634"></a><span class="lineno"> 634</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span> vector_sum_col_ptr = reinterpret_cast&lt;const int32_t *&gt;(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);</div><div class="line"><a name="l00635"></a><span class="lineno"> 635</span>&#160; run_offset_contribution_output_stage_window&lt;Typer, true, false, false, is_bounded_relu, is_fixed_point&gt;(vector_sum_col_ptr, <span class="keyword">nullptr</span>, <span class="keyword">nullptr</span>, mm_result_it, out_it,</div><div class="line"><a name="l00636"></a><span class="lineno"> 636</span>&#160; result_offset_s32, result_shift_s32,</div><div class="line"><a name="l00637"></a><span class="lineno"> 637</span>&#160; min_vec, max_vec, a_offset, b_offset, k_offset,</div><div class="line"><a name="l00638"></a><span class="lineno"> 638</span>&#160; multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, min_bound, max_bound,</div><div class="line"><a name="l00639"></a><span class="lineno"> 639</span>&#160; window_step_x, window_start_x, window_end_x);</div><div class="line"><a name="l00640"></a><span class="lineno"> 640</span>&#160; },</div><div class="line"><a name="l00641"></a><span class="lineno"> 641</span>&#160; vector_sum_col_it, mm_result_it, out_it);</div><div class="line"><a name="l00642"></a><span class="lineno"> 642</span>&#160; }</div><div class="line"><a name="l00643"></a><span class="lineno"> 643</span>&#160; }</div><div class="line"><a name="l00644"></a><span class="lineno"> 644</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00645"></a><span class="lineno"> 645</span>&#160; {</div><div class="line"><a name="l00646"></a><span class="lineno"> 646</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a> != <span class="keyword">nullptr</span>)</div><div class="line"><a name="l00647"></a><span class="lineno"> 647</span>&#160; {</div><div class="line"><a name="l00648"></a><span class="lineno"> 648</span>&#160; Iterator bias_it = get_bias_it(collapsed_window, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>);</div><div class="line"><a name="l00649"></a><span class="lineno"> 649</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a5002bf7ec46d52971f9526e94172cfee">execute_window_loop</a>(collapsed_window, [&amp;](<span class="keyword">const</span> Coordinates &amp;)</div><div class="line"><a name="l00650"></a><span class="lineno"> 650</span>&#160; {</div><div class="line"><a name="l00651"></a><span class="lineno"> 651</span>&#160; run_offset_contribution_output_stage_window&lt;Typer, false, false, true, is_bounded_relu, is_fixed_point&gt;(<span class="keyword">nullptr</span>, <span class="keyword">nullptr</span>, reinterpret_cast&lt;const int32_t 
*&gt;(bias_it.ptr()), mm_result_it, out_it,</div><div class="line"><a name="l00652"></a><span class="lineno"> 652</span>&#160; result_offset_s32, result_shift_s32,</div><div class="line"><a name="l00653"></a><span class="lineno"> 653</span>&#160; min_vec, max_vec, a_offset, b_offset, k_offset,</div><div class="line"><a name="l00654"></a><span class="lineno"> 654</span>&#160; multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, min_bound, max_bound,</div><div class="line"><a name="l00655"></a><span class="lineno"> 655</span>&#160; window_step_x, window_start_x, window_end_x);</div><div class="line"><a name="l00656"></a><span class="lineno"> 656</span>&#160; },</div><div class="line"><a name="l00657"></a><span class="lineno"> 657</span>&#160; bias_it, mm_result_it, out_it);</div><div class="line"><a name="l00658"></a><span class="lineno"> 658</span>&#160; }</div><div class="line"><a name="l00659"></a><span class="lineno"> 659</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00660"></a><span class="lineno"> 660</span>&#160; {</div><div class="line"><a name="l00661"></a><span class="lineno"> 661</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a5002bf7ec46d52971f9526e94172cfee">execute_window_loop</a>(collapsed_window, [&amp;](<span class="keyword">const</span> Coordinates &amp;)</div><div class="line"><a name="l00662"></a><span class="lineno"> 662</span>&#160; {</div><div class="line"><a name="l00663"></a><span class="lineno"> 663</span>&#160; run_offset_contribution_output_stage_window&lt;Typer, false, false, false, is_bounded_relu, is_fixed_point&gt;(<span class="keyword">nullptr</span>, <span class="keyword">nullptr</span>, <span class="keyword">nullptr</span>, mm_result_it, out_it,</div><div class="line"><a name="l00664"></a><span class="lineno"> 664</span>&#160; result_offset_s32, result_shift_s32,</div><div class="line"><a name="l00665"></a><span class="lineno"> 665</span>&#160; min_vec, max_vec, a_offset, b_offset, k_offset,</div><div class="line"><a name="l00666"></a><span class="lineno"> 666</span>&#160; multiplier, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">shift</a>, <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>, min_bound, max_bound,</div><div class="line"><a name="l00667"></a><span class="lineno"> 667</span>&#160; window_step_x, window_start_x, window_end_x);</div><div class="line"><a name="l00668"></a><span class="lineno"> 668</span>&#160; },</div><div class="line"><a name="l00669"></a><span class="lineno"> 669</span>&#160; mm_result_it, out_it);</div><div class="line"><a name="l00670"></a><span class="lineno"> 670</span>&#160; }</div><div class="line"><a name="l00671"></a><span class="lineno"> 671</span>&#160; <span class="keywordflow">return</span>;</div><div class="line"><a name="l00672"></a><span class="lineno"> 672</span>&#160; }</div><div class="line"><a name="l00673"></a><span class="lineno"> 673</span>&#160;}</div><div class="line"><a name="l00674"></a><span class="lineno"> 674</span>&#160;</div><div class="line"><a name="l00675"></a><span class="lineno"> 675</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> is_gemm3d, <span class="keywordtype">bool</span> 
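// --- Editor's illustrative sketch (not part of the original kernel) ---------------------------
// run_offset_contribution_output_stage above only selects a specialisation of the window helper
// according to which corrections are needed (a_offset, b_offset, bias present or not); the
// arithmetic itself is the usual GEMMLowp offset contribution. A scalar reference for a single
// accumulator, assuming (as for the other GEMMLowp offset-contribution kernels) that sum_col is
// the column sum of the RHS, sum_row the row sum of the LHS, and k_offset the precomputed
// constant cross term passed in by the caller; the function name is hypothetical.
inline int32_t example_offset_contribution(int32_t mm_result, int32_t sum_col, int32_t sum_row, int32_t bias,
                                           int32_t a_offset, int32_t b_offset, int32_t k_offset)
{
    // Each term is only added when the corresponding template flag of the window helper is true.
    return mm_result + a_offset * sum_col + b_offset * sum_row + k_offset + bias;
}
// -----------------------------------------------------------------------------------------------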

template <bool is_gemm3d, bool is_bounded_relu, bool is_fixed_point>
void run_offset_contribution_output_stage_symm(const Window &window,
                                               const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output,
                                               int32_t a_offset, int32_t b_offset, int32_t k_offset, bool slide_vector_sum_col,
                                               GEMMLowpOutputStageInfo output_stage)
{
    ARM_COMPUTE_UNUSED(vector_sum_row, b_offset, k_offset);

    const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;

    const int32_t offset    = output_stage.gemmlowp_offset;
    const int32_t min_bound = output_stage.gemmlowp_min_bound;
    const int32_t max_bound = output_stage.gemmlowp_max_bound;

    const int32_t *result_multipliers = output_stage.gemmlowp_multipliers.data();
    const int32_t *result_shifts      = output_stage.gemmlowp_shifts.data();
    const int32x4_t result_offset_s32 = vdupq_n_s32(offset);
    const int8x16_t min_s8            = vdupq_n_s8(static_cast<int8_t>(min_bound));
    const int8x16_t max_s8            = vdupq_n_s8(static_cast<int8_t>(max_bound));

    const int  window_step_x  = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    Window collapsed_window = win.collapse_if_possible(win, Window::DimZ);

    Iterator mm_result_it(mm_result, win);
    Iterator out_it(output, win);

    if(a_offset != 0)
    {
        ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_col);

        Iterator vector_sum_col_it = get_vector_sum_col_it(collapsed_window, vector_sum_col);

        // Offset in case vector_sum_col is batched
        const int vector_sum_col_batch_offset = slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;

        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm<true, true, is_bounded_relu, is_fixed_point>(
                    vector_sum_col_ptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                    result_multipliers, result_shifts,
                    result_offset_s32, min_s8, max_s8,
                    a_offset, offset, min_bound, max_bound,
                    window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates & id)
            {
                const int  batch_id           = id.z() / depth_input;
                const auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_id * vector_sum_col_batch_offset);
                run_offset_contribution_output_stage_window_symm<true, false, is_bounded_relu, is_fixed_point>(
                    vector_sum_col_ptr, nullptr, mm_result_it, out_it,
                    result_multipliers, result_shifts,
                    result_offset_s32, min_s8, max_s8,
                    a_offset, offset, min_bound, max_bound,
                    window_step_x, window_start_x, window_end_x);
            },
            vector_sum_col_it, mm_result_it, out_it);
        }
    }
    else
    {
        if(bias != nullptr)
        {
            Iterator bias_it = get_bias_it(collapsed_window, bias);
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm<false, true, is_bounded_relu, is_fixed_point>(
                    nullptr, reinterpret_cast<const int32_t *>(bias_it.ptr()), mm_result_it, out_it,
                    result_multipliers, result_shifts,
                    result_offset_s32, min_s8, max_s8,
                    a_offset, offset, min_bound, max_bound,
                    window_step_x, window_start_x, window_end_x);
            },
            bias_it, mm_result_it, out_it);
        }
        else
        {
            execute_window_loop(collapsed_window, [&](const Coordinates &)
            {
                run_offset_contribution_output_stage_window_symm<false, false, is_bounded_relu, is_fixed_point>(
                    nullptr, nullptr, mm_result_it, out_it,
                    result_multipliers, result_shifts,
                    result_offset_s32, min_s8, max_s8,
                    a_offset, offset, min_bound, max_bound,
                    window_step_x, window_start_x, window_end_x);
            },
            mm_result_it, out_it);
        }
        return;
    }
}
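// --- Editor's illustrative sketch (not part of the original kernel) ---------------------------
// The symmetric variant above ignores b_offset/k_offset (see ARM_COMPUTE_UNUSED) and reads one
// multiplier/shift per output channel from the output stage descriptor. A minimal example of how
// such a descriptor could be filled for the fixed-point path, assuming the std::vector<int32_t>
// members used earlier in this file; the function name and the placeholder values are
// illustrative only.
inline GEMMLowpOutputStageInfo example_per_channel_output_stage(const std::vector<int32_t> &multipliers,
                                                                const std::vector<int32_t> &shifts)
{
    GEMMLowpOutputStageInfo info{};
    info.type                 = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    info.gemmlowp_offset      = 0;    // zero point of the signed 8-bit output (placeholder)
    info.gemmlowp_min_bound   = -128; // optional bounded-ReLU clamp, here the full int8 range
    info.gemmlowp_max_bound   = 127;
    info.gemmlowp_multipliers = multipliers; // one fixed-point multiplier per output channel
    info.gemmlowp_shifts      = shifts;      // one shift per output channel
    return info;
}
// -----------------------------------------------------------------------------------------------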
href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c">DataType::S32</a>);</div><div class="line"><a name="l00781"></a><span class="lineno"> 781</span>&#160; <span class="keywordflow">if</span>(output-&gt;data_type() == <a class="code" href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6af14462d71aa842202c3e4b272c7ec924">DataType::QASYMM8</a>)</div><div class="line"><a name="l00782"></a><span class="lineno"> 782</span>&#160; {</div><div class="line"><a name="l00783"></a><span class="lineno"> 783</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(output_stage.gemmlowp_max_bound &gt; 255);</div><div class="line"><a name="l00784"></a><span class="lineno"> 784</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(output_stage.gemmlowp_min_bound &lt; 0);</div><div class="line"><a name="l00785"></a><span class="lineno"> 785</span>&#160; }</div><div class="line"><a name="l00786"></a><span class="lineno"> 786</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00787"></a><span class="lineno"> 787</span>&#160; {</div><div class="line"><a name="l00788"></a><span class="lineno"> 788</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(output_stage.gemmlowp_max_bound &gt; 127);</div><div class="line"><a name="l00789"></a><span class="lineno"> 789</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(output_stage.gemmlowp_min_bound &lt; -128);</div><div class="line"><a name="l00790"></a><span class="lineno"> 790</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(mm_result-&gt;dimension(0) &gt; 1 &amp;&amp; output_stage.gemmlowp_multipliers.size() &gt; 1 &amp;&amp; b_offset != 0);</div><div class="line"><a name="l00791"></a><span class="lineno"> 791</span>&#160; }</div><div class="line"><a name="l00792"></a><span class="lineno"> 792</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(output_stage.gemmlowp_min_bound &gt; output_stage.gemmlowp_max_bound);</div><div class="line"><a name="l00793"></a><span class="lineno"> 793</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(output_stage.type != <a class="code" href="namespacearm__compute.xhtml#a5558e2cc22f7f4771653d992c8ad8864a079e2ddc95b344b5cb0188bed9a80d8b">GEMMLowpOutputStageType::QUANTIZE_DOWN</a> &amp;&amp; output_stage.type != <a class="code" href="namespacearm__compute.xhtml#a5558e2cc22f7f4771653d992c8ad8864ab300cae200f67712c1eb9234e28158ca">GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT</a>);</div><div class="line"><a name="l00794"></a><span class="lineno"> 794</span>&#160;</div><div class="line"><a name="l00795"></a><span class="lineno"> 795</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a> != <span class="keyword">nullptr</span>)</div><div class="line"><a name="l00796"></a><span class="lineno"> 796</span>&#160; {</div><div class="line"><a name="l00797"></a><span class="lineno"> 797</span>&#160; <a class="code" 
href="_validate_8h.xhtml#ae7eed178dac535c6e727061b1f5bc6eb">ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c">DataType::S32</a>);</div><div class="line"><a name="l00798"></a><span class="lineno"> 798</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>-&gt;num_dimensions() &gt; 1);</div><div class="line"><a name="l00799"></a><span class="lineno"> 799</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(mm_result-&gt;dimension(0) != <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>-&gt;dimension(0));</div><div class="line"><a name="l00800"></a><span class="lineno"> 800</span>&#160; }</div><div class="line"><a name="l00801"></a><span class="lineno"> 801</span>&#160;</div><div class="line"><a name="l00802"></a><span class="lineno"> 802</span>&#160; <span class="comment">// If a_offset == 0, vector_sum_col can be a nullptr</span></div><div class="line"><a name="l00803"></a><span class="lineno"> 803</span>&#160; <span class="keywordflow">if</span>(a_offset != 0)</div><div class="line"><a name="l00804"></a><span class="lineno"> 804</span>&#160; {</div><div class="line"><a name="l00805"></a><span class="lineno"> 805</span>&#160; <a class="code" href="_validate_8h.xhtml#ae7eed178dac535c6e727061b1f5bc6eb">ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN</a>(vector_sum_col, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c">DataType::S32</a>);</div><div class="line"><a name="l00806"></a><span class="lineno"> 806</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(vector_sum_col-&gt;dimension(0) != mm_result-&gt;dimension(0));</div><div class="line"><a name="l00807"></a><span class="lineno"> 807</span>&#160; }</div><div class="line"><a name="l00808"></a><span class="lineno"> 808</span>&#160;</div><div class="line"><a name="l00809"></a><span class="lineno"> 809</span>&#160; <span class="comment">// If b_offset == 0, vector_sum_row can be a nullptr</span></div><div class="line"><a name="l00810"></a><span class="lineno"> 810</span>&#160; <span class="keywordflow">if</span>(b_offset != 0)</div><div class="line"><a name="l00811"></a><span class="lineno"> 811</span>&#160; {</div><div class="line"><a name="l00812"></a><span class="lineno"> 812</span>&#160; <a class="code" href="_validate_8h.xhtml#ae7eed178dac535c6e727061b1f5bc6eb">ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN</a>(vector_sum_row, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c">DataType::S32</a>);</div><div class="line"><a name="l00813"></a><span class="lineno"> 813</span>&#160;</div><div class="line"><a name="l00814"></a><span class="lineno"> 814</span>&#160; <span class="comment">// Check if input is a 3D reinterpretation</span></div><div class="line"><a name="l00815"></a><span class="lineno"> 815</span>&#160; <span class="keyword">const</span> <span 
class="keywordtype">bool</span> reinterpret_as_3d = mm_result-&gt;num_dimensions() &gt; 1 &amp;&amp; mm_result-&gt;tensor_shape().y() != vector_sum_row-&gt;tensor_shape().x();</div><div class="line"><a name="l00816"></a><span class="lineno"> 816</span>&#160;</div><div class="line"><a name="l00817"></a><span class="lineno"> 817</span>&#160; <span class="comment">// Validate input</span></div><div class="line"><a name="l00818"></a><span class="lineno"> 818</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(reinterpret_as_3d &amp;&amp; vector_sum_row-&gt;dimension(0) != (mm_result-&gt;dimension(1) * mm_result-&gt;dimension(2)));</div><div class="line"><a name="l00819"></a><span class="lineno"> 819</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(!reinterpret_as_3d &amp;&amp; vector_sum_row-&gt;dimension(0) != mm_result-&gt;dimension(1));</div><div class="line"><a name="l00820"></a><span class="lineno"> 820</span>&#160;</div><div class="line"><a name="l00821"></a><span class="lineno"> 821</span>&#160; TensorShape <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ab1806bf0c5a41f674fb9d2dc6af644f5">output_shape</a> = output-&gt;tensor_shape();</div><div class="line"><a name="l00822"></a><span class="lineno"> 822</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ab1806bf0c5a41f674fb9d2dc6af644f5">output_shape</a>.num_dimensions() &gt; 1)</div><div class="line"><a name="l00823"></a><span class="lineno"> 823</span>&#160; {</div><div class="line"><a name="l00824"></a><span class="lineno"> 824</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> output_batch_idx = reinterpret_as_3d ? 
3 : 2;</div><div class="line"><a name="l00825"></a><span class="lineno"> 825</span>&#160;</div><div class="line"><a name="l00826"></a><span class="lineno"> 826</span>&#160; TensorShape vector_sum_row_shape = vector_sum_row-&gt;tensor_shape();</div><div class="line"><a name="l00827"></a><span class="lineno"> 827</span>&#160; vector_sum_row_shape.collapse_from(1);</div><div class="line"><a name="l00828"></a><span class="lineno"> 828</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ab1806bf0c5a41f674fb9d2dc6af644f5">output_shape</a>.collapse_from(output_batch_idx);</div><div class="line"><a name="l00829"></a><span class="lineno"> 829</span>&#160;</div><div class="line"><a name="l00830"></a><span class="lineno"> 830</span>&#160; <a class="code" href="_error_8h.xhtml#a1c69762a42ab8add645d0a949b6f4b1f">ARM_COMPUTE_RETURN_ERROR_ON_MSG</a>(vector_sum_row_shape[1] != <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ab1806bf0c5a41f674fb9d2dc6af644f5">output_shape</a>[output_batch_idx],</div><div class="line"><a name="l00831"></a><span class="lineno"> 831</span>&#160; <span class="stringliteral">&quot;mm_result tensor must have the same number of batches of output tensor&quot;</span>);</div><div class="line"><a name="l00832"></a><span class="lineno"> 832</span>&#160;</div><div class="line"><a name="l00833"></a><span class="lineno"> 833</span>&#160; <span class="keywordflow">if</span>(a_offset != 0)</div><div class="line"><a name="l00834"></a><span class="lineno"> 834</span>&#160; {</div><div class="line"><a name="l00835"></a><span class="lineno"> 835</span>&#160; TensorShape vector_sum_col_shape = vector_sum_col-&gt;tensor_shape();</div><div class="line"><a name="l00836"></a><span class="lineno"> 836</span>&#160; vector_sum_col_shape.collapse_from(1);</div><div class="line"><a name="l00837"></a><span class="lineno"> 837</span>&#160;</div><div class="line"><a name="l00838"></a><span class="lineno"> 838</span>&#160; <a class="code" href="_error_8h.xhtml#a1c69762a42ab8add645d0a949b6f4b1f">ARM_COMPUTE_RETURN_ERROR_ON_MSG</a>(vector_sum_col_shape[1] != 1 &amp;&amp; vector_sum_col_shape[1] != vector_sum_row_shape[1],</div><div class="line"><a name="l00839"></a><span class="lineno"> 839</span>&#160; <span class="stringliteral">&quot;vector_sum_col tensor must have the same number of batches of vector_sum_row_shape or the number of batches must be set to 1&quot;</span>);</div><div class="line"><a name="l00840"></a><span class="lineno"> 840</span>&#160; }</div><div class="line"><a name="l00841"></a><span class="lineno"> 841</span>&#160; }</div><div class="line"><a name="l00842"></a><span class="lineno"> 842</span>&#160; }</div><div class="line"><a name="l00843"></a><span class="lineno"> 843</span>&#160;</div><div class="line"><a name="l00844"></a><span class="lineno"> 844</span>&#160; <span class="keywordflow">if</span>(output-&gt;total_size() != 0)</div><div class="line"><a name="l00845"></a><span class="lineno"> 845</span>&#160; {</div><div class="line"><a name="l00846"></a><span class="lineno"> 846</span>&#160; <a class="code" href="_validate_8h.xhtml#ae7eed178dac535c6e727061b1f5bc6eb">ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN</a>(output, 1, <a class="code" href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6af14462d71aa842202c3e4b272c7ec924">DataType::QASYMM8</a>, <a class="code" href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6a329f5d0c4b0c80e3474951d2c4435dd9">DataType::QASYMM8_SIGNED</a>);</div><div 
class="line"><a name="l00847"></a><span class="lineno"> 847</span>&#160; <a class="code" href="_validate_8h.xhtml#a27e4638546c88b8916f967e6e54480a9">ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES</a>(mm_result, output);</div><div class="line"><a name="l00848"></a><span class="lineno"> 848</span>&#160; }</div><div class="line"><a name="l00849"></a><span class="lineno"> 849</span>&#160;</div><div class="line"><a name="l00850"></a><span class="lineno"> 850</span>&#160; <span class="keywordflow">return</span> Status{};</div><div class="line"><a name="l00851"></a><span class="lineno"> 851</span>&#160;}</div><div class="line"><a name="l00852"></a><span class="lineno"> 852</span>&#160;</div><div class="line"><a name="l00853"></a><span class="lineno"> 853</span>&#160;std::pair&lt;Status, Window&gt; validate_and_configure_window(ITensorInfo *mm_result, ITensorInfo *output)</div><div class="line"><a name="l00854"></a><span class="lineno"> 854</span>&#160;{</div><div class="line"><a name="l00855"></a><span class="lineno"> 855</span>&#160; <span class="comment">// Output auto inizialitation if not yet initialized</span></div><div class="line"><a name="l00856"></a><span class="lineno"> 856</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a47be6fa38308d0003c25b60b7dbc45ce">auto_init_if_empty</a>(*output, mm_result-&gt;clone()-&gt;set_data_type(<a class="code" href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6af14462d71aa842202c3e4b272c7ec924">DataType::QASYMM8</a>));</div><div class="line"><a name="l00857"></a><span class="lineno"> 857</span>&#160;</div><div class="line"><a name="l00858"></a><span class="lineno"> 858</span>&#160; <span class="comment">// Configure kernel window</span></div><div class="line"><a name="l00859"></a><span class="lineno"> 859</span>&#160; Window win = <a class="code" href="namespacearm__compute.xhtml#ab7980fa5ee693e3282a76da047a1c3b5">calculate_max_window</a>(*mm_result, Steps());</div><div class="line"><a name="l00860"></a><span class="lineno"> 860</span>&#160;</div><div class="line"><a name="l00861"></a><span class="lineno"> 861</span>&#160; <span class="comment">// Note: This kernel performs 16 elements per iteration.</span></div><div class="line"><a name="l00862"></a><span class="lineno"> 862</span>&#160; <span class="comment">// However, since we use a left-over for loop, we cannot have any read or write out of memory</span></div><div class="line"><a name="l00863"></a><span class="lineno"> 863</span>&#160; <span class="comment">// For this reason num_elems_processed_per_iteration is 1 and so update_window_and_padding() can be skipped</span></div><div class="line"><a name="l00864"></a><span class="lineno"> 864</span>&#160; Coordinates coord;</div><div class="line"><a name="l00865"></a><span class="lineno"> 865</span>&#160; coord.set_num_dimensions(output-&gt;num_dimensions());</div><div class="line"><a name="l00866"></a><span class="lineno"> 866</span>&#160; output-&gt;set_valid_region(ValidRegion(coord, output-&gt;tensor_shape()));</div><div class="line"><a name="l00867"></a><span class="lineno"> 867</span>&#160;</div><div class="line"><a name="l00868"></a><span class="lineno"> 868</span>&#160; <span class="keywordflow">return</span> std::make_pair(Status{}, win);</div><div class="line"><a name="l00869"></a><span class="lineno"> 869</span>&#160;}</div><div class="line"><a name="l00870"></a><span class="lineno"> 870</span>&#160;</div><div class="line"><a name="l00871"></a><span class="lineno"> 871</span>&#160;<a class="code" 
href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#adb79bb3ad15444f00b55b35f1d6e16b7">NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction</a></div><div class="line"><a name="l00872"></a><span class="lineno"> 872</span>&#160;get_configured_function(<span class="keyword">const</span> ITensor *mm_result, <span class="keyword">const</span> ITensor *vector_sum_row, <span class="keyword">const</span> ITensor *output, GEMMLowpOutputStageInfo output_stage)</div><div class="line"><a name="l00873"></a><span class="lineno"> 873</span>&#160;{</div><div class="line"><a name="l00874"></a><span class="lineno"> 874</span>&#160; <span class="keyword">static</span> std::map&lt;uint8_t, NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction&gt; map_function_qasymm =</div><div class="line"><a name="l00875"></a><span class="lineno"> 875</span>&#160; {</div><div class="line"><a name="l00876"></a><span class="lineno"> 876</span>&#160; { 0, &amp;run_offset_contribution_output_stage&lt;uint8_t, false, false, false&gt; },</div><div class="line"><a name="l00877"></a><span class="lineno"> 877</span>&#160; { 1, &amp;run_offset_contribution_output_stage&lt;uint8_t, true, false, false&gt; },</div><div class="line"><a name="l00878"></a><span class="lineno"> 878</span>&#160; { 2, &amp;run_offset_contribution_output_stage&lt;uint8_t, false, true, false&gt; },</div><div class="line"><a name="l00879"></a><span class="lineno"> 879</span>&#160; { 3, &amp;run_offset_contribution_output_stage&lt;uint8_t, true, true, false&gt; },</div><div class="line"><a name="l00880"></a><span class="lineno"> 880</span>&#160; { 4, &amp;run_offset_contribution_output_stage&lt;uint8_t, false, false, true&gt; },</div><div class="line"><a name="l00881"></a><span class="lineno"> 881</span>&#160; { 5, &amp;run_offset_contribution_output_stage&lt;uint8_t, true, false, true&gt; },</div><div class="line"><a name="l00882"></a><span class="lineno"> 882</span>&#160; { 6, &amp;run_offset_contribution_output_stage&lt;uint8_t, false, true, true&gt; },</div><div class="line"><a name="l00883"></a><span class="lineno"> 883</span>&#160; { 7, &amp;run_offset_contribution_output_stage&lt;uint8_t, true, true, true&gt; },</div><div class="line"><a name="l00884"></a><span class="lineno"> 884</span>&#160; { 8, &amp;run_offset_contribution_output_stage&lt;int8_t, false, false, false&gt; },</div><div class="line"><a name="l00885"></a><span class="lineno"> 885</span>&#160; { 9, &amp;run_offset_contribution_output_stage&lt;int8_t, true, false, false&gt; },</div><div class="line"><a name="l00886"></a><span class="lineno"> 886</span>&#160; { 10, &amp;run_offset_contribution_output_stage&lt;int8_t, false, true, false&gt; },</div><div class="line"><a name="l00887"></a><span class="lineno"> 887</span>&#160; { 11, &amp;run_offset_contribution_output_stage&lt;int8_t, true, true, false&gt; },</div><div class="line"><a name="l00888"></a><span class="lineno"> 888</span>&#160; { 12, &amp;run_offset_contribution_output_stage&lt;int8_t, false, false, true&gt; },</div><div class="line"><a name="l00889"></a><span class="lineno"> 889</span>&#160; { 13, &amp;run_offset_contribution_output_stage&lt;int8_t, true, false, true&gt; },</div><div class="line"><a name="l00890"></a><span class="lineno"> 890</span>&#160; { 14, &amp;run_offset_contribution_output_stage&lt;int8_t, false, true, true&gt; },</div><div class="line"><a name="l00891"></a><span class="lineno"> 
891</span>&#160; { 15, &amp;run_offset_contribution_output_stage&lt;int8_t, true, true, true&gt; },</div><div class="line"><a name="l00892"></a><span class="lineno"> 892</span>&#160; };</div><div class="line"><a name="l00893"></a><span class="lineno"> 893</span>&#160;</div><div class="line"><a name="l00894"></a><span class="lineno"> 894</span>&#160; <span class="keyword">static</span> std::map&lt;uint8_t, NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction&gt; map_function_qsymm =</div><div class="line"><a name="l00895"></a><span class="lineno"> 895</span>&#160; {</div><div class="line"><a name="l00896"></a><span class="lineno"> 896</span>&#160; { 0, &amp;run_offset_contribution_output_stage_symm&lt;false, false, false&gt; },</div><div class="line"><a name="l00897"></a><span class="lineno"> 897</span>&#160; { 1, &amp;run_offset_contribution_output_stage_symm&lt;true, false, false&gt; },</div><div class="line"><a name="l00898"></a><span class="lineno"> 898</span>&#160; { 2, &amp;run_offset_contribution_output_stage_symm&lt;false, true, false&gt; },</div><div class="line"><a name="l00899"></a><span class="lineno"> 899</span>&#160; { 3, &amp;run_offset_contribution_output_stage_symm&lt;true, true, false&gt; },</div><div class="line"><a name="l00900"></a><span class="lineno"> 900</span>&#160; { 4, &amp;run_offset_contribution_output_stage_symm&lt;false, false, true&gt; },</div><div class="line"><a name="l00901"></a><span class="lineno"> 901</span>&#160; { 5, &amp;run_offset_contribution_output_stage_symm&lt;true, false, true&gt; },</div><div class="line"><a name="l00902"></a><span class="lineno"> 902</span>&#160; { 6, &amp;run_offset_contribution_output_stage_symm&lt;false, true, true&gt; },</div><div class="line"><a name="l00903"></a><span class="lineno"> 903</span>&#160; { 7, &amp;run_offset_contribution_output_stage_symm&lt;true, true, true&gt; }</div><div class="line"><a name="l00904"></a><span class="lineno"> 904</span>&#160; };</div><div class="line"><a name="l00905"></a><span class="lineno"> 905</span>&#160;</div><div class="line"><a name="l00906"></a><span class="lineno"> 906</span>&#160; <span class="comment">// Check if input is a 3D reinterpretation</span></div><div class="line"><a name="l00907"></a><span class="lineno"> 907</span>&#160; <span class="keyword">const</span> <span class="keywordtype">bool</span> reinterpret_as_3d = vector_sum_row != <span class="keyword">nullptr</span></div><div class="line"><a name="l00908"></a><span class="lineno"> 908</span>&#160; &amp;&amp; mm_result-&gt;info()-&gt;num_dimensions() &gt; 1</div><div class="line"><a name="l00909"></a><span class="lineno"> 909</span>&#160; &amp;&amp; mm_result-&gt;info()-&gt;tensor_shape().y() != vector_sum_row-&gt;info()-&gt;tensor_shape().x();</div><div class="line"><a name="l00910"></a><span class="lineno"> 910</span>&#160;</div><div class="line"><a name="l00911"></a><span class="lineno"> 911</span>&#160; <span class="comment">// Check if we need to clamp the result using min and max</span></div><div class="line"><a name="l00912"></a><span class="lineno"> 912</span>&#160; PixelValue <a class="code" href="minmaxloc_8cl.xhtml#a538b4b63f40e7b12891774e03a4f0dec">type_min</a>{};</div><div class="line"><a name="l00913"></a><span class="lineno"> 913</span>&#160; PixelValue <a class="code" href="minmaxloc_8cl.xhtml#a4464d6f922ea17b4a9ca6a2cec7ddb75">type_max</a>{};</div><div class="line"><a name="l00914"></a><span class="lineno"> 914</span>&#160; std::tie(<a class="code" 
href="minmaxloc_8cl.xhtml#a538b4b63f40e7b12891774e03a4f0dec">type_min</a>, <a class="code" href="minmaxloc_8cl.xhtml#a4464d6f922ea17b4a9ca6a2cec7ddb75">type_max</a>) = <a class="code" href="namespacearm__compute.xhtml#ae69217acf0f0b5d4de030a09ad50a0bc">get_min_max</a>(output-&gt;info()-&gt;data_type());</div><div class="line"><a name="l00915"></a><span class="lineno"> 915</span>&#160; int32_t type_min_int = <a class="code" href="minmaxloc_8cl.xhtml#a538b4b63f40e7b12891774e03a4f0dec">type_min</a>.get&lt;int32_t&gt;();</div><div class="line"><a name="l00916"></a><span class="lineno"> 916</span>&#160; int32_t type_max_int = <a class="code" href="minmaxloc_8cl.xhtml#a4464d6f922ea17b4a9ca6a2cec7ddb75">type_max</a>.get&lt;int32_t&gt;();</div><div class="line"><a name="l00917"></a><span class="lineno"> 917</span>&#160; <span class="keyword">const</span> <span class="keywordtype">bool</span> is_bounded_relu = !(output_stage.gemmlowp_min_bound == type_min_int &amp;&amp; output_stage.gemmlowp_max_bound == type_max_int);</div><div class="line"><a name="l00918"></a><span class="lineno"> 918</span>&#160;</div><div class="line"><a name="l00919"></a><span class="lineno"> 919</span>&#160; <span class="comment">// Check if we need to perform fixed point requantization</span></div><div class="line"><a name="l00920"></a><span class="lineno"> 920</span>&#160; <span class="keyword">const</span> <span class="keywordtype">bool</span> is_fixed_point = output_stage.type != <a class="code" href="namespacearm__compute.xhtml#a5558e2cc22f7f4771653d992c8ad8864a079e2ddc95b344b5cb0188bed9a80d8b">GEMMLowpOutputStageType::QUANTIZE_DOWN</a>;</div><div class="line"><a name="l00921"></a><span class="lineno"> 921</span>&#160;</div><div class="line"><a name="l00922"></a><span class="lineno"> 922</span>&#160; <span class="comment">// Check if symmetric per-channel execution</span></div><div class="line"><a name="l00923"></a><span class="lineno"> 923</span>&#160; <span class="keyword">const</span> <span class="keywordtype">bool</span> is_signed = output-&gt;info()-&gt;data_type() == <a class="code" href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6a329f5d0c4b0c80e3474951d2c4435dd9">DataType::QASYMM8_SIGNED</a>;</div><div class="line"><a name="l00924"></a><span class="lineno"> 924</span>&#160;</div><div class="line"><a name="l00925"></a><span class="lineno"> 925</span>&#160; <span class="comment">// Check if symmetric per-channel execution</span></div><div class="line"><a name="l00926"></a><span class="lineno"> 926</span>&#160; <span class="keyword">const</span> <span class="keywordtype">bool</span> is_symm = output_stage.is_quantized_per_channel;</div><div class="line"><a name="l00927"></a><span class="lineno"> 927</span>&#160;</div><div class="line"><a name="l00928"></a><span class="lineno"> 928</span>&#160; <span class="comment">// key acts as a bitset, setting the first bit on reinterpret_as_3d,</span></div><div class="line"><a name="l00929"></a><span class="lineno"> 929</span>&#160; <span class="comment">// the second on is_bounded_relu, and the third on is_fixed_point.</span></div><div class="line"><a name="l00930"></a><span class="lineno"> 930</span>&#160; uint8_t key = (reinterpret_as_3d ? 1UL : 0UL) | ((is_bounded_relu ? 1UL : 0UL) &lt;&lt; 1) | ((is_fixed_point ? 
1UL : 0UL) &lt;&lt; 2);</div><div class="line"><a name="l00931"></a><span class="lineno"> 931</span>&#160; <span class="keywordflow">if</span>(is_symm)</div><div class="line"><a name="l00932"></a><span class="lineno"> 932</span>&#160; {</div><div class="line"><a name="l00933"></a><span class="lineno"> 933</span>&#160; <span class="keywordflow">return</span> map_function_qsymm.find(key)-&gt;second;</div><div class="line"><a name="l00934"></a><span class="lineno"> 934</span>&#160; }</div><div class="line"><a name="l00935"></a><span class="lineno"> 935</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00936"></a><span class="lineno"> 936</span>&#160; {</div><div class="line"><a name="l00937"></a><span class="lineno"> 937</span>&#160; key |= ((is_signed ? 1UL : 0UL) &lt;&lt; 3);</div><div class="line"><a name="l00938"></a><span class="lineno"> 938</span>&#160; <span class="keywordflow">return</span> map_function_qasymm.find(key)-&gt;second;</div><div class="line"><a name="l00939"></a><span class="lineno"> 939</span>&#160; }</div><div class="line"><a name="l00940"></a><span class="lineno"> 940</span>&#160;}</div><div class="line"><a name="l00941"></a><span class="lineno"> 941</span>&#160;} <span class="comment">// namespace</span></div><div class="line"><a name="l00942"></a><span class="lineno"> 942</span>&#160;</div><div class="line"><a name="l00943"></a><span class="lineno"><a class="line" href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a2851f631e9660a4dd9644cb749282723"> 943</a></span>&#160;<a class="code" href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a2851f631e9660a4dd9644cb749282723">NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel</a>()</div><div class="line"><a name="l00944"></a><span class="lineno"> 944</span>&#160; : _function(nullptr), _vector_sum_col(nullptr), _vector_sum_row(nullptr), _bias(nullptr), _mm_result(nullptr), _output(nullptr), _a_offset(0), _b_offset(0), _k_offset(0), _slide_vector_sum_col(true),</div><div class="line"><a name="l00945"></a><span class="lineno"> 945</span>&#160; _output_stage(<a class="code" href="structarm__compute_1_1_g_e_m_m_lowp_output_stage_info.xhtml">GEMMLowpOutputStageInfo</a>())</div><div class="line"><a name="l00946"></a><span class="lineno"> 946</span>&#160;</div><div class="line"><a name="l00947"></a><span class="lineno"> 947</span>&#160;{</div><div class="line"><a name="l00948"></a><span class="lineno"> 948</span>&#160;}</div><div class="line"><a name="l00949"></a><span class="lineno"> 949</span>&#160;</div><div class="line"><a name="l00950"></a><span class="lineno"><a class="line" href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a97ebe5c0444a53d58d9b9f079ebe2d0f"> 950</a></span>&#160;<span class="keywordtype">void</span> <a class="code" href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a97ebe5c0444a53d58d9b9f079ebe2d0f">NEGEMMLowpOffsetContributionOutputStageKernel::configure</a>(<span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *mm_result, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *vector_sum_col,</div><div class="line"><a name="l00951"></a><span class="lineno"> 951</span>&#160; <span class="keyword">const</span> <a class="code" 
href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *vector_sum_row, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>, <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *output,</div><div class="line"><a name="l00952"></a><span class="lineno"> 952</span>&#160; int32_t k, int32_t a_offset, int32_t b_offset,</div><div class="line"><a name="l00953"></a><span class="lineno"> 953</span>&#160; <a class="code" href="structarm__compute_1_1_g_e_m_m_lowp_output_stage_info.xhtml">GEMMLowpOutputStageInfo</a> output_stage)</div><div class="line"><a name="l00954"></a><span class="lineno"> 954</span>&#160;{</div><div class="line"><a name="l00955"></a><span class="lineno"> 955</span>&#160; <span class="comment">// Perform validate step</span></div><div class="line"><a name="l00956"></a><span class="lineno"> 956</span>&#160; <a class="code" href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a>(mm_result, output);</div><div class="line"><a name="l00957"></a><span class="lineno"> 957</span>&#160;</div><div class="line"><a name="l00958"></a><span class="lineno"> 958</span>&#160; <a class="code" href="_error_8h.xhtml#a938dcd406ce611ef5345ad2531cdb948">ARM_COMPUTE_ERROR_THROW_ON</a>(validate_arguments(mm_result-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(),</div><div class="line"><a name="l00959"></a><span class="lineno"> 959</span>&#160; vector_sum_col != <span class="keyword">nullptr</span> ? vector_sum_col-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>() : <span class="keyword">nullptr</span>, <span class="comment">// NOLINT</span></div><div class="line"><a name="l00960"></a><span class="lineno"> 960</span>&#160; vector_sum_row != <span class="keyword">nullptr</span> ? vector_sum_row-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>() : <span class="keyword">nullptr</span>, <span class="comment">// NOLINT</span></div><div class="line"><a name="l00961"></a><span class="lineno"> 961</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a> != <span class="keyword">nullptr</span> ? 
<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>-&gt;<a class="code" href="classarm__compute_1_1_c_l_tensor.xhtml#ad45f0c01a0713dfb6bd7232c7f396fc4">info</a>() : <span class="keyword">nullptr</span>, <span class="comment">// NOLINT</span></div><div class="line"><a name="l00962"></a><span class="lineno"> 962</span>&#160; output-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), a_offset, b_offset, output_stage)); <span class="comment">// NOLINT</span></div><div class="line"><a name="l00963"></a><span class="lineno"> 963</span>&#160;</div><div class="line"><a name="l00964"></a><span class="lineno"> 964</span>&#160; _vector_sum_col = vector_sum_col;</div><div class="line"><a name="l00965"></a><span class="lineno"> 965</span>&#160; _vector_sum_row = vector_sum_row;</div><div class="line"><a name="l00966"></a><span class="lineno"> 966</span>&#160; _bias = <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>;</div><div class="line"><a name="l00967"></a><span class="lineno"> 967</span>&#160; _mm_result = mm_result;</div><div class="line"><a name="l00968"></a><span class="lineno"> 968</span>&#160; _output = output;</div><div class="line"><a name="l00969"></a><span class="lineno"> 969</span>&#160; _a_offset = a_offset;</div><div class="line"><a name="l00970"></a><span class="lineno"> 970</span>&#160; _b_offset = b_offset;</div><div class="line"><a name="l00971"></a><span class="lineno"> 971</span>&#160; _k_offset = a_offset * b_offset * k;</div><div class="line"><a name="l00972"></a><span class="lineno"> 972</span>&#160; _output_stage = output_stage;</div><div class="line"><a name="l00973"></a><span class="lineno"> 973</span>&#160;</div><div class="line"><a name="l00974"></a><span class="lineno"> 974</span>&#160; <span class="comment">// If a_offset == 0, vector_sum_col can be a nullptr</span></div><div class="line"><a name="l00975"></a><span class="lineno"> 975</span>&#160; <span class="keywordflow">if</span>(a_offset != 0)</div><div class="line"><a name="l00976"></a><span class="lineno"> 976</span>&#160; {</div><div class="line"><a name="l00977"></a><span class="lineno"> 977</span>&#160; <span class="comment">// Check if vector_sum_col_shape should be slidden or not</span></div><div class="line"><a name="l00978"></a><span class="lineno"> 978</span>&#160; <span class="comment">// Don&#39;t slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1</span></div><div class="line"><a name="l00979"></a><span class="lineno"> 979</span>&#160; <span class="comment">// This scenario can happen when the the matrix multiplication is used to perform a convolution operation</span></div><div class="line"><a name="l00980"></a><span class="lineno"> 980</span>&#160; _slide_vector_sum_col = vector_sum_col-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a7c66505457d00ece3aa4b34cab80757d">tensor_shape</a>().<a class="code" href="classarm__compute_1_1_dimensions.xhtml#a80a5f2d6e3a697c9aad893a3b4242615">num_dimensions</a>() &gt; 1;</div><div class="line"><a name="l00981"></a><span class="lineno"> 981</span>&#160; }</div><div class="line"><a name="l00982"></a><span class="lineno"> 982</span>&#160;</div><div class="line"><a 
name="l00983"></a><span class="lineno"> 983</span>&#160; <span class="comment">// Configure kernel window</span></div><div class="line"><a name="l00984"></a><span class="lineno"> 984</span>&#160; <span class="keyword">auto</span> win_config = validate_and_configure_window(mm_result-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), output-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>());</div><div class="line"><a name="l00985"></a><span class="lineno"> 985</span>&#160; <a class="code" href="_error_8h.xhtml#a938dcd406ce611ef5345ad2531cdb948">ARM_COMPUTE_ERROR_THROW_ON</a>(win_config.first);</div><div class="line"><a name="l00986"></a><span class="lineno"> 986</span>&#160; INEKernel::configure(win_config.second);</div><div class="line"><a name="l00987"></a><span class="lineno"> 987</span>&#160;</div><div class="line"><a name="l00988"></a><span class="lineno"> 988</span>&#160; _function = get_configured_function(mm_result, vector_sum_row, output, output_stage);</div><div class="line"><a name="l00989"></a><span class="lineno"> 989</span>&#160;}</div><div class="line"><a name="l00990"></a><span class="lineno"> 990</span>&#160;</div><div class="line"><a name="l00991"></a><span class="lineno"><a class="line" href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a6296f2754011b2221b343ccedfc0ba35"> 991</a></span>&#160;<a class="code" href="classarm__compute_1_1_status.xhtml">Status</a> <a class="code" href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a6296f2754011b2221b343ccedfc0ba35">NEGEMMLowpOffsetContributionOutputStageKernel::validate</a>(<span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *mm_result, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *vector_sum_col,</div><div class="line"><a name="l00992"></a><span class="lineno"> 992</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *vector_sum_row, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *output,</div><div class="line"><a name="l00993"></a><span class="lineno"> 993</span>&#160; int32_t a_offset, int32_t b_offset, <a class="code" href="structarm__compute_1_1_g_e_m_m_lowp_output_stage_info.xhtml">GEMMLowpOutputStageInfo</a> output_stage)</div><div class="line"><a name="l00994"></a><span class="lineno"> 994</span>&#160;{</div><div class="line"><a name="l00995"></a><span class="lineno"> 995</span>&#160; <a class="code" href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a>(mm_result, output);</div><div class="line"><a name="l00996"></a><span class="lineno"> 996</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(validate_arguments(mm_result, vector_sum_col, vector_sum_row, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">bias</a>, output, a_offset, b_offset, 
output_stage));</div><div class="line"><a name="l00997"></a><span class="lineno"> 997</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(validate_and_configure_window(mm_result-&gt;<a class="code" href="classarm__compute_1_1misc_1_1_i_cloneable.xhtml#a4d10e5012a872e7f78f2b539b673049d">clone</a>().get(), output-&gt;<a class="code" href="classarm__compute_1_1misc_1_1_i_cloneable.xhtml#a4d10e5012a872e7f78f2b539b673049d">clone</a>().get()).first);</div><div class="line"><a name="l00998"></a><span class="lineno"> 998</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classarm__compute_1_1_status.xhtml">Status</a>{};</div><div class="line"><a name="l00999"></a><span class="lineno"> 999</span>&#160;}</div><div class="line"><a name="l01000"></a><span class="lineno"> 1000</span>&#160;</div><div class="line"><a name="l01001"></a><span class="lineno"><a class="line" href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a112b35dd205c62ea6ed1447ef226da82"> 1001</a></span>&#160;<span class="keywordtype">void</span> <a class="code" href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a112b35dd205c62ea6ed1447ef226da82">NEGEMMLowpOffsetContributionOutputStageKernel::run</a>(<span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_window.xhtml">Window</a> &amp;window, <span class="keyword">const</span> <a class="code" href="structarm__compute_1_1_thread_info.xhtml">ThreadInfo</a> &amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a4f4125dba5283887b34f889b1c615c0c">info</a>)</div><div class="line"><a name="l01002"></a><span class="lineno"> 1002</span>&#160;{</div><div class="line"><a name="l01003"></a><span class="lineno"> 1003</span>&#160; <a class="code" href="_error_8h.xhtml#a6dc630a6ae9cc063b3924bcea8dee9d6">ARM_COMPUTE_UNUSED</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a4f4125dba5283887b34f889b1c615c0c">info</a>);</div><div class="line"><a name="l01004"></a><span class="lineno"> 1004</span>&#160; <a class="code" href="_validate_8h.xhtml#a1b35b0d258183cf9ef36adf684d0b88c">ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL</a>(<span class="keyword">this</span>);</div><div class="line"><a name="l01005"></a><span class="lineno"> 1005</span>&#160; <a class="code" href="_validate_8h.xhtml#a6eb9ce82815fe429250189da7592ba75">ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW</a>(<a class="code" href="classarm__compute_1_1_i_kernel.xhtml#ad34a46f53686c12a5c5e717cc9617fb6">INEKernel::window</a>(), <a class="code" href="classarm__compute_1_1_i_kernel.xhtml#ad34a46f53686c12a5c5e717cc9617fb6">window</a>);</div><div class="line"><a name="l01006"></a><span class="lineno"> 1006</span>&#160; _function(<a class="code" href="classarm__compute_1_1_i_kernel.xhtml#ad34a46f53686c12a5c5e717cc9617fb6">window</a>, _mm_result, _vector_sum_col, _vector_sum_row, _bias, _output, _a_offset, _b_offset, _k_offset, _slide_vector_sum_col, _output_stage);</div><div class="line"><a name="l01007"></a><span class="lineno"> 1007</span>&#160;}</div><div class="line"><a name="l01008"></a><span class="lineno"> 1008</span>&#160;</div><div class="line"><a name="l01009"></a><span class="lineno"> 1009</span>&#160;} <span class="comment">// namespace arm_compute</span></div><div class="ttc" id="src_2core_2_c_l_2cl__kernels_2_helpers_8h_xhtml_a009469e4d9b8fce3b6d5e97d2077827d"><div class="ttname"><a 
href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a></div><div class="ttdeci">__global uchar * offset(const Image *img, int x, int y)</div><div class="ttdoc">Get the pointer position of a Image.</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00510">helpers.h:510</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a979a54caef6e77ce0259e427136847e8"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a979a54caef6e77ce0259e427136847e8">arm_compute::test::validation::shift</a></div><div class="ttdeci">shift</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_depth_convert_layer_8cpp_source.xhtml#l00155">DepthConvertLayer.cpp:155</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_kernel_xhtml_ad34a46f53686c12a5c5e717cc9617fb6"><div class="ttname"><a href="classarm__compute_1_1_i_kernel.xhtml#ad34a46f53686c12a5c5e717cc9617fb6">arm_compute::IKernel::window</a></div><div class="ttdeci">const Window &amp; window() const</div><div class="ttdoc">The maximum window the kernel can be executed on.</div><div class="ttdef"><b>Definition:</b> <a href="_i_kernel_8cpp_source.xhtml#l00028">IKernel.cpp:28</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_a5558e2cc22f7f4771653d992c8ad8864ab300cae200f67712c1eb9234e28158ca"><div class="ttname"><a href="namespacearm__compute.xhtml#a5558e2cc22f7f4771653d992c8ad8864ab300cae200f67712c1eb9234e28158ca">arm_compute::GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT</a></div><div class="ttdoc">Quantize using a fixed point multiplication.</div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_xhtml_a112b35dd205c62ea6ed1447ef226da82"><div class="ttname"><a href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a112b35dd205c62ea6ed1447ef226da82">arm_compute::NEGEMMLowpOffsetContributionOutputStageKernel::run</a></div><div class="ttdeci">void run(const Window &amp;window, const ThreadInfo &amp;info) override</div><div class="ttdoc">Execute the kernel on the passed window.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_8cpp_source.xhtml#l01001">NEGEMMLowpOffsetContributionOutputStageKernel.cpp:1001</a></div></div>
<div class="ttc" id="_i_tensor_8h_xhtml"><div class="ttname"><a href="_i_tensor_8h.xhtml">ITensor.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_tensor_xhtml_ad45f0c01a0713dfb6bd7232c7f396fc4"><div class="ttname"><a href="classarm__compute_1_1_c_l_tensor.xhtml#ad45f0c01a0713dfb6bd7232c7f396fc4">arm_compute::CLTensor::info</a></div><div class="ttdeci">TensorInfo * info() const override</div><div class="ttdoc">Interface to be implemented by the child class to return the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_tensor_8cpp_source.xhtml#l00041">CLTensor.cpp:41</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_aa76b4a6e74940dabc5b7fc6b2dab3545"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#aa76b4a6e74940dabc5b7fc6b2dab3545">arm_compute::test::validation::b</a></div><div class="ttdeci">SimpleTensor&lt; float &gt; b</div><div class="ttdef"><b>Definition:</b> <a href="_c_p_p_2_d_f_t_8cpp_source.xhtml#l00157">DFT.cpp:157</a></div></div>
<div class="ttc" id="_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_8h_xhtml"><div class="ttname"><a href="_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_8h.xhtml">NEGEMMLowpOffsetContributionOutputStageKernel.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_xhtml_a2851f631e9660a4dd9644cb749282723"><div class="ttname"><a href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a2851f631e9660a4dd9644cb749282723">arm_compute::NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageKernel</a></div><div class="ttdeci">NEGEMMLowpOffsetContributionOutputStageKernel()</div><div class="ttdoc">Constructor.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_8cpp_source.xhtml#l00943">NEGEMMLowpOffsetContributionOutputStageKernel.cpp:943</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a8a1e1c105f0bdaf37db408c7cfcb77a4"><div class="ttname"><a href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ON_ERROR(status)</div><div class="ttdoc">Checks if a status contains an error and returns it.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00204">Error.h:204</a></div></div>
<div class="ttc" id="_window_8h_xhtml"><div class="ttname"><a href="_window_8h.xhtml">Window.h</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_ae7eed178dac535c6e727061b1f5bc6eb"><div class="ttname"><a href="_validate_8h.xhtml#ae7eed178dac535c6e727061b1f5bc6eb">ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00792">Validate.h:792</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml">arm_compute::ITensorInfo</a></div><div class="ttdoc">Store the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="_i_tensor_info_8h_source.xhtml#l00040">ITensorInfo.h:40</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a938dcd406ce611ef5345ad2531cdb948"><div class="ttname"><a href="_error_8h.xhtml#a938dcd406ce611ef5345ad2531cdb948">ARM_COMPUTE_ERROR_THROW_ON</a></div><div class="ttdeci">#define ARM_COMPUTE_ERROR_THROW_ON(status)</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00455">Error.h:455</a></div></div>
<div class="ttc" id="classarm__compute_1_1_status_xhtml"><div class="ttname"><a href="classarm__compute_1_1_status.xhtml">arm_compute::Status</a></div><div class="ttdoc">Status class.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00052">Error.h:52</a></div></div>
<div class="ttc" id="_tensor_info_8h_xhtml"><div class="ttname"><a href="_tensor_info_8h.xhtml">TensorInfo.h</a></div></div>
<div class="ttc" id="_n_e_asymm_8h_xhtml"><div class="ttname"><a href="_n_e_asymm_8h.xhtml">NEAsymm.h</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a206d6e247e0957ac3dee45d27756fc25"><div class="ttname"><a href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON(cond)</div><div class="ttdoc">If the condition is true, an error is returned.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00296">Error.h:296</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_xhtml"><div class="ttname"><a href="classarm__compute_1_1_i_tensor.xhtml">arm_compute::ITensor</a></div><div class="ttdoc">Interface for NEON tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_i_tensor_8h_source.xhtml#l00036">ITensor.h:36</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1support_1_1cpp11_xhtml_a73e352c61baaf9c1178da2d30105b04e"><div class="ttname"><a href="namespacearm__compute_1_1support_1_1cpp11.xhtml#a73e352c61baaf9c1178da2d30105b04e">arm_compute::support::cpp11::lowest</a></div><div class="ttdeci">T lowest()</div><div class="ttdef"><b>Definition:</b> <a href="_toolchain_support_8h_source.xhtml#l00418">ToolchainSupport.h:418</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ab7980fa5ee693e3282a76da047a1c3b5"><div class="ttname"><a href="namespacearm__compute.xhtml#ab7980fa5ee693e3282a76da047a1c3b5">arm_compute::calculate_max_window</a></div><div class="ttdeci">Window calculate_max_window(const ValidRegion &amp;valid_region, const Steps &amp;steps=Steps(), bool skip_border=false, BorderSize border_size=BorderSize())</div><div class="ttdoc">Calculate the maximum window for a given tensor shape and border setting.</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_helpers_8cpp_source.xhtml#l00028">Helpers.cpp:28</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml"><div class="ttname"><a href="namespacearm__compute.xhtml">arm_compute</a></div><div class="ttdoc">Copyright (c) 2017-2020 ARM Limited.</div><div class="ttdef"><b>Definition:</b> <a href="00__introduction_8dox_source.xhtml#l00024">00_introduction.dox:24</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_a47be6fa38308d0003c25b60b7dbc45ce"><div class="ttname"><a href="namespacearm__compute.xhtml#a47be6fa38308d0003c25b60b7dbc45ce">arm_compute::auto_init_if_empty</a></div><div class="ttdeci">bool auto_init_if_empty(ITensorInfo &amp;info, const TensorShape &amp;shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())</div><div class="ttdoc">Auto initialize the tensor info (shape, number of channels and data type) if the current assignment i...</div><div class="ttdef"><b>Definition:</b> <a href="_helpers_8inl_source.xhtml#l00202">Helpers.inl:202</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c"><div class="ttname"><a href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c">arm_compute::Format::S32</a></div><div class="ttdoc">1 channel, 1 S32 per channel</div></div>
<div class="ttc" id="arm__compute_2core_2_utils_8h_xhtml"><div class="ttname"><a href="arm__compute_2core_2_utils_8h.xhtml">Utils.h</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1wrapper_xhtml_aa16ace001ab8287faa46d6962f369219"><div class="ttname"><a href="namespacearm__compute_1_1wrapper.xhtml#aa16ace001ab8287faa46d6962f369219">arm_compute::wrapper::vgetlane</a></div><div class="ttdeci">uint8_t vgetlane(const uint8x8_t vector, const unsigned int lane)</div><div class="ttdef"><b>Definition:</b> <a href="getlane_8h_source.xhtml#l00091">getlane.h:91</a></div></div>
<div class="ttc" id="classarm__compute_1_1_window_xhtml_aa96e81276ee4f87ab386cd05a5539a7d"><div class="ttname"><a href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">arm_compute::Window::DimX</a></div><div class="ttdeci">static constexpr size_t DimX</div><div class="ttdoc">Alias for dimension 0 also known as X dimension.</div><div class="ttdef"><b>Definition:</b> <a href="_window_8h_source.xhtml#l00043">Window.h:43</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a6dc630a6ae9cc063b3924bcea8dee9d6"><div class="ttname"><a href="_error_8h.xhtml#a6dc630a6ae9cc063b3924bcea8dee9d6">ARM_COMPUTE_UNUSED</a></div><div class="ttdeci">#define ARM_COMPUTE_UNUSED(...)</div><div class="ttdoc">To avoid unused variables warnings.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00152">Error.h:152</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_a27e4638546c88b8916f967e6e54480a9"><div class="ttname"><a href="_validate_8h.xhtml#a27e4638546c88b8916f967e6e54480a9">ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00443">Validate.h:443</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml_a7c66505457d00ece3aa4b34cab80757d"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml#a7c66505457d00ece3aa4b34cab80757d">arm_compute::ITensorInfo::tensor_shape</a></div><div class="ttdeci">virtual const TensorShape &amp; tensor_shape() const =0</div><div class="ttdoc">Size for each dimension of the tensor.</div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ad8ed01ff3ff33333d8e19db4d2818bb6af14462d71aa842202c3e4b272c7ec924"><div class="ttname"><a href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6af14462d71aa842202c3e4b272c7ec924">arm_compute::DataType::QASYMM8</a></div><div class="ttdoc">quantized, asymmetric fixed-point 8-bit number unsigned</div></div>
<div class="ttc" id="classarm__compute_1_1misc_1_1_i_cloneable_xhtml_a4d10e5012a872e7f78f2b539b673049d"><div class="ttname"><a href="classarm__compute_1_1misc_1_1_i_cloneable.xhtml#a4d10e5012a872e7f78f2b539b673049d">arm_compute::misc::ICloneable::clone</a></div><div class="ttdeci">virtual std::unique_ptr&lt; T &gt; clone() const =0</div><div class="ttdoc">Provide a clone of the current object of class T.</div></div>
<div class="ttc" id="structarm__compute_1_1_g_e_m_m_lowp_output_stage_info_xhtml"><div class="ttname"><a href="structarm__compute_1_1_g_e_m_m_lowp_output_stage_info.xhtml">arm_compute::GEMMLowpOutputStageInfo</a></div><div class="ttdoc">GEMMLowp output stage info.</div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l01944">Types.h:1944</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_xhtml_a0e95dc1e53c361348314873b168ae237"><div class="ttname"><a href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">arm_compute::ITensor::info</a></div><div class="ttdeci">virtual ITensorInfo * info() const =0</div><div class="ttdoc">Interface to be implemented by the child class to return the tensor's metadata.</div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_xhtml_a97ebe5c0444a53d58d9b9f079ebe2d0f"><div class="ttname"><a href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a97ebe5c0444a53d58d9b9f079ebe2d0f">arm_compute::NEGEMMLowpOffsetContributionOutputStageKernel::configure</a></div><div class="ttdeci">void configure(const ITensor *mm_result, const ITensor *vector_sum_col, const ITensor *vector_sum_row, const ITensor *bias, ITensor *output, int32_t k, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)</div><div class="ttdoc">Initialise the kernel's input and output.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_8cpp_source.xhtml#l00950">NEGEMMLowpOffsetContributionOutputStageKernel.cpp:950</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a9aeced5a5128f60a31ea3e327a45ee21"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a9aeced5a5128f60a31ea3e327a45ee21">arm_compute::test::validation::has_bias</a></div><div class="ttdeci">const bool has_bias</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_o_n_2_im2_col_8cpp_source.xhtml#l00147">Im2Col.cpp:147</a></div></div>
<div class="ttc" id="_error_8h_xhtml"><div class="ttname"><a href="_error_8h.xhtml">Error.h</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a3a77be8aebd8e00522b32061d46ccdbd"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a3a77be8aebd8e00522b32061d46ccdbd">arm_compute::test::validation::bias</a></div><div class="ttdeci">CLTensor bias</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_convolution_layer_8cpp_source.xhtml#l00189">ConvolutionLayer.cpp:189</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_a5558e2cc22f7f4771653d992c8ad8864a079e2ddc95b344b5cb0188bed9a80d8b"><div class="ttname"><a href="namespacearm__compute.xhtml#a5558e2cc22f7f4771653d992c8ad8864a079e2ddc95b344b5cb0188bed9a80d8b">arm_compute::GEMMLowpOutputStageType::QUANTIZE_DOWN</a></div><div class="ttdoc">Quantize using an integer multiplication.</div></div>
<div class="ttc" id="minmaxloc_8cl_xhtml_a538b4b63f40e7b12891774e03a4f0dec"><div class="ttname"><a href="minmaxloc_8cl.xhtml#a538b4b63f40e7b12891774e03a4f0dec">type_min</a></div><div class="ttdeci">__constant DATA_TYPE16 type_min</div><div class="ttdef"><b>Definition:</b> <a href="minmaxloc_8cl_source.xhtml#l00046">minmaxloc.cl:46</a></div></div>
<div class="ttc" id="classarm__compute_1_1_window_xhtml_ad2d402364fa822b0b7775081291eeca9"><div class="ttname"><a href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">arm_compute::Window::DimY</a></div><div class="ttdeci">static constexpr size_t DimY</div><div class="ttdoc">Alias for dimension 1 also known as Y dimension.</div><div class="ttdef"><b>Definition:</b> <a href="_window_8h_source.xhtml#l00045">Window.h:45</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_a921b705e9e3e0fe928928447869e62a5"><div class="ttname"><a href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a></div><div class="ttdeci">#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00161">Validate.h:161</a></div></div>
<div class="ttc" id="structarm__compute_1_1_thread_info_xhtml"><div class="ttname"><a href="structarm__compute_1_1_thread_info.xhtml">arm_compute::ThreadInfo</a></div><div class="ttdoc">Information about executing thread and CPU.</div><div class="ttdef"><b>Definition:</b> <a href="_c_p_p_types_8h_source.xhtml#l00225">CPPTypes.h:225</a></div></div>
<div class="ttc" id="classarm__compute_1_1_window_xhtml_a893d17b56b9abc4423ce26e9a24ac5dc"><div class="ttname"><a href="classarm__compute_1_1_window.xhtml#a893d17b56b9abc4423ce26e9a24ac5dc">arm_compute::Window::DimZ</a></div><div class="ttdeci">static constexpr size_t DimZ</div><div class="ttdoc">Alias for dimension 2 also known as Z dimension.</div><div class="ttdef"><b>Definition:</b> <a href="_window_8h_source.xhtml#l00047">Window.h:47</a></div></div>
<div class="ttc" id="classarm__compute_1_1_dimensions_xhtml_a80a5f2d6e3a697c9aad893a3b4242615"><div class="ttname"><a href="classarm__compute_1_1_dimensions.xhtml#a80a5f2d6e3a697c9aad893a3b4242615">arm_compute::Dimensions::num_dimensions</a></div><div class="ttdeci">unsigned int num_dimensions() const</div><div class="ttdoc">Returns the effective dimensionality of the tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_dimensions_8h_source.xhtml#l00122">Dimensions.h:122</a></div></div>
<div class="ttc" id="minmaxloc_8cl_xhtml_a4464d6f922ea17b4a9ca6a2cec7ddb75"><div class="ttname"><a href="minmaxloc_8cl.xhtml#a4464d6f922ea17b4a9ca6a2cec7ddb75">type_max</a></div><div class="ttdeci">__constant DATA_TYPE16 type_max</div><div class="ttdef"><b>Definition:</b> <a href="minmaxloc_8cl_source.xhtml#l00047">minmaxloc.cl:47</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1wrapper_xhtml_ae7943ea9c1f74dc72c62d4cc3966a459"><div class="ttname"><a href="namespacearm__compute_1_1wrapper.xhtml#ae7943ea9c1f74dc72c62d4cc3966a459">arm_compute::wrapper::vstore</a></div><div class="ttdeci">void vstore(uint8_t *ptr, uint8x8_t val)</div><div class="ttdef"><b>Definition:</b> <a href="store_8h_source.xhtml#l00039">store.h:39</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ab1806bf0c5a41f674fb9d2dc6af644f5"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ab1806bf0c5a41f674fb9d2dc6af644f5">arm_compute::test::validation::output_shape</a></div><div class="ttdeci">output_shape</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_convolution_layer_8cpp_source.xhtml#l00182">ConvolutionLayer.cpp:182</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a1c69762a42ab8add645d0a949b6f4b1f"><div class="ttname"><a href="_error_8h.xhtml#a1c69762a42ab8add645d0a949b6f4b1f">ARM_COMPUTE_RETURN_ERROR_ON_MSG</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)</div><div class="ttdoc">If the condition is true, an error is returned.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00244">Error.h:244</a></div></div>
<div class="ttc" id="arm__compute_2core_2_helpers_8h_xhtml"><div class="ttname"><a href="arm__compute_2core_2_helpers_8h.xhtml">Helpers.h</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1wrapper_xhtml_a39e87435be178fba49b76f49426ef873"><div class="ttname"><a href="namespacearm__compute_1_1wrapper.xhtml#a39e87435be178fba49b76f49426ef873">arm_compute::wrapper::vdup_n</a></div><div class="ttdeci">uint8x8_t vdup_n(uint8_t value, traits::vector_64_tag)</div><div class="ttdef"><b>Definition:</b> <a href="dup__n_8h_source.xhtml#l00041">dup_n.h:41</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_a5002bf7ec46d52971f9526e94172cfee"><div class="ttname"><a href="namespacearm__compute.xhtml#a5002bf7ec46d52971f9526e94172cfee">arm_compute::execute_window_loop</a></div><div class="ttdeci">void execute_window_loop(const Window &amp;w, L &amp;&amp;lambda_function, Ts &amp;&amp;... iterators)</div><div class="ttdoc">Iterate through the passed window, automatically adjusting the iterators and calling the lambda_funct...</div><div class="ttdef"><b>Definition:</b> <a href="_helpers_8inl_source.xhtml#l00123">Helpers.inl:123</a></div></div>
<div class="ttc" id="_access_window_static_8h_xhtml"><div class="ttname"><a href="_access_window_static_8h.xhtml">AccessWindowStatic.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_xhtml_adb79bb3ad15444f00b55b35f1d6e16b7"><div class="ttname"><a href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#adb79bb3ad15444f00b55b35f1d6e16b7">arm_compute::NEGEMMLowpOffsetContributionOutputStageKernel::NEGEMMLowpOffsetContributionOutputStageFunction</a></div><div class="ttdeci">std::function&lt; void(const Window, const ITensor *, const ITensor *, const ITensor *, const ITensor *, ITensor *, int32_t, int32_t, int32_t, bool, GEMMLowpOutputStageInfo)&gt; NEGEMMLowpOffsetContributionOutputStageFunction</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_8h_source.xhtml#l00119">NEGEMMLowpOffsetContributionOutputStageKernel.h:119</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ad8ed01ff3ff33333d8e19db4d2818bb6a329f5d0c4b0c80e3474951d2c4435dd9"><div class="ttname"><a href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6a329f5d0c4b0c80e3474951d2c4435dd9">arm_compute::DataType::QASYMM8_SIGNED</a></div><div class="ttdoc">quantized, asymmetric fixed-point 8-bit number signed</div></div>
<div class="ttc" id="wrapper_8h_xhtml"><div class="ttname"><a href="wrapper_8h.xhtml">wrapper.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_xhtml_a6296f2754011b2221b343ccedfc0ba35"><div class="ttname"><a href="classarm__compute_1_1_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel.xhtml#a6296f2754011b2221b343ccedfc0ba35">arm_compute::NEGEMMLowpOffsetContributionOutputStageKernel::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *output, int32_t a_offset, int32_t b_offset, GEMMLowpOutputStageInfo output_stage)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NEGEMMLowpOffsetContribu...</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_g_e_m_m_lowp_offset_contribution_output_stage_kernel_8cpp_source.xhtml#l00991">NEGEMMLowpOffsetContributionOutputStageKernel.cpp:991</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a4f4125dba5283887b34f889b1c615c0c"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a4f4125dba5283887b34f889b1c615c0c">arm_compute::test::validation::info</a></div><div class="ttdeci">info</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_convolution_layer_8cpp_source.xhtml#l00182">ConvolutionLayer.cpp:182</a></div></div>
<div class="ttc" id="arm__compute_2core_2_types_8h_xhtml"><div class="ttname"><a href="arm__compute_2core_2_types_8h.xhtml">Types.h</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_a6eb9ce82815fe429250189da7592ba75"><div class="ttname"><a href="_validate_8h.xhtml#a6eb9ce82815fe429250189da7592ba75">ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW</a></div><div class="ttdeci">#define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00205">Validate.h:205</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ae69217acf0f0b5d4de030a09ad50a0bc"><div class="ttname"><a href="namespacearm__compute.xhtml#ae69217acf0f0b5d4de030a09ad50a0bc">arm_compute::get_min_max</a></div><div class="ttdeci">std::tuple&lt; PixelValue, PixelValue &gt; get_min_max(DataType dt)</div><div class="ttdoc">Compute the mininum and maximum values a data type can take.</div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_utils_8h_source.xhtml#l00558">Utils.h:558</a></div></div>
<div class="ttc" id="classarm__compute_1_1_window_xhtml"><div class="ttname"><a href="classarm__compute_1_1_window.xhtml">arm_compute::Window</a></div><div class="ttdoc">Describe a multidimensional execution window.</div><div class="ttdef"><b>Definition:</b> <a href="_window_8h_source.xhtml#l00039">Window.h:39</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_a1b35b0d258183cf9ef36adf684d0b88c"><div class="ttname"><a href="_validate_8h.xhtml#a1b35b0d258183cf9ef36adf684d0b88c">ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL</a></div><div class="ttdeci">#define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00941">Validate.h:941</a></div></div>
<div class="ttc" id="_validate_8h_xhtml"><div class="ttname"><a href="_validate_8h.xhtml">Validate.h</a></div></div>
</div><!-- fragment --></div><!-- contents -->
</div><!-- doc-content -->
</body>
</html>