<div class="header">
<div class="headertitle">
<div class="title">softmax_layer_quantized.cl</div> </div>
</div><!--header-->
<div class="contents">
<a href="softmax__layer__quantized_8cl.xhtml">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment"> * Copyright (c) 2017-2019 ARM Limited.</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment"> * SPDX-License-Identifier: MIT</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="comment"> * Permission is hereby granted, free of charge, to any person obtaining a copy</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="comment"> * of this software and associated documentation files (the &quot;Software&quot;), to</span></div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="comment"> * deal in the Software without restriction, including without limitation the</span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="comment"> * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="comment"> * sell copies of the Software, and to permit persons to whom the Software is</span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;<span class="comment"> * furnished to do so, subject to the following conditions:</span></div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160;<span class="comment"> * The above copyright notice and this permission notice shall be included in all</span></div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="comment"> * copies or substantial portions of the Software.</span></div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;<span class="comment"> * THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR</span></div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="comment"> * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,</span></div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160;<span class="comment"> * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE</span></div><div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160;<span class="comment"> * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER</span></div><div class="line"><a name="l00020"></a><span class="lineno"> 20</span>&#160;<span class="comment"> * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,</span></div><div class="line"><a name="l00021"></a><span class="lineno"> 21</span>&#160;<span class="comment"> * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE</span></div><div class="line"><a name="l00022"></a><span class="lineno"> 22</span>&#160;<span class="comment"> * SOFTWARE.</span></div><div class="line"><a name="l00023"></a><span class="lineno"> 23</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="helpers__asymm_8h.xhtml">helpers_asymm.h</a>&quot;</span></div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;</div><div class="line"><a name="l00026"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01"> 26</a></span>&#160;<span class="preprocessor">#define MAX_OP(x, y, type, size) max((x), (y))</span></div><div class="line"><a name="l00027"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88"> 27</a></span>&#160;<span class="preprocessor">#define ADD_OP(x, y, type, size) ((x) + (y))</span></div><div class="line"><a name="l00028"></a><span class="lineno"><a class="line" href="softmax__layer__quantized_8cl.xhtml#ac3af2d18008cbbf7247ae48fcd6e0c4e"> 28</a></span>&#160;<span class="preprocessor">#define SUB_OP(x, y, type, size) ((x) - (y))</span></div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160;</div><div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160;<span class="comment">/* Number of workitems in dimension 0. 
#if VECTOR_SIZE == 2
__constant uint2 idx__ = (uint2)(0, 1);
#define asymm_mult(a, b) ASYMM_MULT(a, b, 2)
#define asymm_exp_on_negative_values(a, k_integer_bits) ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, 2)
#define asymm_rescale(value, src_integer_bits, dst_integer_bits) ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, 2)

#elif VECTOR_SIZE == 4
__constant uint4 idx__ = (uint4)(0, 1, 2, 3);
#define asymm_mult(a, b) ASYMM_MULT(a, b, 4)
#define asymm_exp_on_negative_values(a, k_integer_bits) ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, 4)
#define asymm_rescale(value, src_integer_bits, dst_integer_bits) ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, 4)

#elif VECTOR_SIZE == 8
__constant uint8 idx__ = (uint8)(0, 1, 2, 3, 4, 5, 6, 7);
#define asymm_mult(a, b) ASYMM_MULT(a, b, 8)
#define asymm_exp_on_negative_values(a, k_integer_bits) ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, 8)
#define asymm_rescale(value, src_integer_bits, dst_integer_bits) ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, 8)

#else /* VECTOR_SIZE DEFAULT */
#define VECTOR_SIZE 16
#define LOG_VECTOR_SIZE 4
__constant uint16 idx__ = (uint16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
#define asymm_mult(a, b) ASYMM_MULT(a, b, 16)
#define asymm_exp_on_negative_values(a, k_integer_bits) ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, 16)
#define asymm_rescale(value, src_integer_bits, dst_integer_bits) ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, 16)

#endif /* VECTOR_SIZE END */

#define VEC_UCHAR VEC_DATA_TYPE(uchar, VECTOR_SIZE)
#define VEC_UINT VEC_DATA_TYPE(uint, VECTOR_SIZE)
#define VEC_INT VEC_DATA_TYPE(int, VECTOR_SIZE)
#define VEC_BASE VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)

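DATA_TYPE, MIN_VALUE, SCALED_DIFF_INT_BITS, EXP_ACCUMULATION_INT_BITS and DIFF_MIN are never defined in this file, and when VECTOR_SIZE is 2, 4 or 8 the matching LOG_VECTOR_SIZE is not defined here either; they are expected to arrive as -D options when the host compiles the kernel (VECTOR_SIZE itself defaults to 16 above). A minimal sketch of what such a build-options string, passed to clBuildProgram or the library's equivalent, could look like follows; every numeric value below is a placeholder, not the value the library actually derives from the tensor's quantization info and beta:

/* Hypothetical -D options for building this kernel for QASYMM8 input with the
 * default VECTOR_SIZE of 16. All numeric values are illustrative only. */
const char *build_opts =
    "-DDATA_TYPE=uchar -DMIN_VALUE=0 "
    "-DSCALED_DIFF_INT_BITS=5 -DEXP_ACCUMULATION_INT_BITS=12 "
    "-DDIFF_MIN=-128 "
    "-DINPUT_BETA_MULTIPLIER=1073741824 -DINPUT_BETA_LEFT_SHIFT=1 "
    "-DNON_MULTIPLE_OF_VECTOR_SIZE"; /* only when width is not a multiple of VECTOR_SIZE */
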
href="softmax__layer__quantized_8cl.xhtml#a89a27ed9d640355cfc1b6220b6eedd64"> 66</a></span>&#160;<span class="preprocessor">#define VEC_BASE VEC_DATA_TYPE(DATA_TYPE, VECTOR_SIZE)</span></div><div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160;</div><div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160;<span class="preprocessor">#if defined(DIFF_MIN)</span></div><div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160;</div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160;<a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> mult_by_quantized_multiplier_serial(<a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data)</div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160;{</div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160;<span class="preprocessor">#if defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT)</span></div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160; <span class="keywordflow">if</span>(INPUT_BETA_MULTIPLIER &gt; 1)</div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160; {</div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160; <span class="keywordflow">return</span> <a class="code" href="softmax__layer__quantized_8cl.xhtml#a525a42d38133b1051b8924b456add4a1">asymm_mult</a>(data * (1 &lt;&lt; INPUT_BETA_LEFT_SHIFT), INPUT_BETA_MULTIPLIER);</div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160; }</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT) */</span><span class="preprocessor"></span></div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; <span class="keywordflow">return</span> data;</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160;}</div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160;</div><div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160;int4 mult_by_quantized_multiplier_parallel(int4 data)</div><div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160;{</div><div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160;<span class="preprocessor">#if defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT)</span></div><div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160; <span class="keywordflow">if</span>(INPUT_BETA_MULTIPLIER &gt; 1)</div><div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160; {</div><div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160; <span class="keywordflow">return</span> <a class="code" href="helpers__asymm_8h.xhtml#a5483aefd5e07244661178bfd3f434448">ASYMM_MULT</a>(data * (1 &lt;&lt; INPUT_BETA_LEFT_SHIFT), INPUT_BETA_MULTIPLIER, 4);</div><div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160; }</div><div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT) */</span><span class="preprocessor"></span></div><div 
class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160; <span class="keywordflow">return</span> data;</div><div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160;}</div><div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160;<span class="comment">/** Shifts the values of the input tensor by the max calculated in softmax_layer_max kernel,</span></div><div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160;<span class="comment"> * then gets the exponent of each element as sums all elements across each row.</span></div><div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160;<span class="comment"> * @note In case the input is not multiple of 16 -DNON_MULTIPLE_OF_VECTOR_SIZE must be passed.</span></div><div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160;<span class="comment"> * @note Quantized beta can be optionally passed at compile time using -DINPUT_BETA_MULTIPLIER and -DINPUT_BETA_LEFT_SHIFT (if undefined, assume beta equals 1.0)</span></div><div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160;<span class="comment"> * @note -DDIFF_MIN must be passed at compile time. It is threshold difference between maximum value of input data and current processed value, it defines whether the value will be taken into account or not.</span></div><div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160;<span class="comment"> * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: QASYMM8</span></div><div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160;<span class="comment"> * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160;<span class="comment"> * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160;<span class="comment"> * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160;<span class="comment"> * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160;<span class="comment"> * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160;<span class="comment"> * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160;<span class="comment"> * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor</span></div><div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160;<span class="comment"> * @param[in] max_ptr Pointer to the max values tensor slice. 
Supported data types: same as @p src_ptr</span></div><div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160;<span class="comment"> * @param[in] max_stride_x Stride of the max values tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160;<span class="comment"> * @param[in] max_step_x max_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160;<span class="comment"> * @param[in] max_stride_y Stride of the max values tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160;<span class="comment"> * @param[in] max_step_y max_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160;<span class="comment"> * @param[in] max_stride_z Stride of the max values tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160;<span class="comment"> * @param[in] max_step_z max_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160;<span class="comment"> * @param[in] max_offset_first_element_in_bytes The offset of the first element in the max values tensor</span></div><div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160;<span class="comment"> * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: S32</span></div><div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160;<span class="comment"> * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160;<span class="comment"> * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160;<span class="comment"> * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160;<span class="comment"> * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160;<span class="comment"> * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160;<span class="comment"> * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160;<span class="comment"> * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor</span></div><div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160;<span class="comment"> * @param[out] sum_ptr Pointer to the sum values tensor slice. 
Supported data types: same as @p dst_ptr</span></div><div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160;<span class="comment"> * @param[in] sum_stride_x Stride of the sum values tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160;<span class="comment"> * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160;<span class="comment"> * @param[in] sum_stride_y Stride of the sum values tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160;<span class="comment"> * @param[in] sum_step_y sum_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160;<span class="comment"> * @param[in] sum_stride_z Stride of the sum values tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160;<span class="comment"> * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160;<span class="comment"> * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor</span></div><div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160;<span class="comment"> * @param[in] width Input image width</span></div><div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160;__kernel <span class="keywordtype">void</span> softmax_layer_max_shift_exp_sum_quantized_serial(</div><div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>),</div><div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(maxo),</div><div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>),</div><div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>),</div><div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160; uint width)</div><div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160;{</div><div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a> = <a 
class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>);</div><div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a> = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>);</div><div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> maxo = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(maxo);</div><div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a> = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>);</div><div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160;</div><div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a89a27ed9d640355cfc1b6220b6eedd64">VEC_BASE</a> max_val_vec = (<a class="code" href="softmax__layer__quantized_8cl.xhtml#a89a27ed9d640355cfc1b6220b6eedd64">VEC_BASE</a>)(MIN_VALUE);</div><div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160;</div><div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; <span class="comment">// Calculate max of row</span></div><div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; <span class="keyword">const</span> uint width4 = width &gt;&gt; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>;</div><div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; <span class="keywordflow">for</span>(uint i = 0; i &lt; width4; i++)</div><div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; {</div><div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a89a27ed9d640355cfc1b6220b6eedd64">VEC_BASE</a> data = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, i &lt;&lt; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>, 
0));</div><div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160; max_val_vec = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(data, max_val_vec, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 16);</div><div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160; }</div><div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160;</div><div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_VECTOR_SIZE</span></div><div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160; <span class="comment">// Handle non multiple of 16</span></div><div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a89a27ed9d640355cfc1b6220b6eedd64">VEC_BASE</a> vec_min_val = (<a class="code" href="softmax__layer__quantized_8cl.xhtml#a89a27ed9d640355cfc1b6220b6eedd64">VEC_BASE</a>)(MIN_VALUE);</div><div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a89a27ed9d640355cfc1b6220b6eedd64">VEC_BASE</a> data = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, width4 &lt;&lt; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>, 0));</div><div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a> widx = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>(((<a class="code" href="softmax__layer__quantized_8cl.xhtml#a16110bd2b92003141dbaf8a44498ff82">VEC_UINT</a>)(width4 &lt;&lt; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>) + <a class="code" href="softmax__layer__quantized_8cl.xhtml#aa1dd94b8d98f1c6d790bdf0fc5de29e9">idx__</a>) &lt; width, <a class="code" href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a>);</div><div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; max_val_vec = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(vec_min_val, data, widx), <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 16);</div><div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_VECTOR_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00162"></a><span class="lineno"> 
162</span>&#160;</div><div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; <span class="comment">// Perform max reduction</span></div><div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160;<span class="preprocessor">#if VECTOR_SIZE == 16</span></div><div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; max_val_vec.s01234567 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s01234567, max_val_vec.s89ABCDEF, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 8);</div><div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 16 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160;<span class="preprocessor">#if VECTOR_SIZE &gt;= 8</span></div><div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160; max_val_vec.s0123 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s0123, max_val_vec.s4567, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4);</div><div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 8 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160;<span class="preprocessor">#if VECTOR_SIZE &gt;= 4</span></div><div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; max_val_vec.s01 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s01, max_val_vec.s23, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 2);</div><div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 4 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; max_val_vec.s0 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s0, max_val_vec.s1, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 1);</div><div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160;</div><div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; <span class="comment">// Store result</span></div><div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; *((__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)maxo.<a class="code" href="struct_image.xhtml#acf52c23cbd7424606c10a606524e3e32">ptr</a>) = max_val_vec.s0;</div><div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160;</div><div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; <span class="comment">// Second part</span></div><div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160;</div><div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160; <span class="comment">// Load max value of 1D logits vector (row)</span></div><div 
class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; <span class="keywordtype">int</span> max_val = convert_int(*((__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;maxo, 0, 0)));</div><div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160;</div><div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160; <span class="comment">// Set sum vector, Q(EXP_ACCUMULATION_INT_BITS)</span></div><div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> sum1D = 0;</div><div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160;</div><div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160; <span class="comment">// Shift values, exp and sum</span></div><div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160; <span class="keywordflow">for</span>(uint i = 0; i &lt; width4; i++)</div><div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160; {</div><div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a89a27ed9d640355cfc1b6220b6eedd64">VEC_BASE</a> data = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, i &lt;&lt; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>, 0));</div><div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_fp = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>(data, <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>);</div><div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_diff = data_fp - max_val;</div><div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_diff_mult = mult_by_quantized_multiplier_serial(data_diff);</div><div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160; data_fp = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a54aedfa17c5ac2567107d5f488b0f4af">asymm_exp_on_negative_values</a>(data_diff_mult, SCALED_DIFF_INT_BITS);</div><div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160; data_fp = <a class="code" href="softmax__layer__quantized_8cl.xhtml#ad57ea340cdcfeb2e1375b70c3ae59bae">asymm_rescale</a>(data_fp, 0, 
EXP_ACCUMULATION_INT_BITS);</div><div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#acb282042d1edeeaa3cc979a206f78b54">VSTORE</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)</div><div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160; (data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>, i &lt;&lt; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>, 0));</div><div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160; sum1D = sum1D + <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(MIN_VALUE, data_fp, data_diff &gt;= (<a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>)(DIFF_MIN));</div><div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160; }</div><div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160;</div><div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_VECTOR_SIZE</span></div><div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160; <span class="comment">// Handle non multiple of 16</span></div><div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160; data = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, width4 &lt;&lt; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>, 0));</div><div class="line"><a name="l00203"></a><span class="lineno"> 203</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_fp = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>(data, <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>);</div><div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_diff = data_fp - max_val;</div><div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> data_diff_mult = mult_by_quantized_multiplier_serial(data_diff);</div><div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160; data_fp = <a class="code" 
href="softmax__layer__quantized_8cl.xhtml#a54aedfa17c5ac2567107d5f488b0f4af">asymm_exp_on_negative_values</a>(data_diff_mult, SCALED_DIFF_INT_BITS);</div><div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160; data_fp = <a class="code" href="softmax__layer__quantized_8cl.xhtml#ad57ea340cdcfeb2e1375b70c3ae59bae">asymm_rescale</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS);</div><div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160; <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a> widx_ = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>(((<a class="code" href="softmax__layer__quantized_8cl.xhtml#a16110bd2b92003141dbaf8a44498ff82">VEC_UINT</a>)(width4 &lt;&lt; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>) + <a class="code" href="softmax__layer__quantized_8cl.xhtml#aa1dd94b8d98f1c6d790bdf0fc5de29e9">idx__</a>) &lt; width, <a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>);</div><div class="line"><a name="l00209"></a><span class="lineno"> 209</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#acb282042d1edeeaa3cc979a206f78b54">VSTORE</a>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a>)</div><div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160; (data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>, width4 &lt;&lt; <a class="code" href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a>, 0));</div><div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160; data_fp = <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(MIN_VALUE, data_fp, data_diff &gt;= (<a class="code" href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a>)(DIFF_MIN));</div><div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160; sum1D = sum1D + <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(MIN_VALUE, data_fp, widx_);</div><div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_VECTOR_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160;</div><div class="line"><a name="l00215"></a><span class="lineno"> 215</span>&#160; <span class="comment">// Perform sum reduction</span></div><div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160;<span class="preprocessor">#if VECTOR_SIZE == 16</span></div><div class="line"><a name="l00217"></a><span class="lineno"> 217</span>&#160; sum1D.s01234567 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s01234567, sum1D.s89ABCDEF, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 8);</div><div class="line"><a 
name="l00218"></a><span class="lineno"> 218</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 16 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00219"></a><span class="lineno"> 219</span>&#160;<span class="preprocessor">#if VECTOR_SIZE &gt;= 8</span></div><div class="line"><a name="l00220"></a><span class="lineno"> 220</span>&#160; sum1D.s0123 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s0123, sum1D.s4567, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4);</div><div class="line"><a name="l00221"></a><span class="lineno"> 221</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 8 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00222"></a><span class="lineno"> 222</span>&#160;<span class="preprocessor">#if VECTOR_SIZE &gt;= 4</span></div><div class="line"><a name="l00223"></a><span class="lineno"> 223</span>&#160; sum1D.s01 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s01, sum1D.s23, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 2);</div><div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* VECTOR SIZE 4 END */</span><span class="preprocessor"></span></div><div class="line"><a name="l00225"></a><span class="lineno"> 225</span>&#160; sum1D.s0 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s0, sum1D.s1, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 1);</div><div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160;</div><div class="line"><a name="l00227"></a><span class="lineno"> 227</span>&#160; <span class="comment">// Calculate and store result</span></div><div class="line"><a name="l00228"></a><span class="lineno"> 228</span>&#160; *((__global <span class="keywordtype">int</span> *)<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>.ptr) = sum1D.s0;</div><div class="line"><a name="l00229"></a><span class="lineno"> 229</span>&#160;}</div><div class="line"><a name="l00230"></a><span class="lineno"> 230</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00231"></a><span class="lineno"> 231</span>&#160;<span class="comment">/** Identifies the maximum value across the 1st dimension and shifts the values of the input tensor by this maximum value,</span></div><div class="line"><a name="l00232"></a><span class="lineno"> 232</span>&#160;<span class="comment"> * then gets the exponent of each element as sums all elements across each row.</span></div><div class="line"><a name="l00233"></a><span class="lineno"> 233</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00234"></a><span class="lineno"> 234</span>&#160;<span class="comment"> * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE=type. e.g. 
-DDATA_TYPE=short</span></div><div class="line"><a name="l00235"></a><span class="lineno"> 235</span>&#160;<span class="comment"> * @note In case the input is not a multiple of VECTOR_SIZE (2,4,8,16) -DNON_MULTIPLE_OF_VECTOR_SIZE must be passed.</span></div><div class="line"><a name="l00236"></a><span class="lineno"> 236</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00237"></a><span class="lineno"> 237</span>&#160;<span class="comment"> * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: F16/F32</span></div><div class="line"><a name="l00238"></a><span class="lineno"> 238</span>&#160;<span class="comment"> * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00239"></a><span class="lineno"> 239</span>&#160;<span class="comment"> * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00240"></a><span class="lineno"> 240</span>&#160;<span class="comment"> * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00241"></a><span class="lineno"> 241</span>&#160;<span class="comment"> * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00242"></a><span class="lineno"> 242</span>&#160;<span class="comment"> * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00243"></a><span class="lineno"> 243</span>&#160;<span class="comment"> * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00244"></a><span class="lineno"> 244</span>&#160;<span class="comment"> * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor</span></div><div class="line"><a name="l00245"></a><span class="lineno"> 245</span>&#160;<span class="comment"> * @param[in] maxo_ptr Pointer to the max values tensor slice. 
Supported data types: same as @p src_ptr</span></div><div class="line"><a name="l00246"></a><span class="lineno"> 246</span>&#160;<span class="comment"> * @param[in] maxo_stride_x Stride of the max values tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00247"></a><span class="lineno"> 247</span>&#160;<span class="comment"> * @param[in] maxo_step_x maxo_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00248"></a><span class="lineno"> 248</span>&#160;<span class="comment"> * @param[in] maxo_stride_y Stride of the max values tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00249"></a><span class="lineno"> 249</span>&#160;<span class="comment"> * @param[in] maxo_step_y maxo_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00250"></a><span class="lineno"> 250</span>&#160;<span class="comment"> * @param[in] maxo_stride_z Stride of the max values tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00251"></a><span class="lineno"> 251</span>&#160;<span class="comment"> * @param[in] maxo_step_z maxo_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00252"></a><span class="lineno"> 252</span>&#160;<span class="comment"> * @param[in] maxo_offset_first_element_in_bytes The offset of the first element in the max values tensor</span></div><div class="line"><a name="l00253"></a><span class="lineno"> 253</span>&#160;<span class="comment"> * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: same as @p src_ptr</span></div><div class="line"><a name="l00254"></a><span class="lineno"> 254</span>&#160;<span class="comment"> * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00255"></a><span class="lineno"> 255</span>&#160;<span class="comment"> * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00256"></a><span class="lineno"> 256</span>&#160;<span class="comment"> * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00257"></a><span class="lineno"> 257</span>&#160;<span class="comment"> * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00258"></a><span class="lineno"> 258</span>&#160;<span class="comment"> * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00259"></a><span class="lineno"> 259</span>&#160;<span class="comment"> * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00260"></a><span class="lineno"> 260</span>&#160;<span class="comment"> * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor</span></div><div class="line"><a name="l00261"></a><span class="lineno"> 261</span>&#160;<span class="comment"> * @param[out] sum_ptr Pointer to the sum values tensor slice. 
Supported data types: same as @p src_ptr</span></div><div class="line"><a name="l00262"></a><span class="lineno"> 262</span>&#160;<span class="comment"> * @param[in] sum_stride_x Stride of the sum values tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00263"></a><span class="lineno"> 263</span>&#160;<span class="comment"> * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00264"></a><span class="lineno"> 264</span>&#160;<span class="comment"> * @param[in] sum_stride_y Stride of the sum values tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00265"></a><span class="lineno"> 265</span>&#160;<span class="comment"> * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00266"></a><span class="lineno"> 266</span>&#160;<span class="comment"> * @param[in] sum_stride_z Stride of the sum values tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00267"></a><span class="lineno"> 267</span>&#160;<span class="comment"> * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00268"></a><span class="lineno"> 268</span>&#160;<span class="comment"> * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor</span></div><div class="line"><a name="l00269"></a><span class="lineno"> 269</span>&#160;<span class="comment"> * @param[in] width Input image width</span></div><div class="line"><a name="l00270"></a><span class="lineno"> 270</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00271"></a><span class="lineno"> 271</span>&#160;__kernel <span class="keywordtype">void</span> softmax_layer_max_shift_exp_sum_quantized_parallel(</div><div class="line"><a name="l00272"></a><span class="lineno"> 272</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>),</div><div class="line"><a name="l00273"></a><span class="lineno"> 273</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(maxo),</div><div class="line"><a name="l00274"></a><span class="lineno"> 274</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>),</div><div class="line"><a name="l00275"></a><span class="lineno"> 275</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>),</div><div class="line"><a name="l00276"></a><span class="lineno"> 276</span>&#160; uint width)</div><div class="line"><a name="l00277"></a><span class="lineno"> 277</span>&#160;{</div><div class="line"><a name="l00278"></a><span class="lineno"> 278</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a> = <a 
class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>);</div><div class="line"><a name="l00279"></a><span class="lineno"> 279</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a> = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>);</div><div class="line"><a name="l00280"></a><span class="lineno"> 280</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> maxo = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(maxo);</div><div class="line"><a name="l00281"></a><span class="lineno"> 281</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a> = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>);</div><div class="line"><a name="l00282"></a><span class="lineno"> 282</span>&#160;</div><div class="line"><a name="l00283"></a><span class="lineno"> 283</span>&#160; <span class="keyword">const</span> uint4 <a class="code" href="softmax__layer_8cl.xhtml#a4884a666a1e93fbf8c27bd7d2da3c8bb">idx4</a> = (uint4)(0, 1, 2, 3);</div><div class="line"><a name="l00284"></a><span class="lineno"> 284</span>&#160; <span class="keyword">const</span> uint lid = get_local_id(0);</div><div class="line"><a name="l00285"></a><span class="lineno"> 285</span>&#160;</div><div class="line"><a name="l00286"></a><span class="lineno"> 286</span>&#160; <span class="comment">// Define one temporary vector per work-item.</span></div><div class="line"><a name="l00287"></a><span class="lineno"> 287</span>&#160; __local int4 tmp_local[<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a>];</div><div class="line"><a name="l00288"></a><span class="lineno"> 288</span>&#160; __local <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> max_local;</div><div class="line"><a name="l00289"></a><span class="lineno"> 289</span>&#160;</div><div class="line"><a name="l00290"></a><span class="lineno"> 290</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)</div><div class="line"><a name="l00291"></a><span class="lineno"> 291</span>&#160; vec_min_val = (<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4))(MIN_VALUE);</div><div class="line"><a name="l00292"></a><span class="lineno"> 292</span>&#160; <a class="code" 
href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)</div><div class="line"><a name="l00293"></a><span class="lineno"> 293</span>&#160; max_val_vec = vec_min_val;</div><div class="line"><a name="l00294"></a><span class="lineno"> 294</span>&#160;</div><div class="line"><a name="l00295"></a><span class="lineno"> 295</span>&#160; <span class="comment">// Number of elements per work-item.</span></div><div class="line"><a name="l00296"></a><span class="lineno"> 296</span>&#160; const uint row = width / <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a>;</div><div class="line"><a name="l00297"></a><span class="lineno"> 297</span>&#160; <span class="comment">// Number of iterations per work-item.</span></div><div class="line"><a name="l00298"></a><span class="lineno"> 298</span>&#160; const uint width_ = row &gt;&gt; 2;</div><div class="line"><a name="l00299"></a><span class="lineno"> 299</span>&#160; <span class="comment">// Calculate max of row</span></div><div class="line"><a name="l00300"></a><span class="lineno"> 300</span>&#160; uint i = 0;</div><div class="line"><a name="l00301"></a><span class="lineno"> 301</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a0886942393a3ba0dfefaa7516b159784">for</a>(; i &lt; width_; i++)</div><div class="line"><a name="l00302"></a><span class="lineno"> 302</span>&#160; {</div><div class="line"><a name="l00303"></a><span class="lineno"> 303</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)</div><div class="line"><a name="l00304"></a><span class="lineno"> 304</span>&#160; data_max = vload4(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00305"></a><span class="lineno"> 305</span>&#160; max_val_vec = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(data_max, max_val_vec, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4);</div><div class="line"><a name="l00306"></a><span class="lineno"> 306</span>&#160; }</div><div class="line"><a name="l00307"></a><span class="lineno"> 307</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_GRID_SIZE</span></div><div class="line"><a name="l00308"></a><span class="lineno"> 308</span>&#160; <span class="comment">// How many work-items needed to complete the computation.</span></div><div class="line"><a name="l00309"></a><span class="lineno"> 309</span>&#160; <span class="comment">//TODO: Optimize this calculation (avoid %).</span></div><div class="line"><a name="l00310"></a><span class="lineno"> 310</span>&#160; <span class="keywordtype">int</span> boundary_workitems = (width % (<a class="code" 
href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4)) / 4;</div><div class="line"><a name="l00311"></a><span class="lineno"> 311</span>&#160; <span class="keywordflow">if</span>(lid &lt; boundary_workitems)</div><div class="line"><a name="l00312"></a><span class="lineno"> 312</span>&#160; {</div><div class="line"><a name="l00313"></a><span class="lineno"> 313</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)</div><div class="line"><a name="l00314"></a><span class="lineno"> 314</span>&#160; data_max = vload4(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00315"></a><span class="lineno"> 315</span>&#160; max_val_vec = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(data_max, max_val_vec, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4);</div><div class="line"><a name="l00316"></a><span class="lineno"> 316</span>&#160; }</div><div class="line"><a name="l00317"></a><span class="lineno"> 317</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_VECTOR_SIZE</span></div><div class="line"><a name="l00318"></a><span class="lineno"> 318</span>&#160; <span class="keywordflow">if</span>(boundary_workitems == 0)</div><div class="line"><a name="l00319"></a><span class="lineno"> 319</span>&#160; {</div><div class="line"><a name="l00320"></a><span class="lineno"> 320</span>&#160; boundary_workitems = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a>;</div><div class="line"><a name="l00321"></a><span class="lineno"> 321</span>&#160; i--;</div><div class="line"><a name="l00322"></a><span class="lineno"> 322</span>&#160; }</div><div class="line"><a name="l00323"></a><span class="lineno"> 323</span>&#160; <span class="keywordflow">if</span>(lid == (boundary_workitems - 1))</div><div class="line"><a name="l00324"></a><span class="lineno"> 324</span>&#160; {</div><div class="line"><a name="l00325"></a><span class="lineno"> 325</span>&#160; <span class="comment">// Handle non multiple of 4</span></div><div class="line"><a name="l00326"></a><span class="lineno"> 326</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)</div><div class="line"><a name="l00327"></a><span class="lineno"> 327</span>&#160; data_max = vload4(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, (<a class="code" 
href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * i * 4) + 4, 0));</div><div class="line"><a name="l00328"></a><span class="lineno"> 328</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)</div><div class="line"><a name="l00329"></a><span class="lineno"> 329</span>&#160; widx = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>((((uint4)(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * i * 4) + boundary_workitems * 4 + <a class="code" href="softmax__layer_8cl.xhtml#a4884a666a1e93fbf8c27bd7d2da3c8bb">idx4</a>) &lt; width), <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4));</div><div class="line"><a name="l00330"></a><span class="lineno"> 330</span>&#160; max_val_vec = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(vec_min_val, data_max, widx), <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4);</div><div class="line"><a name="l00331"></a><span class="lineno"> 331</span>&#160; }</div><div class="line"><a name="l00332"></a><span class="lineno"> 332</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_VECTOR_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00333"></a><span class="lineno"> 333</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_GRID_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00334"></a><span class="lineno"> 334</span>&#160; tmp_local[lid] = convert_int4(max_val_vec);</div><div class="line"><a name="l00335"></a><span class="lineno"> 335</span>&#160;</div><div class="line"><a name="l00336"></a><span class="lineno"> 336</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00337"></a><span class="lineno"> 337</span>&#160;</div><div class="line"><a name="l00338"></a><span class="lineno"> 338</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 256)</div><div class="line"><a name="l00339"></a><span class="lineno"> 339</span>&#160; {</div><div class="line"><a name="l00340"></a><span class="lineno"> 340</span>&#160; <span class="keywordflow">if</span>(lid &lt; 128)</div><div class="line"><a name="l00341"></a><span class="lineno"> 341</span>&#160; {</div><div class="line"><a name="l00342"></a><span class="lineno"> 342</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 128], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00343"></a><span class="lineno"> 343</span>&#160; }</div><div class="line"><a name="l00344"></a><span class="lineno"> 344</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a 
name="l00345"></a><span class="lineno"> 345</span>&#160; }</div><div class="line"><a name="l00346"></a><span class="lineno"> 346</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 128)</div><div class="line"><a name="l00347"></a><span class="lineno"> 347</span>&#160; {</div><div class="line"><a name="l00348"></a><span class="lineno"> 348</span>&#160; <span class="keywordflow">if</span>(lid &lt; 64)</div><div class="line"><a name="l00349"></a><span class="lineno"> 349</span>&#160; {</div><div class="line"><a name="l00350"></a><span class="lineno"> 350</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 64], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00351"></a><span class="lineno"> 351</span>&#160; }</div><div class="line"><a name="l00352"></a><span class="lineno"> 352</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00353"></a><span class="lineno"> 353</span>&#160; }</div><div class="line"><a name="l00354"></a><span class="lineno"> 354</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 64)</div><div class="line"><a name="l00355"></a><span class="lineno"> 355</span>&#160; {</div><div class="line"><a name="l00356"></a><span class="lineno"> 356</span>&#160; <span class="keywordflow">if</span>(lid &lt; 32)</div><div class="line"><a name="l00357"></a><span class="lineno"> 357</span>&#160; {</div><div class="line"><a name="l00358"></a><span class="lineno"> 358</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 32], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00359"></a><span class="lineno"> 359</span>&#160; }</div><div class="line"><a name="l00360"></a><span class="lineno"> 360</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00361"></a><span class="lineno"> 361</span>&#160; }</div><div class="line"><a name="l00362"></a><span class="lineno"> 362</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 32)</div><div class="line"><a name="l00363"></a><span class="lineno"> 363</span>&#160; {</div><div class="line"><a name="l00364"></a><span class="lineno"> 364</span>&#160; <span class="keywordflow">if</span>(lid &lt; 16)</div><div class="line"><a name="l00365"></a><span class="lineno"> 365</span>&#160; {</div><div class="line"><a name="l00366"></a><span class="lineno"> 366</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 16], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00367"></a><span class="lineno"> 367</span>&#160; }</div><div class="line"><a name="l00368"></a><span class="lineno"> 368</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00369"></a><span class="lineno"> 369</span>&#160; }</div><div class="line"><a name="l00370"></a><span class="lineno"> 370</span>&#160; <span class="keywordflow">if</span>(<a class="code" 
href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 16)</div><div class="line"><a name="l00371"></a><span class="lineno"> 371</span>&#160; {</div><div class="line"><a name="l00372"></a><span class="lineno"> 372</span>&#160; <span class="keywordflow">if</span>(lid &lt; 8)</div><div class="line"><a name="l00373"></a><span class="lineno"> 373</span>&#160; {</div><div class="line"><a name="l00374"></a><span class="lineno"> 374</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 8], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00375"></a><span class="lineno"> 375</span>&#160; }</div><div class="line"><a name="l00376"></a><span class="lineno"> 376</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00377"></a><span class="lineno"> 377</span>&#160; }</div><div class="line"><a name="l00378"></a><span class="lineno"> 378</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 8)</div><div class="line"><a name="l00379"></a><span class="lineno"> 379</span>&#160; {</div><div class="line"><a name="l00380"></a><span class="lineno"> 380</span>&#160; <span class="keywordflow">if</span>(lid &lt; 4)</div><div class="line"><a name="l00381"></a><span class="lineno"> 381</span>&#160; {</div><div class="line"><a name="l00382"></a><span class="lineno"> 382</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 4], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00383"></a><span class="lineno"> 383</span>&#160; }</div><div class="line"><a name="l00384"></a><span class="lineno"> 384</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00385"></a><span class="lineno"> 385</span>&#160; }</div><div class="line"><a name="l00386"></a><span class="lineno"> 386</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 4)</div><div class="line"><a name="l00387"></a><span class="lineno"> 387</span>&#160; {</div><div class="line"><a name="l00388"></a><span class="lineno"> 388</span>&#160; <span class="keywordflow">if</span>(lid &lt; 2)</div><div class="line"><a name="l00389"></a><span class="lineno"> 389</span>&#160; {</div><div class="line"><a name="l00390"></a><span class="lineno"> 390</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(tmp_local[lid + 2], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00391"></a><span class="lineno"> 391</span>&#160; }</div><div class="line"><a name="l00392"></a><span class="lineno"> 392</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00393"></a><span class="lineno"> 393</span>&#160; }</div><div class="line"><a name="l00394"></a><span class="lineno"> 394</span>&#160; <span class="keywordflow">if</span>(lid == 0)</div><div class="line"><a name="l00395"></a><span class="lineno"> 395</span>&#160; {</div><div class="line"><a name="l00396"></a><span class="lineno"> 396</span>&#160; max_val_vec = <a class="code" 
href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>((tmp_local[lid + 1]), <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)), <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a>((tmp_local[lid]), <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)), <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4);</div><div class="line"><a name="l00397"></a><span class="lineno"> 397</span>&#160; max_val_vec.s01 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s01, max_val_vec.s23, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 2);</div><div class="line"><a name="l00398"></a><span class="lineno"> 398</span>&#160; max_val_vec.s0 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a>(max_val_vec.s0, max_val_vec.s1, <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 1);</div><div class="line"><a name="l00399"></a><span class="lineno"> 399</span>&#160; max_local = max_val_vec.s0;</div><div class="line"><a name="l00400"></a><span class="lineno"> 400</span>&#160; }</div><div class="line"><a name="l00401"></a><span class="lineno"> 401</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00402"></a><span class="lineno"> 402</span>&#160;</div><div class="line"><a name="l00403"></a><span class="lineno"> 403</span>&#160; <span class="comment">/* Second section */</span></div><div class="line"><a name="l00404"></a><span class="lineno"> 404</span>&#160;</div><div class="line"><a name="l00405"></a><span class="lineno"> 405</span>&#160; <span class="comment">// Set sum vector</span></div><div class="line"><a name="l00406"></a><span class="lineno"> 406</span>&#160; int4 sum1D = 0;</div><div class="line"><a name="l00407"></a><span class="lineno"> 407</span>&#160; <span class="keywordtype">int</span> max_val = convert_int(max_local);</div><div class="line"><a name="l00408"></a><span class="lineno"> 408</span>&#160;</div><div class="line"><a name="l00409"></a><span class="lineno"> 409</span>&#160; <span class="comment">// Shift values, exp and sum</span></div><div class="line"><a name="l00410"></a><span class="lineno"> 410</span>&#160; <span class="keywordflow">for</span>(i = 0; i &lt; width_; i++)</div><div class="line"><a name="l00411"></a><span class="lineno"> 411</span>&#160; {</div><div class="line"><a name="l00412"></a><span class="lineno"> 412</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)</div><div class="line"><a name="l00413"></a><span class="lineno"> 413</span>&#160; data = vload4(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a 
class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00414"></a><span class="lineno"> 414</span>&#160; int4 data_fp = convert_int4(data);</div><div class="line"><a name="l00415"></a><span class="lineno"> 415</span>&#160; int4 data_diff = data_fp - max_val;</div><div class="line"><a name="l00416"></a><span class="lineno"> 416</span>&#160; int4 data_diff_mult = mult_by_quantized_multiplier_parallel(data_diff);</div><div class="line"><a name="l00417"></a><span class="lineno"> 417</span>&#160; data_fp = <a class="code" href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a>(data_diff_mult, SCALED_DIFF_INT_BITS, 4);</div><div class="line"><a name="l00418"></a><span class="lineno"> 418</span>&#160; data_fp = <a class="code" href="helpers__asymm_8h.xhtml#a98585f1bb84dea90aecbf59785c46151">ASYMM_RESCALE</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS, 4);</div><div class="line"><a name="l00419"></a><span class="lineno"> 419</span>&#160; vstore4(data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00420"></a><span class="lineno"> 420</span>&#160; sum1D = sum1D + <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(MIN_VALUE, data_fp, data_diff &gt;= (int4)(DIFF_MIN));</div><div class="line"><a name="l00421"></a><span class="lineno"> 421</span>&#160; }</div><div class="line"><a name="l00422"></a><span class="lineno"> 422</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_GRID_SIZE</span></div><div class="line"><a name="l00423"></a><span class="lineno"> 423</span>&#160; <span class="comment">//TODO: Optimize the calculation (avoid %).</span></div><div class="line"><a name="l00424"></a><span class="lineno"> 424</span>&#160; boundary_workitems = (width % (<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4)) / 4;</div><div class="line"><a name="l00425"></a><span class="lineno"> 425</span>&#160; <span class="keywordflow">if</span>(lid &lt; boundary_workitems)</div><div class="line"><a name="l00426"></a><span class="lineno"> 426</span>&#160; {</div><div class="line"><a name="l00427"></a><span class="lineno"> 427</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)</div><div class="line"><a name="l00428"></a><span class="lineno"> 428</span>&#160; data = vload4(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" 
href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00429"></a><span class="lineno"> 429</span>&#160; int4 data_fp = convert_int4(data);</div><div class="line"><a name="l00430"></a><span class="lineno"> 430</span>&#160; int4 data_diff = data_fp - max_val;</div><div class="line"><a name="l00431"></a><span class="lineno"> 431</span>&#160; int4 data_diff_mult = mult_by_quantized_multiplier_parallel(data_diff);</div><div class="line"><a name="l00432"></a><span class="lineno"> 432</span>&#160; data_fp = <a class="code" href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a>(data_diff_mult, SCALED_DIFF_INT_BITS, 4);</div><div class="line"><a name="l00433"></a><span class="lineno"> 433</span>&#160; data_fp = <a class="code" href="helpers__asymm_8h.xhtml#a98585f1bb84dea90aecbf59785c46151">ASYMM_RESCALE</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS, 4);</div><div class="line"><a name="l00434"></a><span class="lineno"> 434</span>&#160; vstore4(data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4, 0));</div><div class="line"><a name="l00435"></a><span class="lineno"> 435</span>&#160; sum1D = sum1D + <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(MIN_VALUE, data_fp, data_diff &gt;= (int4)(DIFF_MIN));</div><div class="line"><a name="l00436"></a><span class="lineno"> 436</span>&#160; }</div><div class="line"><a name="l00437"></a><span class="lineno"> 437</span>&#160;<span class="preprocessor">#ifdef NON_MULTIPLE_OF_VECTOR_SIZE</span></div><div class="line"><a name="l00438"></a><span class="lineno"> 438</span>&#160; <span class="keywordflow">if</span>(boundary_workitems == 0)</div><div class="line"><a name="l00439"></a><span class="lineno"> 439</span>&#160; {</div><div class="line"><a name="l00440"></a><span class="lineno"> 440</span>&#160; boundary_workitems = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a>;</div><div class="line"><a name="l00441"></a><span class="lineno"> 441</span>&#160; i--;</div><div class="line"><a name="l00442"></a><span class="lineno"> 442</span>&#160; }</div><div class="line"><a name="l00443"></a><span class="lineno"> 443</span>&#160; <span class="keywordflow">if</span>(lid == (boundary_workitems - 1))</div><div class="line"><a name="l00444"></a><span class="lineno"> 444</span>&#160; {</div><div class="line"><a name="l00445"></a><span class="lineno"> 445</span>&#160; <span class="comment">// Handle non multiple of vector size ((GRID_SIZE * i * 4) + 4, 0); move 4 float positions ahead, *4 is due to the stride</span></div><div class="line"><a name="l00446"></a><span class="lineno"> 446</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 4)</div><div class="line"><a 
name="l00447"></a><span class="lineno"> 447</span>&#160; data = vload4(0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4 + 4, 0));</div><div class="line"><a name="l00448"></a><span class="lineno"> 448</span>&#160; int4 data_fp = convert_int4(data);</div><div class="line"><a name="l00449"></a><span class="lineno"> 449</span>&#160; int4 data_diff = data_fp - max_val;</div><div class="line"><a name="l00450"></a><span class="lineno"> 450</span>&#160; int4 data_diff_mult = mult_by_quantized_multiplier_parallel(data_diff);</div><div class="line"><a name="l00451"></a><span class="lineno"> 451</span>&#160; data_fp = <a class="code" href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a>(data_diff_mult, SCALED_DIFF_INT_BITS, 4);</div><div class="line"><a name="l00452"></a><span class="lineno"> 452</span>&#160; data_fp = <a class="code" href="helpers__asymm_8h.xhtml#a98585f1bb84dea90aecbf59785c46151">ASYMM_RESCALE</a>(data_fp, 0, EXP_ACCUMULATION_INT_BITS, 4);</div><div class="line"><a name="l00453"></a><span class="lineno"> 453</span>&#160; int4 widx = convert_int4(((uint4)(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * i * 4) + boundary_workitems * 4 + <a class="code" href="softmax__layer_8cl.xhtml#a4884a666a1e93fbf8c27bd7d2da3c8bb">idx4</a>) &lt; width);</div><div class="line"><a name="l00454"></a><span class="lineno"> 454</span>&#160; data_fp = <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(MIN_VALUE, data_fp, widx);</div><div class="line"><a name="l00455"></a><span class="lineno"> 455</span>&#160; vstore4(data_diff, 0, (__global <span class="keywordtype">int</span> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>, i * <a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> * 4 + 4, 0));</div><div class="line"><a name="l00456"></a><span class="lineno"> 456</span>&#160; sum1D = sum1D + <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(MIN_VALUE, data_fp, data_diff &gt;= (int4)(DIFF_MIN));</div><div class="line"><a name="l00457"></a><span class="lineno"> 457</span>&#160; }</div><div class="line"><a name="l00458"></a><span class="lineno"> 458</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_VECTOR_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00459"></a><span class="lineno"> 459</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* NON_MULTIPLE_OF_GRID_SIZE */</span><span class="preprocessor"></span></div><div class="line"><a name="l00460"></a><span class="lineno"> 460</span>&#160; tmp_local[lid] = sum1D;</div><div class="line"><a name="l00461"></a><span class="lineno"> 461</span>&#160;</div><div class="line"><a 
name="l00462"></a><span class="lineno"> 462</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00463"></a><span class="lineno"> 463</span>&#160;</div><div class="line"><a name="l00464"></a><span class="lineno"> 464</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 256)</div><div class="line"><a name="l00465"></a><span class="lineno"> 465</span>&#160; {</div><div class="line"><a name="l00466"></a><span class="lineno"> 466</span>&#160; <span class="keywordflow">if</span>(lid &lt; 128)</div><div class="line"><a name="l00467"></a><span class="lineno"> 467</span>&#160; {</div><div class="line"><a name="l00468"></a><span class="lineno"> 468</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 128], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00469"></a><span class="lineno"> 469</span>&#160; }</div><div class="line"><a name="l00470"></a><span class="lineno"> 470</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00471"></a><span class="lineno"> 471</span>&#160; }</div><div class="line"><a name="l00472"></a><span class="lineno"> 472</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 128)</div><div class="line"><a name="l00473"></a><span class="lineno"> 473</span>&#160; {</div><div class="line"><a name="l00474"></a><span class="lineno"> 474</span>&#160; <span class="keywordflow">if</span>(lid &lt; 64)</div><div class="line"><a name="l00475"></a><span class="lineno"> 475</span>&#160; {</div><div class="line"><a name="l00476"></a><span class="lineno"> 476</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 64], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00477"></a><span class="lineno"> 477</span>&#160; }</div><div class="line"><a name="l00478"></a><span class="lineno"> 478</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00479"></a><span class="lineno"> 479</span>&#160; }</div><div class="line"><a name="l00480"></a><span class="lineno"> 480</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 64)</div><div class="line"><a name="l00481"></a><span class="lineno"> 481</span>&#160; {</div><div class="line"><a name="l00482"></a><span class="lineno"> 482</span>&#160; <span class="keywordflow">if</span>(lid &lt; 32)</div><div class="line"><a name="l00483"></a><span class="lineno"> 483</span>&#160; {</div><div class="line"><a name="l00484"></a><span class="lineno"> 484</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 32], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00485"></a><span class="lineno"> 485</span>&#160; }</div><div class="line"><a name="l00486"></a><span class="lineno"> 486</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00487"></a><span class="lineno"> 487</span>&#160; }</div><div class="line"><a name="l00488"></a><span class="lineno"> 
488</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 32)</div><div class="line"><a name="l00489"></a><span class="lineno"> 489</span>&#160; {</div><div class="line"><a name="l00490"></a><span class="lineno"> 490</span>&#160; <span class="keywordflow">if</span>(lid &lt; 16)</div><div class="line"><a name="l00491"></a><span class="lineno"> 491</span>&#160; {</div><div class="line"><a name="l00492"></a><span class="lineno"> 492</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 16], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00493"></a><span class="lineno"> 493</span>&#160; }</div><div class="line"><a name="l00494"></a><span class="lineno"> 494</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00495"></a><span class="lineno"> 495</span>&#160; }</div><div class="line"><a name="l00496"></a><span class="lineno"> 496</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 16)</div><div class="line"><a name="l00497"></a><span class="lineno"> 497</span>&#160; {</div><div class="line"><a name="l00498"></a><span class="lineno"> 498</span>&#160; <span class="keywordflow">if</span>(lid &lt; 8)</div><div class="line"><a name="l00499"></a><span class="lineno"> 499</span>&#160; {</div><div class="line"><a name="l00500"></a><span class="lineno"> 500</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 8], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00501"></a><span class="lineno"> 501</span>&#160; }</div><div class="line"><a name="l00502"></a><span class="lineno"> 502</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00503"></a><span class="lineno"> 503</span>&#160; }</div><div class="line"><a name="l00504"></a><span class="lineno"> 504</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 8)</div><div class="line"><a name="l00505"></a><span class="lineno"> 505</span>&#160; {</div><div class="line"><a name="l00506"></a><span class="lineno"> 506</span>&#160; <span class="keywordflow">if</span>(lid &lt; 4)</div><div class="line"><a name="l00507"></a><span class="lineno"> 507</span>&#160; {</div><div class="line"><a name="l00508"></a><span class="lineno"> 508</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 4], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00509"></a><span class="lineno"> 509</span>&#160; }</div><div class="line"><a name="l00510"></a><span class="lineno"> 510</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00511"></a><span class="lineno"> 511</span>&#160; }</div><div class="line"><a name="l00512"></a><span class="lineno"> 512</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a> &gt;= 4)</div><div class="line"><a name="l00513"></a><span class="lineno"> 
513</span>&#160; {</div><div class="line"><a name="l00514"></a><span class="lineno"> 514</span>&#160; <span class="keywordflow">if</span>(lid &lt; 2)</div><div class="line"><a name="l00515"></a><span class="lineno"> 515</span>&#160; {</div><div class="line"><a name="l00516"></a><span class="lineno"> 516</span>&#160; tmp_local[lid] = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 2], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00517"></a><span class="lineno"> 517</span>&#160; }</div><div class="line"><a name="l00518"></a><span class="lineno"> 518</span>&#160; barrier(CLK_LOCAL_MEM_FENCE);</div><div class="line"><a name="l00519"></a><span class="lineno"> 519</span>&#160; }</div><div class="line"><a name="l00520"></a><span class="lineno"> 520</span>&#160; <span class="keywordflow">if</span>(lid == 0)</div><div class="line"><a name="l00521"></a><span class="lineno"> 521</span>&#160; {</div><div class="line"><a name="l00522"></a><span class="lineno"> 522</span>&#160; sum1D = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(tmp_local[lid + 1], tmp_local[lid], <span class="keywordtype">int</span>, 4);</div><div class="line"><a name="l00523"></a><span class="lineno"> 523</span>&#160; <span class="comment">// Perform sum reduction</span></div><div class="line"><a name="l00524"></a><span class="lineno"> 524</span>&#160; sum1D.s01 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s01, sum1D.s23, <span class="keywordtype">int</span>, 2);</div><div class="line"><a name="l00525"></a><span class="lineno"> 525</span>&#160; sum1D.s0 = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(sum1D.s0, sum1D.s1, <span class="keywordtype">int</span>, 1);</div><div class="line"><a name="l00526"></a><span class="lineno"> 526</span>&#160; *((__global <span class="keywordtype">int</span> *)<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>.ptr) = sum1D.s0;</div><div class="line"><a name="l00527"></a><span class="lineno"> 527</span>&#160; }</div><div class="line"><a name="l00528"></a><span class="lineno"> 528</span>&#160;}</div><div class="line"><a name="l00529"></a><span class="lineno"> 529</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00530"></a><span class="lineno"> 530</span>&#160;<span class="comment">/** Divides all the values of the input tensor by the sum calculated from softmax_layer_shift_exp_sum kernel.</span></div><div class="line"><a name="l00531"></a><span class="lineno"> 531</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00532"></a><span class="lineno"> 532</span>&#160;<span class="comment"> * @note Quantized beta can be optionally passed at compile time using -DINPUT_BETA_MULTIPLIER and -DINPUT_BETA_LEFT_SHIFT (if undefined, assume beta equals 1.0)</span></div><div class="line"><a name="l00533"></a><span class="lineno"> 533</span>&#160;<span class="comment"> * @note -DDIFF_MIN must be passed at compile time. 
It is the threshold difference between the maximum value of the input data and the current processed value; it defines whether the value will be taken into account or not.</span></div><div class="line"><a name="l00534"></a><span class="lineno"> 534</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00535"></a><span class="lineno"> 535</span>&#160;<span class="comment"> * @param[in] src_ptr Pointer to the source tensor slice. Supported data types: S32</span></div><div class="line"><a name="l00536"></a><span class="lineno"> 536</span>&#160;<span class="comment"> * @param[in] src_stride_x Stride of the source tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00537"></a><span class="lineno"> 537</span>&#160;<span class="comment"> * @param[in] src_step_x src_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00538"></a><span class="lineno"> 538</span>&#160;<span class="comment"> * @param[in] src_stride_y Stride of the source tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00539"></a><span class="lineno"> 539</span>&#160;<span class="comment"> * @param[in] src_step_y src_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00540"></a><span class="lineno"> 540</span>&#160;<span class="comment"> * @param[in] src_stride_z Stride of the source tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00541"></a><span class="lineno"> 541</span>&#160;<span class="comment"> * @param[in] src_step_z src_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00542"></a><span class="lineno"> 542</span>&#160;<span class="comment"> * @param[in] src_offset_first_element_in_bytes The offset of the first element in the source tensor</span></div><div class="line"><a name="l00543"></a><span class="lineno"> 543</span>&#160;<span class="comment"> * @param[in] sum_ptr Pointer to the sum values tensor slice. 
Supported data types: same as @p src_ptr</span></div><div class="line"><a name="l00544"></a><span class="lineno"> 544</span>&#160;<span class="comment"> * @param[in] sum_stride_x Stride of the sum values tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00545"></a><span class="lineno"> 545</span>&#160;<span class="comment"> * @param[in] sum_step_x sum_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00546"></a><span class="lineno"> 546</span>&#160;<span class="comment"> * @param[in] sum_stride_y Stride of the sum values tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00547"></a><span class="lineno"> 547</span>&#160;<span class="comment"> * @param[in] sum_step_y sum_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00548"></a><span class="lineno"> 548</span>&#160;<span class="comment"> * @param[in] sum_stride_z Stride of the sum values tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00549"></a><span class="lineno"> 549</span>&#160;<span class="comment"> * @param[in] sum_step_z sum_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00550"></a><span class="lineno"> 550</span>&#160;<span class="comment"> * @param[in] sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor</span></div><div class="line"><a name="l00551"></a><span class="lineno"> 551</span>&#160;<span class="comment"> * @param[out] dst_ptr Pointer to the destination tensor slice. Supported data types: QASYMM8</span></div><div class="line"><a name="l00552"></a><span class="lineno"> 552</span>&#160;<span class="comment"> * @param[in] dst_stride_x Stride of the destination tensor in X dimension (in bytes)</span></div><div class="line"><a name="l00553"></a><span class="lineno"> 553</span>&#160;<span class="comment"> * @param[in] dst_step_x dst_stride_x * number of elements along X processed per workitem(in bytes)</span></div><div class="line"><a name="l00554"></a><span class="lineno"> 554</span>&#160;<span class="comment"> * @param[in] dst_stride_y Stride of the destination tensor in Y dimension (in bytes)</span></div><div class="line"><a name="l00555"></a><span class="lineno"> 555</span>&#160;<span class="comment"> * @param[in] dst_step_y dst_stride_y * number of elements along Y processed per workitem(in bytes)</span></div><div class="line"><a name="l00556"></a><span class="lineno"> 556</span>&#160;<span class="comment"> * @param[in] dst_stride_z Stride of the destination tensor in Z dimension (in bytes)</span></div><div class="line"><a name="l00557"></a><span class="lineno"> 557</span>&#160;<span class="comment"> * @param[in] dst_step_z dst_stride_z * number of elements along Z processed per workitem(in bytes)</span></div><div class="line"><a name="l00558"></a><span class="lineno"> 558</span>&#160;<span class="comment"> * @param[in] dst_offset_first_element_in_bytes The offset of the first element in the destination tensor</span></div><div class="line"><a name="l00559"></a><span class="lineno"> 559</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00560"></a><span class="lineno"> 560</span>&#160;__kernel <span class="keywordtype">void</span> softmax_layer_norm_quantized(</div><div class="line"><a name="l00561"></a><span class="lineno"> 561</span>&#160; <a class="code" 
href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>),</div><div class="line"><a name="l00562"></a><span class="lineno"> 562</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>),</div><div class="line"><a name="l00563"></a><span class="lineno"> 563</span>&#160; <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>))</div><div class="line"><a name="l00564"></a><span class="lineno"> 564</span>&#160;{</div><div class="line"><a name="l00565"></a><span class="lineno"> 565</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a> = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>);</div><div class="line"><a name="l00566"></a><span class="lineno"> 566</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a> = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>);</div><div class="line"><a name="l00567"></a><span class="lineno"> 567</span>&#160; <a class="code" href="struct_image.xhtml">Image</a> <a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a> = <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a071aa45af973feac43b14f62e54a6fce">CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP</a>(<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>);</div><div class="line"><a name="l00568"></a><span class="lineno"> 568</span>&#160;</div><div class="line"><a name="l00569"></a><span class="lineno"> 569</span>&#160; <span class="comment">// Load max value of 1D logits vector (row)</span></div><div class="line"><a name="l00570"></a><span class="lineno"> 570</span>&#160; <span class="keywordtype">int</span> sum_val = *((__global <span class="keywordtype">int</span> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a>, 0, get_global_id(1)));</div><div class="line"><a name="l00571"></a><span class="lineno"> 571</span>&#160;</div><div class="line"><a name="l00572"></a><span class="lineno"> 572</span>&#160; <span class="comment">// It will be better to calculate this in prev layer and pass here as parameter</span></div><div class="line"><a name="l00573"></a><span class="lineno"> 573</span>&#160;<span class="preprocessor">#ifndef LOG_SOFTMAX</span></div><div 
class="line"><a name="l00574"></a><span class="lineno"> 574</span>&#160; uint sum_val_u = convert_uint(sum_val);</div><div class="line"><a name="l00575"></a><span class="lineno"> 575</span>&#160; <span class="keywordtype">int</span> headroom_plus_one = clz(sum_val_u);</div><div class="line"><a name="l00576"></a><span class="lineno"> 576</span>&#160; <span class="keywordtype">int</span> num_bits_over_unit = EXP_ACCUMULATION_INT_BITS - headroom_plus_one;</div><div class="line"><a name="l00577"></a><span class="lineno"> 577</span>&#160; <span class="keywordtype">int</span> shifted_sum_minus_one_1 = convert_int((sum_val_u &lt;&lt; headroom_plus_one) - (1u &lt;&lt; 31));</div><div class="line"><a name="l00578"></a><span class="lineno"> 578</span>&#160; int16 shifted_sum_minus_one = shifted_sum_minus_one_1;</div><div class="line"><a name="l00579"></a><span class="lineno"> 579</span>&#160; int16 shifted_scale = <a class="code" href="helpers__asymm_8h.xhtml#ae77f34e1316d52c1ee84c35be9efb0d8">ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1</a>(shifted_sum_minus_one, 16);</div><div class="line"><a name="l00580"></a><span class="lineno"> 580</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* LOG_SOFTMAX */</span><span class="preprocessor"></span></div><div class="line"><a name="l00581"></a><span class="lineno"> 581</span>&#160;</div><div class="line"><a name="l00582"></a><span class="lineno"> 582</span>&#160; <span class="comment">// It was already calculated in prev layer, should be stored into tmp output and reused</span></div><div class="line"><a name="l00583"></a><span class="lineno"> 583</span>&#160; int16 data_diff = vload16(0, (__global <span class="keywordtype">int</span> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">src</a>, 0, 0));</div><div class="line"><a name="l00584"></a><span class="lineno"> 584</span>&#160; int16 data_diff_mult = data_diff;</div><div class="line"><a name="l00585"></a><span class="lineno"> 585</span>&#160;<span class="preprocessor">#if defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT)</span></div><div class="line"><a name="l00586"></a><span class="lineno"> 586</span>&#160; <span class="keywordflow">if</span>(INPUT_BETA_MULTIPLIER &gt; 1)</div><div class="line"><a name="l00587"></a><span class="lineno"> 587</span>&#160; {</div><div class="line"><a name="l00588"></a><span class="lineno"> 588</span>&#160; data_diff_mult = <a class="code" href="helpers__asymm_8h.xhtml#a5483aefd5e07244661178bfd3f434448">ASYMM_MULT</a>(data_diff * (1 &lt;&lt; INPUT_BETA_LEFT_SHIFT), INPUT_BETA_MULTIPLIER, 16);</div><div class="line"><a name="l00589"></a><span class="lineno"> 589</span>&#160; }</div><div class="line"><a name="l00590"></a><span class="lineno"> 590</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* defined(INPUT_BETA_MULTIPLIER) &amp;&amp; defined(INPUT_BETA_LEFT_SHIFT) */</span><span class="preprocessor"></span></div><div class="line"><a name="l00591"></a><span class="lineno"> 591</span>&#160;</div><div class="line"><a name="l00592"></a><span class="lineno"> 592</span>&#160;<span class="preprocessor">#ifdef LOG_SOFTMAX</span></div><div class="line"><a name="l00593"></a><span class="lineno"> 593</span>&#160; long16 data = <a class="code" 
href="softmax__layer__quantized_8cl.xhtml#ac3af2d18008cbbf7247ae48fcd6e0c4e">SUB_OP</a>(convert_long16(data_diff_mult), (long16)(sum_val), <span class="keywordtype">long</span>, 16);</div><div class="line"><a name="l00594"></a><span class="lineno"> 594</span>&#160; data = <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(0L, data, convert_long16(data_diff) &gt;= (long16)(DIFF_MIN));</div><div class="line"><a name="l00595"></a><span class="lineno"> 595</span>&#160;<span class="preprocessor">#else </span><span class="comment">/* LOG_SOFTMAX */</span><span class="preprocessor"></span></div><div class="line"><a name="l00596"></a><span class="lineno"> 596</span>&#160; int16 data = <a class="code" href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a>(data_diff_mult, SCALED_DIFF_INT_BITS, 16);</div><div class="line"><a name="l00597"></a><span class="lineno"> 597</span>&#160; data = <a class="code" href="helpers__asymm_8h.xhtml#a5483aefd5e07244661178bfd3f434448">ASYMM_MULT</a>(shifted_scale, data, 16);</div><div class="line"><a name="l00598"></a><span class="lineno"> 598</span>&#160; data = <a class="code" href="helpers__asymm_8h.xhtml#aa43fc359dea64362f3016384f4269845">ASYMM_ROUNDING_DIVIDE_BY_POW2</a>(data, num_bits_over_unit + 31 - 8, 16);</div><div class="line"><a name="l00599"></a><span class="lineno"> 599</span>&#160;<span class="preprocessor">#ifdef QASYMM8_SIGNED</span></div><div class="line"><a name="l00600"></a><span class="lineno"> 600</span>&#160; data = <a class="code" href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a>(data, (int16)(MIN_VALUE), <span class="keywordtype">int</span>, 16);</div><div class="line"><a name="l00601"></a><span class="lineno"> 601</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* QASYMM8_SIGNED */</span><span class="preprocessor"></span></div><div class="line"><a name="l00602"></a><span class="lineno"> 602</span>&#160; data = <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">select</a>(MIN_VALUE, data, data_diff &gt;= (int16)(DIFF_MIN));</div><div class="line"><a name="l00603"></a><span class="lineno"> 603</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* LOG_SOFTMAX */</span><span class="preprocessor"></span></div><div class="line"><a name="l00604"></a><span class="lineno"> 604</span>&#160; vstore16(<a class="code" href="direct__convolution1x1_8cl.xhtml#a1f15728672380ade7a238f5e783d54d2">CONVERT_SAT</a>(data, <a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a>(<a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a>, 16)), 0, (__global <a class="code" href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a> *)<a class="code" href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a>(&amp;<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">dst</a>, 0, 0));</div><div class="line"><a name="l00605"></a><span class="lineno"> 605</span>&#160;}</div><div class="line"><a name="l00606"></a><span class="lineno"> 606</span>&#160;</div><div class="line"><a name="l00607"></a><span class="lineno"> 607</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* 
defined(DIFF_MIN) */</span><span class="preprocessor"></span></div><div class="ttc" id="src_2core_2_c_l_2cl__kernels_2_helpers_8h_xhtml_a009469e4d9b8fce3b6d5e97d2077827d"><div class="ttname"><a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a009469e4d9b8fce3b6d5e97d2077827d">offset</a></div><div class="ttdeci">__global uchar * offset(const Image *img, int x, int y)</div><div class="ttdoc">Get the pointer position of an Image.</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00510">helpers.h:510</a></div></div>
<div class="ttc" id="src_2core_2_c_l_2cl__kernels_2_helpers_8h_xhtml_a071aa45af973feac43b14f62e54a6fce"><div class="ttname"><a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a071aa45af973feac43b14f62e54a6fce">CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP</a></div><div class="ttdeci">#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name)</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00320">helpers.h:320</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a16110bd2b92003141dbaf8a44498ff82"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a16110bd2b92003141dbaf8a44498ff82">VEC_UINT</a></div><div class="ttdeci">#define VEC_UINT</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00064">softmax_layer_quantized.cl:64</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a525a42d38133b1051b8924b456add4a1"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a525a42d38133b1051b8924b456add4a1">asymm_mult</a></div><div class="ttdeci">#define asymm_mult(a, b)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00057">softmax_layer_quantized.cl:57</a></div></div>
<div class="ttc" id="src_2core_2_c_l_2cl__kernels_2_helpers_8h_xhtml_aa8d95ba04fc73845abc6045952cae5be"><div class="ttname"><a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#aa8d95ba04fc73845abc6045952cae5be">CONVERT</a></div><div class="ttdeci">#define CONVERT(x, type)</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00261">helpers.h:261</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_aa1dd94b8d98f1c6d790bdf0fc5de29e9"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#aa1dd94b8d98f1c6d790bdf0fc5de29e9">idx__</a></div><div class="ttdeci">__constant uint16 idx__</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00056">softmax_layer_quantized.cl:56</a></div></div>
<div class="ttc" id="convolution3x3_8cl_xhtml_afb8c72ce35c4a1f4a2588d6573e54aa1"><div class="ttname"><a href="convolution3x3_8cl.xhtml#afb8c72ce35c4a1f4a2588d6573e54aa1">DATA_TYPE</a></div><div class="ttdeci">#define DATA_TYPE</div><div class="ttdef"><b>Definition:</b> <a href="convolution3x3_8cl_source.xhtml#l00027">convolution3x3.cl:27</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a7c78836761fa3b5b124efea237dac70f"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a7c78836761fa3b5b124efea237dac70f">VECTOR_SIZE</a></div><div class="ttdeci">#define VECTOR_SIZE</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00054">softmax_layer_quantized.cl:54</a></div></div>
<div class="ttc" id="helpers__asymm_8h_xhtml_a5483aefd5e07244661178bfd3f434448"><div class="ttname"><a href="helpers__asymm_8h.xhtml#a5483aefd5e07244661178bfd3f434448">ASYMM_MULT</a></div><div class="ttdeci">#define ASYMM_MULT(a, b, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00384">helpers_asymm.h:384</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_abaa48ad818c44e415fd3f9dd0f27bf01"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#abaa48ad818c44e415fd3f9dd0f27bf01">MAX_OP</a></div><div class="ttdeci">#define MAX_OP(x, y, type, size)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00026">softmax_layer_quantized.cl:26</a></div></div>
<div class="ttc" id="reduction__operation_8cl_xhtml_ab0df00f5333da51860deb93deb44a782"><div class="ttname"><a href="reduction__operation_8cl.xhtml#ab0df00f5333da51860deb93deb44a782">sum</a></div><div class="ttdeci">DATA_TYPE sum(__global const DATA_TYPE *input)</div><div class="ttdoc">Calculate sum of a vector.</div><div class="ttdef"><b>Definition:</b> <a href="reduction__operation_8cl_source.xhtml#l00066">reduction_operation.cl:66</a></div></div>
<div class="ttc" id="helpers__asymm_8h_xhtml_aa43fc359dea64362f3016384f4269845"><div class="ttname"><a href="helpers__asymm_8h.xhtml#aa43fc359dea64362f3016384f4269845">ASYMM_ROUNDING_DIVIDE_BY_POW2</a></div><div class="ttdeci">#define ASYMM_ROUNDING_DIVIDE_BY_POW2(x, exponent, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00383">helpers_asymm.h:383</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a44206a4e5783c7aabacec88aad878c88"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a44206a4e5783c7aabacec88aad878c88">ADD_OP</a></div><div class="ttdeci">#define ADD_OP(x, y, type, size)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00027">softmax_layer_quantized.cl:27</a></div></div>
<div class="ttc" id="direct__convolution1x1_8cl_xhtml_a1f15728672380ade7a238f5e783d54d2"><div class="ttname"><a href="direct__convolution1x1_8cl.xhtml#a1f15728672380ade7a238f5e783d54d2">CONVERT_SAT</a></div><div class="ttdeci">#define CONVERT_SAT(a, b)</div><div class="ttdef"><b>Definition:</b> <a href="direct__convolution1x1_8cl_source.xhtml#l00030">direct_convolution1x1.cl:30</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a0886942393a3ba0dfefaa7516b159784"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a0886942393a3ba0dfefaa7516b159784">arm_compute::test::validation::for</a></div><div class="ttdeci">for(size_t k=0;k&lt; _target.size();++k)</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_unstack_8cpp_source.xhtml#l00091">Unstack.cpp:91</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_ac3af2d18008cbbf7247ae48fcd6e0c4e"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#ac3af2d18008cbbf7247ae48fcd6e0c4e">SUB_OP</a></div><div class="ttdeci">#define SUB_OP(x, y, type, size)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00028">softmax_layer_quantized.cl:28</a></div></div>
<div class="ttc" id="helpers__asymm_8h_xhtml_a98585f1bb84dea90aecbf59785c46151"><div class="ttname"><a href="helpers__asymm_8h.xhtml#a98585f1bb84dea90aecbf59785c46151">ASYMM_RESCALE</a></div><div class="ttdeci">#define ASYMM_RESCALE(value, src_integer_bits, dst_integer_bits, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00398">helpers_asymm.h:398</a></div></div>
<div class="ttc" id="src_2core_2_c_l_2cl__kernels_2_helpers_8h_xhtml_a541f8db866a0fa93ee67d58ea31a7d0c"><div class="ttname"><a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a541f8db866a0fa93ee67d58ea31a7d0c">CONVERT_TENSOR3D_TO_IMAGE_STRUCT</a></div><div class="ttdeci">#define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name)</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00323">helpers.h:323</a></div></div>
<div class="ttc" id="softmax__layer_8cl_xhtml_a4884a666a1e93fbf8c27bd7d2da3c8bb"><div class="ttname"><a href="softmax__layer_8cl.xhtml#a4884a666a1e93fbf8c27bd7d2da3c8bb">idx4</a></div><div class="ttdeci">__constant uint4 idx4</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer_8cl_source.xhtml#l00070">softmax_layer.cl:70</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_adbf67dcee294e673cf796f1ed8aeb6a4"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#adbf67dcee294e673cf796f1ed8aeb6a4">arm_compute::test::validation::dst</a></div><div class="ttdeci">CLTensor dst</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_absolute_difference_8cpp_source.xhtml#l00102">AbsoluteDifference.cpp:102</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_ad57ea340cdcfeb2e1375b70c3ae59bae"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#ad57ea340cdcfeb2e1375b70c3ae59bae">asymm_rescale</a></div><div class="ttdeci">#define asymm_rescale(value, src_integer_bits, dst_integer_bits)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00059">softmax_layer_quantized.cl:59</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_aee190caf3b3571e939ac129e12c368cd"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#aee190caf3b3571e939ac129e12c368cd">VEC_INT</a></div><div class="ttdeci">#define VEC_INT</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00065">softmax_layer_quantized.cl:65</a></div></div>
<div class="ttc" id="struct_image_xhtml"><div class="ttname"><a href="struct_image.xhtml">Image</a></div><div class="ttdoc">Structure to hold Image information.</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00349">helpers.h:349</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a372393c380805985b813dbb16d589a64"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a372393c380805985b813dbb16d589a64">LOG_VECTOR_SIZE</a></div><div class="ttdeci">#define LOG_VECTOR_SIZE</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00055">softmax_layer_quantized.cl:55</a></div></div>
<div class="ttc" id="struct_image_xhtml_acf52c23cbd7424606c10a606524e3e32"><div class="ttname"><a href="struct_image.xhtml#acf52c23cbd7424606c10a606524e3e32">Image::ptr</a></div><div class="ttdeci">__global uchar * ptr</div><div class="ttdoc">Pointer to the starting postion of the buffer.</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00351">helpers.h:351</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_af77145fbdc6b0c8931148f5597d9de53"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#af77145fbdc6b0c8931148f5597d9de53">arm_compute::test::validation::select</a></div><div class="ttdeci">CLSelect select</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_select_8cpp_source.xhtml#l00164">Select.cpp:164</a></div></div>
<div class="ttc" id="helpers__asymm_8h_xhtml"><div class="ttname"><a href="helpers__asymm_8h.xhtml">helpers_asymm.h</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_af5987b09a234231612b2b1eded343025"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#af5987b09a234231612b2b1eded343025">VEC_UCHAR</a></div><div class="ttdeci">#define VEC_UCHAR</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00063">softmax_layer_quantized.cl:63</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a08246606c233e7785a497c09672f366f"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a08246606c233e7785a497c09672f366f">GRID_SIZE</a></div><div class="ttdeci">#define GRID_SIZE</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00032">softmax_layer_quantized.cl:32</a></div></div>
<div class="ttc" id="src_2core_2_c_l_2cl__kernels_2_helpers_8h_xhtml_acb282042d1edeeaa3cc979a206f78b54"><div class="ttname"><a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#acb282042d1edeeaa3cc979a206f78b54">VSTORE</a></div><div class="ttdeci">#define VSTORE(size)</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00198">helpers.h:198</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a54aedfa17c5ac2567107d5f488b0f4af"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a54aedfa17c5ac2567107d5f488b0f4af">asymm_exp_on_negative_values</a></div><div class="ttdeci">#define asymm_exp_on_negative_values(a, k_integer_bits)</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00058">softmax_layer_quantized.cl:58</a></div></div>
<div class="ttc" id="src_2core_2_c_l_2cl__kernels_2_helpers_8h_xhtml_a287e2fc366c312b468382c95bb90f91f"><div class="ttname"><a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a287e2fc366c312b468382c95bb90f91f">VLOAD</a></div><div class="ttdeci">#define VLOAD(size)</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00195">helpers.h:195</a></div></div>
<div class="ttc" id="softmax__layer__quantized_8cl_xhtml_a89a27ed9d640355cfc1b6220b6eedd64"><div class="ttname"><a href="softmax__layer__quantized_8cl.xhtml#a89a27ed9d640355cfc1b6220b6eedd64">VEC_BASE</a></div><div class="ttdeci">#define VEC_BASE</div><div class="ttdef"><b>Definition:</b> <a href="softmax__layer__quantized_8cl_source.xhtml#l00066">softmax_layer_quantized.cl:66</a></div></div>
<div class="ttc" id="src_2core_2_c_l_2cl__kernels_2_helpers_8h_xhtml_a6b83038822d1ae7ab619b684ed3b7fc0"><div class="ttname"><a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a6b83038822d1ae7ab619b684ed3b7fc0">TENSOR3D_DECLARATION</a></div><div class="ttdeci">#define TENSOR3D_DECLARATION(name)</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00283">helpers.h:283</a></div></div>
<div class="ttc" id="helpers__asymm_8h_xhtml_ae77f34e1316d52c1ee84c35be9efb0d8"><div class="ttname"><a href="helpers__asymm_8h.xhtml#ae77f34e1316d52c1ee84c35be9efb0d8">ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1</a></div><div class="ttdeci">#define ASYMM_ONE_OVER_ONE_PLUS_X_FOR_X_IN_0_1(a, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00395">helpers_asymm.h:395</a></div></div>
<div class="ttc" id="helpers__asymm_8h_xhtml_a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8"><div class="ttname"><a href="helpers__asymm_8h.xhtml#a3a4f1b5d8f1cd67ac31bc62c9a6f4aa8">ASYMM_EXP_ON_NEGATIVE_VALUES</a></div><div class="ttdeci">#define ASYMM_EXP_ON_NEGATIVE_VALUES(a, k_integer_bits, size)</div><div class="ttdef"><b>Definition:</b> <a href="helpers__asymm_8h_source.xhtml#l00394">helpers_asymm.h:394</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a989ab3e96426615bb98e04e0235088ca"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a989ab3e96426615bb98e04e0235088ca">arm_compute::test::validation::src</a></div><div class="ttdeci">cast configure &amp; src</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_cast_8cpp_source.xhtml#l00169">Cast.cpp:169</a></div></div>
<div class="ttc" id="src_2core_2_c_l_2cl__kernels_2_helpers_8h_xhtml_a36f754c05b6fddf6df0d8d0a74f8159f"><div class="ttname"><a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h.xhtml#a36f754c05b6fddf6df0d8d0a74f8159f">VEC_DATA_TYPE</a></div><div class="ttdeci">#define VEC_DATA_TYPE(type, size)</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_c_l_2cl__kernels_2_helpers_8h_source.xhtml#l00255">helpers.h:255</a></div></div>
</div><!-- fragment --></div><!-- contents -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
<li class="navelem"><a class="el" href="dir_68267d1309a1af8e8297ef4c3efbcdba.xhtml">src</a></li><li class="navelem"><a class="el" href="dir_aebb8dcc11953d78e620bbef0b9e2183.xhtml">core</a></li><li class="navelem"><a class="el" href="dir_8c278f79c760e5c5fbd911f9870614c1.xhtml">CL</a></li><li class="navelem"><a class="el" href="dir_25885286e9dad4fa105b7b25a8031bbf.xhtml">cl_kernels</a></li><li class="navelem"><a class="el" href="softmax__layer__quantized_8cl.xhtml">softmax_layer_quantized.cl</a></li>
<li class="footer">Generated on Thu Mar 5 2020 16:06:58 for Compute Library by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.15 </li>
</ul>
</div>
</body>
</html>