<div class="header">
<div class="headertitle">
<div class="title">CLSoftmaxLayer.cpp</div> </div>
</div><!--header-->
<div class="contents">
<a href="_c_l_softmax_layer_8cpp.xhtml">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment"> * Copyright (c) 2017-2019 ARM Limited.</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment"> * SPDX-License-Identifier: MIT</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="comment"> * Permission is hereby granted, free of charge, to any person obtaining a copy</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="comment"> * of this software and associated documentation files (the &quot;Software&quot;), to</span></div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="comment"> * deal in the Software without restriction, including without limitation the</span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="comment"> * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="comment"> * sell copies of the Software, and to permit persons to whom the Software is</span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;<span class="comment"> * furnished to do so, subject to the following conditions:</span></div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160;<span class="comment"> * The above copyright notice and this permission notice shall be included in all</span></div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="comment"> * copies or substantial portions of the Software.</span></div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;<span class="comment"> * THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR</span></div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="comment"> * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,</span></div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160;<span class="comment"> * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE</span></div><div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160;<span class="comment"> * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER</span></div><div class="line"><a name="l00020"></a><span class="lineno"> 20</span>&#160;<span class="comment"> * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,</span></div><div class="line"><a name="l00021"></a><span class="lineno"> 21</span>&#160;<span class="comment"> * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE</span></div><div class="line"><a name="l00022"></a><span class="lineno"> 22</span>&#160;<span class="comment"> * SOFTWARE.</span></div><div class="line"><a name="l00023"></a><span class="lineno"> 23</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_c_l_softmax_layer_8h.xhtml">arm_compute/runtime/CL/functions/CLSoftmaxLayer.h</a>&quot;</span></div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;</div><div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="core_2_c_l_2_c_l_helpers_8h.xhtml">arm_compute/core/CL/CLHelpers.h</a>&quot;</span></div><div class="line"><a name="l00027"></a><span class="lineno"> 27</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_i_c_l_kernel_8h.xhtml">arm_compute/core/CL/ICLKernel.h</a>&quot;</span></div><div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_c_l_softmax_layer_kernel_8h.xhtml">arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h</a>&quot;</span></div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="arm__compute_2core_2_helpers_8h.xhtml">arm_compute/core/Helpers.h</a>&quot;</span></div><div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="arm__compute_2core_2_types_8h.xhtml">arm_compute/core/Types.h</a>&quot;</span></div><div class="line"><a name="l00031"></a><span class="lineno"> 31</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="arm__compute_2core_2_utils_8h.xhtml">arm_compute/core/Utils.h</a>&quot;</span></div><div class="line"><a name="l00032"></a><span class="lineno"> 32</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_shape_calculator_8h.xhtml">arm_compute/core/utils/misc/ShapeCalculator.h</a>&quot;</span></div><div class="line"><a name="l00033"></a><span class="lineno"> 33</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_c_l_scheduler_8h.xhtml">arm_compute/runtime/CL/CLScheduler.h</a>&quot;</span></div><div class="line"><a name="l00034"></a><span class="lineno"> 34</span>&#160;</div><div class="line"><a name="l00035"></a><span class="lineno"> 35</span>&#160;<span class="keyword">namespace </span><a class="code" href="namespacearm__compute.xhtml">arm_compute</a></div><div class="line"><a name="l00036"></a><span class="lineno"> 36</span>&#160;{</div><div class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> IS_LOG&gt;</div><div class="line"><a 
name="l00038"></a><span class="lineno"><a class="line" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#a21c529666f0d9c998cdae7c52bea4fc1"> 38</a></span>&#160;<a class="code" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#a21c529666f0d9c998cdae7c52bea4fc1">CLSoftmaxLayerGeneric&lt;IS_LOG&gt;::CLSoftmaxLayerGeneric</a>(std::shared_ptr&lt;IMemoryManager&gt; memory_manager)</div><div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160; : _memory_group(std::move(memory_manager)), _max_shift_exp_sum_kernel(), _norm_kernel(), _flatten_kernel_ptr(), _reshape_kernel(), _max(), _sum(), _tmp(), _input_flattened(), _output_flattened(),</div><div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160; _needs_flattening(false)</div><div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160;{</div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160;}</div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160;</div><div class="line"><a name="l00044"></a><span class="lineno"> 44</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> IS_LOG&gt;</div><div class="line"><a name="l00045"></a><span class="lineno"> 45</span>&#160;<span class="keywordtype">void</span> <a class="code" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml">CLSoftmaxLayerGeneric&lt;IS_LOG&gt;::configure_reshape_input_kernel</a>(<span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_c_l_tensor.xhtml">ICLTensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_c_l_tensor.xhtml">ICLTensor</a> *output, <span class="keywordtype">size_t</span> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a>)</div><div class="line"><a name="l00046"></a><span class="lineno"> 46</span>&#160;{</div><div class="line"><a name="l00047"></a><span class="lineno"> 47</span>&#160; <span class="comment">// Flatten the input</span></div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> shape_flatten = <a class="code" href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#ad16b366db486fec63b6d962937ec4545">misc::shape_calculator::compute_softmax_shape</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info(), <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a>);</div><div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160;</div><div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160; <span class="comment">// Initialize the flat input</span></div><div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160; _input_flattened.allocator()-&gt;init(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;clone()-&gt;set_is_resizable(<span class="keyword">true</span>).reset_padding().set_tensor_shape(shape_flatten));</div><div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160;</div><div class="line"><a name="l00053"></a><span 
class="lineno"> 53</span>&#160; <span class="comment">// If we need to flatten the input, we can use CLFlattenKernel or CLReshapeKernel</span></div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160; <span class="comment">// If flattening on the third axes, we use CLFlattenKernel.</span></div><div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160; <span class="comment">// In all other cases we have to use CLReshapeKernel</span></div><div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a> != 3)</div><div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160; {</div><div class="line"><a name="l00058"></a><span class="lineno"> 58</span>&#160; <span class="keyword">auto</span> reshape_kernel_ptr = support::cpp14::make_unique&lt;CLReshapeLayerKernel&gt;();</div><div class="line"><a name="l00059"></a><span class="lineno"> 59</span>&#160; reshape_kernel_ptr-&gt;configure(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, &amp;_input_flattened);</div><div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160; _flatten_kernel_ptr = std::move(reshape_kernel_ptr);</div><div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160; }</div><div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160; {</div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160; <span class="keyword">auto</span> flatten_kernel_ptr = support::cpp14::make_unique&lt;CLFlattenLayerKernel&gt;();</div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160; flatten_kernel_ptr-&gt;configure(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, &amp;_input_flattened);</div><div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160; _flatten_kernel_ptr = std::move(flatten_kernel_ptr);</div><div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160; }</div><div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160;</div><div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160; <span class="comment">// We need to init the output tensor here. 
Indeed, the reshape kernel expects</span></div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160; <span class="comment">// both tensors to be already initialized</span></div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160; <a class="code" href="namespacearm__compute.xhtml#a47be6fa38308d0003c25b60b7dbc45ce">auto_init_if_empty</a>(*output-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;clone());</div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160;}</div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160;</div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> IS_LOG&gt;</div><div class="line"><a name="l00075"></a><span class="lineno"><a class="line" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#ae3fdd368418ef45db831786e741ea1bb"> 75</a></span>&#160;<span class="keywordtype">void</span> <a class="code" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#ae3fdd368418ef45db831786e741ea1bb">CLSoftmaxLayerGeneric&lt;IS_LOG&gt;::configure</a>(<span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_c_l_tensor.xhtml">ICLTensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, <a class="code" href="classarm__compute_1_1_i_c_l_tensor.xhtml">ICLTensor</a> *output, <span class="keywordtype">float</span> beta, <span class="keywordtype">size_t</span> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a>)</div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160;{</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160; <span class="comment">// Perform validation step</span></div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; <a class="code" href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, output);</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160; <a class="code" href="_error_8h.xhtml#a938dcd406ce611ef5345ad2531cdb948">ARM_COMPUTE_ERROR_THROW_ON</a>(<a class="code" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml">CLSoftmaxLayerGeneric&lt;IS_LOG&gt;::validate</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info(), output-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), beta, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a>));</div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160;</div><div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160; <span class="comment">// We don&#39;t need flattening only in the case the input is 2D and axis is 1</span></div><div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160; _needs_flattening = <a class="code" 
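The helper above only prepares a flattened 2D view; the softmax kernels themselves then work row by row on that 2D tensor. As a minimal, self-contained illustration of the collapse described by the comments here and in configure() below (plain C++ with hypothetical shapes, not part of the library; the exact rule for each axis value lives in compute_softmax_shape in ShapeCalculator.h):

#include <cstddef>
#include <cstdio>

int main()
{
    // Hypothetical 4D shape: width, height, depth, batches
    const std::size_t w = 8, h = 4, d = 2, n = 3;

    // Collapse the leading dimensions so the softmax kernels see a 2D tensor:
    // one row per batch, width*height*depth elements per row.
    const std::size_t row_length = w * h * d; // 64
    const std::size_t rows       = n;         // 3

    std::printf("flattened shape: [%zu x %zu]\n", row_length, rows);
    return 0;
}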
href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a> != 1;</div><div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160;</div><div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160; <span class="comment">// If we are dealing with a 4D tensor, we will:</span></div><div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160; <span class="comment">// - Flatten the input, so that we end up with a [width*height*depth] * batches 2D tensor</span></div><div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160; <span class="comment">// - Execute all the pipeline (reduction + normalization) on the flattened tensor</span></div><div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160; <span class="comment">// - Reshape the flattened output into the real output</span></div><div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160; <span class="keywordflow">if</span>(_needs_flattening)</div><div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160; {</div><div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160; <span class="comment">// Add to the memory manager _input_flattened</span></div><div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; _memory_group.manage(&amp;_input_flattened);</div><div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160;</div><div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160; <span class="comment">// Cofigure _flatten_kernel and _input_flattened</span></div><div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160; configure_reshape_input_kernel(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, output, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a>);</div><div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160; }</div><div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160;</div><div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160; <span class="comment">// We want to deal with a 2D input. Either it is the flattened version of the original input (4D case)</span></div><div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160; <span class="comment">// or it is the original input case (2D case)</span></div><div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_c_l_tensor.xhtml">ICLTensor</a> *input_2D = (_needs_flattening ? 
&amp;_input_flattened : <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>);</div><div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160;</div><div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160; <span class="comment">// Create intermediate tensors shapes</span></div><div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ae008e90eb6906fa3526213bc860f6cc5">input_info</a> = input_2D-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>()-&gt;<a class="code" href="classarm__compute_1_1misc_1_1_i_cloneable.xhtml#a4d10e5012a872e7f78f2b539b673049d">clone</a>()-&gt;reset_padding().set_is_resizable(<span class="keyword">true</span>);</div><div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160; <a class="code" href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6">DataType</a> tmp_data_type = <a class="code" href="namespacearm__compute.xhtml#a14f46283f316e7f0fad301d5c1507e9f">is_data_type_quantized_asymmetric</a>(input_2D-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a7cfb31af63202568efef5214acfbf3ba">data_type</a>()) ? <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c">DataType::S32</a> : input_2D-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a7cfb31af63202568efef5214acfbf3ba">data_type</a>();</div><div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> tensor_info_tmp(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ae008e90eb6906fa3526213bc860f6cc5">input_info</a>.clone()-&gt;set_data_type(tmp_data_type));</div><div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160; _tmp.allocator()-&gt;<a class="code" href="classarm__compute_1_1_tensor_info.xhtml#ad6b64f33be1e66dcf7612483ffb8fd63">init</a>(tensor_info_tmp);</div><div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160;</div><div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> max_sum_shape = input_2D-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a7c66505457d00ece3aa4b34cab80757d">tensor_shape</a>();</div><div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160; max_sum_shape.<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml#a9c54fb6cea3557692fe7c00c40bb40ad">set</a>(0, 1);</div><div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160; _max.allocator()-&gt;init(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ae008e90eb6906fa3526213bc860f6cc5">input_info</a>.clone()-&gt;set_tensor_shape(max_sum_shape));</div><div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160; 
_sum.allocator()-&gt;init(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ae008e90eb6906fa3526213bc860f6cc5">input_info</a>.clone()-&gt;set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type));</div><div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160;</div><div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160; <span class="comment">// Set GPU target to kernels</span></div><div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160; _max_shift_exp_sum_kernel.set_target(<a class="code" href="classarm__compute_1_1_c_l_scheduler.xhtml#a9b58d0eb9a2af8e6d7908695e1557d6c">CLScheduler::get</a>().target());</div><div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160;</div><div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160; <span class="comment">// Manage intermediate buffers</span></div><div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160; _memory_group.manage(&amp;_tmp);</div><div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160; _memory_group.manage(&amp;_max);</div><div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160; _memory_group.manage(&amp;_sum);</div><div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160;</div><div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160; <a class="code" href="structarm__compute_1_1_softmax_kernel_info.xhtml">SoftmaxKernelInfo</a> softmax_info;</div><div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160; softmax_info.<a class="code" href="structarm__compute_1_1_softmax_kernel_info.xhtml#aa773d9a6c0ccefaa0fc9ab66fec68ec1">beta</a> = beta;</div><div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160; softmax_info.<a class="code" href="structarm__compute_1_1_softmax_kernel_info.xhtml#a5dc2a3a0b94fa51d8bccdd5bec7d6eb0">is_log</a> = IS_LOG;</div><div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160; softmax_info.<a class="code" href="structarm__compute_1_1_softmax_kernel_info.xhtml#abfa079f56a93e0152cfe85919506fb51">input_data_type</a> = input_2D-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a7cfb31af63202568efef5214acfbf3ba">data_type</a>();</div><div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160;</div><div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160; <span class="comment">// Configure kernels</span></div><div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160; _max_shift_exp_sum_kernel.configure(input_2D, &amp;_max, &amp;_tmp, &amp;_sum, softmax_info);</div><div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160;</div><div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160; <span class="keywordflow">if</span>(_needs_flattening)</div><div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160; {</div><div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160; <span class="comment">// Add to the memory manager _output_flattened</span></div><div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160; _memory_group.manage(&amp;_output_flattened);</div><div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160;</div><div class="line"><a 
name="l00133"></a><span class="lineno"> 133</span>&#160; <span class="comment">// The normalization kernel stores the result in a flat output tensor</span></div><div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160; _norm_kernel.configure(&amp;_tmp, &amp;_sum, &amp;_output_flattened, softmax_info);</div><div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160;</div><div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160; <span class="comment">// Reshape the flat output into a the requested (4D) output</span></div><div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160; _reshape_kernel.configure(&amp;_output_flattened, output);</div><div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160;</div><div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160; <span class="comment">// Allocate the intermediate flat tensors</span></div><div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; _input_flattened.allocator()-&gt;allocate();</div><div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; _output_flattened.allocator()-&gt;allocate();</div><div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; }</div><div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160; {</div><div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160; <span class="comment">// Softmax 2D case</span></div><div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160; _norm_kernel.configure(&amp;_tmp, &amp;_sum, output, softmax_info);</div><div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; }</div><div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160;</div><div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; <span class="comment">// Allocate intermediate buffers</span></div><div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; _tmp.allocator()-&gt;allocate();</div><div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160; _max.allocator()-&gt;allocate();</div><div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160; _sum.allocator()-&gt;allocate();</div><div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160;}</div><div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160;</div><div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> IS_LOG&gt;</div><div class="line"><a name="l00156"></a><span class="lineno"><a class="line" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#a85db282920d24cd0f4dca6a439201a43"> 156</a></span>&#160;<a class="code" href="classarm__compute_1_1_status.xhtml">Status</a> <a class="code" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#a85db282920d24cd0f4dca6a439201a43">CLSoftmaxLayerGeneric&lt;IS_LOG&gt;::validate</a>(<span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, <span class="keyword">const</span> <a class="code" 
href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *output, <span class="keywordtype">float</span> beta, <span class="keywordtype">size_t</span> <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a>)</div><div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160;{</div><div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; <a class="code" href="_validate_8h.xhtml#aff911654521523937ff24372a870b89f">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, output);</div><div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160; <a class="code" href="_error_8h.xhtml#a1c69762a42ab8add645d0a949b6f4b1f">ARM_COMPUTE_RETURN_ERROR_ON_MSG</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;num_dimensions() &gt; 4, <span class="stringliteral">&quot;Only up to 4 dimensions are supported&quot;</span>);</div><div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; <a class="code" href="_error_8h.xhtml#a6dc630a6ae9cc063b3924bcea8dee9d6">ARM_COMPUTE_UNUSED</a>(beta);</div><div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160;</div><div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160; <span class="comment">// Create intermediate tensor info</span></div><div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; <a class="code" href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6">DataType</a> tmp_data_type = <a class="code" href="namespacearm__compute.xhtml#a14f46283f316e7f0fad301d5c1507e9f">is_data_type_quantized_asymmetric</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type()) ? 
<a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c">DataType::S32</a> : <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type();</div><div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> tensor_info_tmp(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;clone()-&gt;set_data_type(tmp_data_type).set_is_resizable(<span class="keyword">true</span>));</div><div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160;</div><div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> max_sum_shape = <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;tensor_shape();</div><div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160; max_sum_shape.set(0, 1);</div><div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> tensor_info_max(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;clone()-&gt;set_tensor_shape(max_sum_shape).set_is_resizable(<span class="keyword">true</span>));</div><div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> tensor_info_sum(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;clone()-&gt;set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type).set_quantization_info(<a class="code" href="classarm__compute_1_1_quantization_info.xhtml">QuantizationInfo</a>()).set_is_resizable(<span class="keyword">true</span>));</div><div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160;</div><div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; <span class="keyword">const</span> <span class="keywordtype">bool</span> needs_flattening = (<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a> != 1);</div><div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160;</div><div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; <span class="keywordflow">if</span>(needs_flattening)</div><div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160; {</div><div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> shape_flatten = <a class="code" href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#ad16b366db486fec63b6d962937ec4545">misc::shape_calculator::compute_softmax_shape</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a>);</div><div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; <a class="code" 
href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> tensor_info_flat(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;clone()-&gt;set_tensor_shape(shape_flatten).set_is_resizable(<span class="keyword">true</span>));</div><div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160;</div><div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; <span class="keywordflow">if</span>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">axis</a> != 3)</div><div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160; {</div><div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_c_l_reshape_layer_kernel.xhtml#a968b23a6ef327fcfb5b99d58e3fbe883">CLReshapeLayerKernel::validate</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, &amp;tensor_info_flat));</div><div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; }</div><div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160; {</div><div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_c_l_flatten_layer_kernel.xhtml#a968b23a6ef327fcfb5b99d58e3fbe883">CLFlattenLayerKernel::validate</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, &amp;tensor_info_flat));</div><div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160; }</div><div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160; }</div><div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160;</div><div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160; <a class="code" href="structarm__compute_1_1_softmax_kernel_info.xhtml">SoftmaxKernelInfo</a> softmax_info;</div><div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160; softmax_info.<a class="code" href="structarm__compute_1_1_softmax_kernel_info.xhtml#aa773d9a6c0ccefaa0fc9ab66fec68ec1">beta</a> = beta;</div><div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; softmax_info.<a class="code" href="structarm__compute_1_1_softmax_kernel_info.xhtml#a5dc2a3a0b94fa51d8bccdd5bec7d6eb0">is_log</a> = IS_LOG;</div><div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; softmax_info.<a class="code" href="structarm__compute_1_1_softmax_kernel_info.xhtml#abfa079f56a93e0152cfe85919506fb51">input_data_type</a> = <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type();</div><div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160;</div><div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" 
href="classarm__compute_1_1_c_l_logits1_d_max_shift_exp_sum_kernel.xhtml#a62885f3bde4b458c02316bfc3f339fbc">CLLogits1DMaxShiftExpSumKernel::validate</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, &amp;tensor_info_max, &amp;tensor_info_tmp, &amp;tensor_info_sum));</div><div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_c_l_logits1_d_norm_kernel.xhtml#a5d4222d9de32a72823286d6f3d26c469">CLLogits1DNormKernel::validate</a>(&amp;tensor_info_tmp, &amp;tensor_info_sum, output, softmax_info));</div><div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160;</div><div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160; <span class="keywordflow">if</span>(needs_flattening)</div><div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160; {</div><div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> shape_flatten = <a class="code" href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#ad16b366db486fec63b6d962937ec4545">misc::shape_calculator::compute_softmax_shape</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>);</div><div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> tensor_info_flat(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;clone()-&gt;set_tensor_shape(shape_flatten).set_is_resizable(<span class="keyword">true</span>));</div><div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160; }</div><div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160;</div><div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classarm__compute_1_1_status.xhtml">Status</a>{};</div><div class="line"><a name="l00203"></a><span class="lineno"> 203</span>&#160;}</div><div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160;</div><div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160;<span class="keyword">template</span> &lt;<span class="keywordtype">bool</span> IS_LOG&gt;</div><div class="line"><a name="l00206"></a><span class="lineno"><a class="line" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#ad1717410afd0be936c6213a63c8005fb"> 206</a></span>&#160;<span class="keywordtype">void</span> <a class="code" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#ad1717410afd0be936c6213a63c8005fb">CLSoftmaxLayerGeneric&lt;IS_LOG&gt;::run</a>()</div><div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160;{</div><div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160; <a class="code" href="classarm__compute_1_1_memory_group_resource_scope.xhtml">MemoryGroupResourceScope</a> scope_mg(_memory_group);</div><div class="line"><a name="l00209"></a><span class="lineno"> 209</span>&#160;</div><div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160; <span 
class="keywordflow">if</span>(_needs_flattening)</div><div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160; {</div><div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160; <a class="code" href="classarm__compute_1_1_c_l_scheduler.xhtml#a9b58d0eb9a2af8e6d7908695e1557d6c">CLScheduler::get</a>().<a class="code" href="classarm__compute_1_1_c_l_scheduler.xhtml#ae1a643e517f50bf0392fb6516dd7cf67">enqueue</a>(*_flatten_kernel_ptr, <span class="keyword">false</span>);</div><div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160; }</div><div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160;</div><div class="line"><a name="l00215"></a><span class="lineno"> 215</span>&#160; <a class="code" href="classarm__compute_1_1_c_l_scheduler.xhtml#a9b58d0eb9a2af8e6d7908695e1557d6c">CLScheduler::get</a>().<a class="code" href="classarm__compute_1_1_c_l_scheduler.xhtml#ae1a643e517f50bf0392fb6516dd7cf67">enqueue</a>(_max_shift_exp_sum_kernel, <span class="keyword">false</span>);</div><div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160; <a class="code" href="classarm__compute_1_1_c_l_scheduler.xhtml#a9b58d0eb9a2af8e6d7908695e1557d6c">CLScheduler::get</a>().<a class="code" href="classarm__compute_1_1_c_l_scheduler.xhtml#ae1a643e517f50bf0392fb6516dd7cf67">enqueue</a>(_norm_kernel, !_needs_flattening);</div><div class="line"><a name="l00217"></a><span class="lineno"> 217</span>&#160;</div><div class="line"><a name="l00218"></a><span class="lineno"> 218</span>&#160; <span class="keywordflow">if</span>(_needs_flattening)</div><div class="line"><a name="l00219"></a><span class="lineno"> 219</span>&#160; {</div><div class="line"><a name="l00220"></a><span class="lineno"> 220</span>&#160; <a class="code" href="classarm__compute_1_1_c_l_scheduler.xhtml#a9b58d0eb9a2af8e6d7908695e1557d6c">CLScheduler::get</a>().<a class="code" href="classarm__compute_1_1_c_l_scheduler.xhtml#ae1a643e517f50bf0392fb6516dd7cf67">enqueue</a>(_reshape_kernel, <span class="keyword">true</span>);</div><div class="line"><a name="l00221"></a><span class="lineno"> 221</span>&#160; }</div><div class="line"><a name="l00222"></a><span class="lineno"> 222</span>&#160;}</div><div class="line"><a name="l00223"></a><span class="lineno"> 223</span>&#160;</div><div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160;<span class="keyword">template</span> <span class="keyword">class </span><a class="code" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml">CLSoftmaxLayerGeneric&lt;false&gt;</a>;</div><div class="line"><a name="l00225"></a><span class="lineno"> 225</span>&#160;<span class="keyword">template</span> <span class="keyword">class </span><a class="code" href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml">CLSoftmaxLayerGeneric&lt;true&gt;</a>;</div><div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160;</div><div class="line"><a name="l00227"></a><span class="lineno"> 227</span>&#160;} <span class="comment">// namespace arm_compute</span></div><div class="ttc" id="classarm__compute_1_1_c_l_softmax_layer_generic_xhtml_ad1717410afd0be936c6213a63c8005fb"><div class="ttname"><a href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#ad1717410afd0be936c6213a63c8005fb">arm_compute::CLSoftmaxLayerGeneric::run</a></div><div class="ttdeci">void run() override</div><div class="ttdoc">Run the kernels contained in the function.</div><div class="ttdef"><b>Definition:</b> <a 
href="_c_l_softmax_layer_8cpp_source.xhtml#l00206">CLSoftmaxLayer.cpp:206</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_shape_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor_shape.xhtml">arm_compute::TensorShape</a></div><div class="ttdoc">Shape of a tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_shape_8h_source.xhtml#l00039">TensorShape.h:39</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_softmax_layer_generic_xhtml_a85db282920d24cd0f4dca6a439201a43"><div class="ttname"><a href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#a85db282920d24cd0f4dca6a439201a43">arm_compute::CLSoftmaxLayerGeneric::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *output, float beta=1.0f, size_t axis=1)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of CLSoftmaxLayer.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_softmax_layer_8cpp_source.xhtml#l00156">CLSoftmaxLayer.cpp:156</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_softmax_layer_generic_xhtml_ae3fdd368418ef45db831786e741ea1bb"><div class="ttname"><a href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#ae3fdd368418ef45db831786e741ea1bb">arm_compute::CLSoftmaxLayerGeneric::configure</a></div><div class="ttdeci">void configure(const ICLTensor *input, ICLTensor *output, float beta=1.0f, size_t axis=1)</div><div class="ttdoc">Set the input and output tensors.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_softmax_layer_8cpp_source.xhtml#l00075">CLSoftmaxLayer.cpp:75</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_softmax_layer_generic_xhtml_a21c529666f0d9c998cdae7c52bea4fc1"><div class="ttname"><a href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml#a21c529666f0d9c998cdae7c52bea4fc1">arm_compute::CLSoftmaxLayerGeneric::CLSoftmaxLayerGeneric</a></div><div class="ttdeci">CLSoftmaxLayerGeneric(std::shared_ptr&lt; IMemoryManager &gt; memory_manager=nullptr)</div><div class="ttdoc">Constructor.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_softmax_layer_8cpp_source.xhtml#l00038">CLSoftmaxLayer.cpp:38</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_logits1_d_norm_kernel_xhtml_a5d4222d9de32a72823286d6f3d26c469"><div class="ttname"><a href="classarm__compute_1_1_c_l_logits1_d_norm_kernel.xhtml#a5d4222d9de32a72823286d6f3d26c469">arm_compute::CLLogits1DNormKernel::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, const SoftmaxKernelInfo &amp;info)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of CLLogits1DNormKernel.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_softmax_layer_kernel_8cpp_source.xhtml#l00386">CLSoftmaxLayerKernel.cpp:386</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_scheduler_xhtml_a9b58d0eb9a2af8e6d7908695e1557d6c"><div class="ttname"><a href="classarm__compute_1_1_c_l_scheduler.xhtml#a9b58d0eb9a2af8e6d7908695e1557d6c">arm_compute::CLScheduler::get</a></div><div class="ttdeci">static CLScheduler &amp; get()</div><div class="ttdoc">Access the scheduler singleton.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_scheduler_8cpp_source.xhtml#l00099">CLScheduler.cpp:99</a></div></div>
<div class="ttc" id="structarm__compute_1_1_softmax_kernel_info_xhtml_aa773d9a6c0ccefaa0fc9ab66fec68ec1"><div class="ttname"><a href="structarm__compute_1_1_softmax_kernel_info.xhtml#aa773d9a6c0ccefaa0fc9ab66fec68ec1">arm_compute::SoftmaxKernelInfo::beta</a></div><div class="ttdeci">float beta</div><div class="ttdoc">A scaling factor for the exponent with default value 1.0.</div><div class="ttdef"><b>Definition:</b> <a href="_kernel_descriptors_8h_source.xhtml#l00082">KernelDescriptors.h:82</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a8a1e1c105f0bdaf37db408c7cfcb77a4"><div class="ttname"><a href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ON_ERROR(status)</div><div class="ttdoc">Checks if a status contains an error and returns it.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00204">Error.h:204</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml_a7cfb31af63202568efef5214acfbf3ba"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml#a7cfb31af63202568efef5214acfbf3ba">arm_compute::ITensorInfo::data_type</a></div><div class="ttdeci">virtual DataType data_type() const =0</div><div class="ttdoc">Data type used for each element of the tensor.</div></div>
<div class="ttc" id="namespacearm__compute_1_1misc_1_1shape__calculator_xhtml_ad16b366db486fec63b6d962937ec4545"><div class="ttname"><a href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#ad16b366db486fec63b6d962937ec4545">arm_compute::misc::shape_calculator::compute_softmax_shape</a></div><div class="ttdeci">TensorShape compute_softmax_shape(const ITensorInfo *input, size_t axis=1)</div><div class="ttdoc">Calculate the softmax output shape of a tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_shape_calculator_8h_source.xhtml#l00605">ShapeCalculator.h:605</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml">arm_compute::ITensorInfo</a></div><div class="ttdoc">Store the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="_i_tensor_info_8h_source.xhtml#l00040">ITensorInfo.h:40</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a938dcd406ce611ef5345ad2531cdb948"><div class="ttname"><a href="_error_8h.xhtml#a938dcd406ce611ef5345ad2531cdb948">ARM_COMPUTE_ERROR_THROW_ON</a></div><div class="ttdeci">#define ARM_COMPUTE_ERROR_THROW_ON(status)</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00455">Error.h:455</a></div></div>
<div class="ttc" id="classarm__compute_1_1_status_xhtml"><div class="ttname"><a href="classarm__compute_1_1_status.xhtml">arm_compute::Status</a></div><div class="ttdoc">Status class.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00052">Error.h:52</a></div></div>
<div class="ttc" id="core_2_c_l_2_c_l_helpers_8h_xhtml"><div class="ttname"><a href="core_2_c_l_2_c_l_helpers_8h.xhtml">CLHelpers.h</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml"><div class="ttname"><a href="namespacearm__compute.xhtml">arm_compute</a></div><div class="ttdoc">Copyright (c) 2017-2020 ARM Limited.</div><div class="ttdef"><b>Definition:</b> <a href="00__introduction_8dox_source.xhtml#l00024">00_introduction.dox:24</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_a47be6fa38308d0003c25b60b7dbc45ce"><div class="ttname"><a href="namespacearm__compute.xhtml#a47be6fa38308d0003c25b60b7dbc45ce">arm_compute::auto_init_if_empty</a></div><div class="ttdeci">bool auto_init_if_empty(ITensorInfo &amp;info, const TensorShape &amp;shape, int num_channels, DataType data_type, QuantizationInfo quantization_info=QuantizationInfo())</div><div class="ttdoc">Auto initialize the tensor info (shape, number of channels and data type) if the current assignment i...</div><div class="ttdef"><b>Definition:</b> <a href="_helpers_8inl_source.xhtml#l00202">Helpers.inl:202</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a8fcf2ddd9a1d58b1b280f5c0aed71845"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">arm_compute::test::validation::input</a></div><div class="ttdeci">auto input</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00487">LSTMLayerQuantized.cpp:487</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c"><div class="ttname"><a href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58aa1e28eee0339658d39a8b4d325b56e9c">arm_compute::Format::S32</a></div><div class="ttdoc">1 channel, 1 S32 per channel</div></div>
<div class="ttc" id="arm__compute_2core_2_utils_8h_xhtml"><div class="ttname"><a href="arm__compute_2core_2_utils_8h.xhtml">Utils.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_softmax_layer_generic_xhtml"><div class="ttname"><a href="classarm__compute_1_1_c_l_softmax_layer_generic.xhtml">arm_compute::CLSoftmaxLayerGeneric</a></div><div class="ttdoc">Basic function to compute a SoftmaxLayer.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_softmax_layer_8h_source.xhtml#l00055">CLSoftmaxLayer.h:55</a></div></div>
<div class="ttc" id="_c_l_scheduler_8h_xhtml"><div class="ttname"><a href="_c_l_scheduler_8h.xhtml">CLScheduler.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_quantization_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_quantization_info.xhtml">arm_compute::QuantizationInfo</a></div><div class="ttdoc">Quantization information.</div><div class="ttdef"><b>Definition:</b> <a href="_quantization_info_8h_source.xhtml#l00069">QuantizationInfo.h:69</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a6dc630a6ae9cc063b3924bcea8dee9d6"><div class="ttname"><a href="_error_8h.xhtml#a6dc630a6ae9cc063b3924bcea8dee9d6">ARM_COMPUTE_UNUSED</a></div><div class="ttdeci">#define ARM_COMPUTE_UNUSED(...)</div><div class="ttdoc">To avoid unused variables warnings.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00152">Error.h:152</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml_a7c66505457d00ece3aa4b34cab80757d"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml#a7c66505457d00ece3aa4b34cab80757d">arm_compute::ITensorInfo::tensor_shape</a></div><div class="ttdeci">virtual const TensorShape &amp; tensor_shape() const =0</div><div class="ttdoc">Size for each dimension of the tensor.</div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ae008e90eb6906fa3526213bc860f6cc5"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ae008e90eb6906fa3526213bc860f6cc5">arm_compute::test::validation::input_info</a></div><div class="ttdeci">input_info</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_winograd_8cpp_source.xhtml#l00328">Winograd.cpp:328</a></div></div>
<div class="ttc" id="_c_l_softmax_layer_kernel_8h_xhtml"><div class="ttname"><a href="_c_l_softmax_layer_kernel_8h.xhtml">CLSoftmaxLayerKernel.h</a></div></div>
<div class="ttc" id="structarm__compute_1_1_softmax_kernel_info_xhtml_abfa079f56a93e0152cfe85919506fb51"><div class="ttname"><a href="structarm__compute_1_1_softmax_kernel_info.xhtml#abfa079f56a93e0152cfe85919506fb51">arm_compute::SoftmaxKernelInfo::input_data_type</a></div><div class="ttdeci">DataType input_data_type</div><div class="ttdoc">Input tensor data type.</div><div class="ttdef"><b>Definition:</b> <a href="_kernel_descriptors_8h_source.xhtml#l00084">KernelDescriptors.h:84</a></div></div>
<div class="ttc" id="structarm__compute_1_1_softmax_kernel_info_xhtml_a5dc2a3a0b94fa51d8bccdd5bec7d6eb0"><div class="ttname"><a href="structarm__compute_1_1_softmax_kernel_info.xhtml#a5dc2a3a0b94fa51d8bccdd5bec7d6eb0">arm_compute::SoftmaxKernelInfo::is_log</a></div><div class="ttdeci">bool is_log</div><div class="ttdoc">Flag used to perform Log Softmax operation.</div><div class="ttdef"><b>Definition:</b> <a href="_kernel_descriptors_8h_source.xhtml#l00083">KernelDescriptors.h:83</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_flatten_layer_kernel_xhtml_a968b23a6ef327fcfb5b99d58e3fbe883"><div class="ttname"><a href="classarm__compute_1_1_c_l_flatten_layer_kernel.xhtml#a968b23a6ef327fcfb5b99d58e3fbe883">arm_compute::CLFlattenLayerKernel::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *output)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of CLFlattenLayerKernel.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_flatten_layer_kernel_8cpp_source.xhtml#l00122">CLFlattenLayerKernel.cpp:122</a></div></div>
<div class="ttc" id="_shape_calculator_8h_xhtml"><div class="ttname"><a href="_shape_calculator_8h.xhtml">ShapeCalculator.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1misc_1_1_i_cloneable_xhtml_a4d10e5012a872e7f78f2b539b673049d"><div class="ttname"><a href="classarm__compute_1_1misc_1_1_i_cloneable.xhtml#a4d10e5012a872e7f78f2b539b673049d">arm_compute::misc::ICloneable::clone</a></div><div class="ttdeci">virtual std::unique_ptr&lt; T &gt; clone() const =0</div><div class="ttdoc">Provide a clone of the current object of class T.</div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_xhtml_a0e95dc1e53c361348314873b168ae237"><div class="ttname"><a href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">arm_compute::ITensor::info</a></div><div class="ttdeci">virtual ITensorInfo * info() const =0</div><div class="ttdoc">Interface to be implemented by the child class to return the tensor's metadata.</div></div>
<div class="ttc" id="_i_c_l_kernel_8h_xhtml"><div class="ttname"><a href="_i_c_l_kernel_8h.xhtml">ICLKernel.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_reshape_layer_kernel_xhtml_a968b23a6ef327fcfb5b99d58e3fbe883"><div class="ttname"><a href="classarm__compute_1_1_c_l_reshape_layer_kernel.xhtml#a968b23a6ef327fcfb5b99d58e3fbe883">arm_compute::CLReshapeLayerKernel::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *output)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of CLReshapeLayerKernel.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_reshape_layer_kernel_8cpp_source.xhtml#l00104">CLReshapeLayerKernel.cpp:104</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_scheduler_xhtml_ae1a643e517f50bf0392fb6516dd7cf67"><div class="ttname"><a href="classarm__compute_1_1_c_l_scheduler.xhtml#ae1a643e517f50bf0392fb6516dd7cf67">arm_compute::CLScheduler::enqueue</a></div><div class="ttdeci">void enqueue(ICLKernel &amp;kernel, bool flush=true)</div><div class="ttdoc">Schedule the execution of the passed kernel if possible.</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_scheduler_8cpp_source.xhtml#l00154">CLScheduler.cpp:154</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_a14f46283f316e7f0fad301d5c1507e9f"><div class="ttname"><a href="namespacearm__compute.xhtml#a14f46283f316e7f0fad301d5c1507e9f">arm_compute::is_data_type_quantized_asymmetric</a></div><div class="ttdeci">bool is_data_type_quantized_asymmetric(DataType dt)</div><div class="ttdoc">Check if a given data type is of asymmetric quantized type.</div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_utils_8h_source.xhtml#l01139">Utils.h:1139</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_info_xhtml_ad6b64f33be1e66dcf7612483ffb8fd63"><div class="ttname"><a href="classarm__compute_1_1_tensor_info.xhtml#ad6b64f33be1e66dcf7612483ffb8fd63">arm_compute::TensorInfo::init</a></div><div class="ttdeci">void init(Format format)</div><div class="ttdoc">Initialize the tensor info with just a format.</div><div class="ttdef"><b>Definition:</b> <a href="src_2core_2_tensor_info_8cpp_source.xhtml#l00107">TensorInfo.cpp:107</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_aff911654521523937ff24372a870b89f"><div class="ttname"><a href="_validate_8h.xhtml#aff911654521523937ff24372a870b89f">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00163">Validate.h:163</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_a921b705e9e3e0fe928928447869e62a5"><div class="ttname"><a href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a></div><div class="ttdeci">#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00161">Validate.h:161</a></div></div>
<div class="ttc" id="classarm__compute_1_1_memory_group_resource_scope_xhtml"><div class="ttname"><a href="classarm__compute_1_1_memory_group_resource_scope.xhtml">arm_compute::MemoryGroupResourceScope</a></div><div class="ttdoc">Memory group resources scope handling class.</div><div class="ttdef"><b>Definition:</b> <a href="_i_memory_group_8h_source.xhtml#l00082">IMemoryGroup.h:82</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_c_l_tensor_xhtml"><div class="ttname"><a href="classarm__compute_1_1_i_c_l_tensor.xhtml">arm_compute::ICLTensor</a></div><div class="ttdoc">Interface for OpenCL tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_i_c_l_tensor_8h_source.xhtml#l00042">ICLTensor.h:42</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_accc088009d44c521706aa98d6387ee21"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#accc088009d44c521706aa98d6387ee21">arm_compute::test::validation::axis</a></div><div class="ttdeci">axis</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_o_n_2_stack_layer_8cpp_source.xhtml#l00226">StackLayer.cpp:226</a></div></div>
<div class="ttc" id="classarm__compute_1_1_c_l_logits1_d_max_shift_exp_sum_kernel_xhtml_a62885f3bde4b458c02316bfc3f339fbc"><div class="ttname"><a href="classarm__compute_1_1_c_l_logits1_d_max_shift_exp_sum_kernel.xhtml#a62885f3bde4b458c02316bfc3f339fbc">arm_compute::CLLogits1DMaxShiftExpSumKernel::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *max, const ITensorInfo *output, const ITensorInfo *sum)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of CLLogits1DMaxShiftExpSum...</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_softmax_layer_kernel_8cpp_source.xhtml#l00291">CLSoftmaxLayerKernel.cpp:291</a></div></div>
<div class="ttc" id="_c_l_softmax_layer_8h_xhtml"><div class="ttname"><a href="_c_l_softmax_layer_8h.xhtml">CLSoftmaxLayer.h</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a1c69762a42ab8add645d0a949b6f4b1f"><div class="ttname"><a href="_error_8h.xhtml#a1c69762a42ab8add645d0a949b6f4b1f">ARM_COMPUTE_RETURN_ERROR_ON_MSG</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON_MSG(cond, msg)</div><div class="ttdoc">If the condition is true, an error is returned.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00244">Error.h:244</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_shape_xhtml_a9c54fb6cea3557692fe7c00c40bb40ad"><div class="ttname"><a href="classarm__compute_1_1_tensor_shape.xhtml#a9c54fb6cea3557692fe7c00c40bb40ad">arm_compute::TensorShape::set</a></div><div class="ttdeci">TensorShape &amp; set(size_t dimension, size_t value, bool apply_dim_correction=true)</div><div class="ttdoc">Accessor to set the value of one of the dimensions.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_shape_8h_source.xhtml#l00078">TensorShape.h:78</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor_info.xhtml">arm_compute::TensorInfo</a></div><div class="ttdoc">Store the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_info_8h_source.xhtml#l00045">TensorInfo.h:45</a></div></div>
<div class="ttc" id="arm__compute_2core_2_helpers_8h_xhtml"><div class="ttname"><a href="arm__compute_2core_2_helpers_8h.xhtml">Helpers.h</a></div></div>
<div class="ttc" id="structarm__compute_1_1_softmax_kernel_info_xhtml"><div class="ttname"><a href="structarm__compute_1_1_softmax_kernel_info.xhtml">arm_compute::SoftmaxKernelInfo</a></div><div class="ttdoc">Descriptor used by the softmax kernels.</div><div class="ttdef"><b>Definition:</b> <a href="_kernel_descriptors_8h_source.xhtml#l00080">KernelDescriptors.h:80</a></div></div>
<div class="ttc" id="arm__compute_2core_2_types_8h_xhtml"><div class="ttname"><a href="arm__compute_2core_2_types_8h.xhtml">Types.h</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ad8ed01ff3ff33333d8e19db4d2818bb6"><div class="ttname"><a href="namespacearm__compute.xhtml#ad8ed01ff3ff33333d8e19db4d2818bb6">arm_compute::DataType</a></div><div class="ttdeci">DataType</div><div class="ttdoc">Available data types.</div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l00075">Types.h:75</a></div></div>
</div><!-- fragment --></div><!-- contents -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
<li class="navelem"><a class="el" href="dir_68267d1309a1af8e8297ef4c3efbcdba.xhtml">src</a></li><li class="navelem"><a class="el" href="dir_bf9f26469d00835ba20ff8d80ee5a804.xhtml">runtime</a></li><li class="navelem"><a class="el" href="dir_43c3fdbf778d1fd99e2e38f09fddd920.xhtml">CL</a></li><li class="navelem"><a class="el" href="dir_0304d3529340c629ae0050036d07056a.xhtml">functions</a></li><li class="navelem"><a class="el" href="_c_l_softmax_layer_8cpp.xhtml">CLSoftmaxLayer.cpp</a></li>
<li class="footer">Generated on Thu Mar 5 2020 16:07:03 for Compute Library by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.15 </li>
</ul>
</div>
</body>
</html>