Compute Library 20.02.1
<div class="header">
<div class="headertitle">
<div class="title">NELSTMLayer.h</div> </div>
</div><!--header-->
<div class="contents">
<a href="_n_e_l_s_t_m_layer_8h.xhtml">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">/*</span></div><div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment"> * Copyright (c) 2018-2019 ARM Limited.</span></div><div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment"> * SPDX-License-Identifier: MIT</span></div><div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="comment"> * Permission is hereby granted, free of charge, to any person obtaining a copy</span></div><div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="comment"> * of this software and associated documentation files (the &quot;Software&quot;), to</span></div><div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="comment"> * deal in the Software without restriction, including without limitation the</span></div><div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="comment"> * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or</span></div><div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="comment"> * sell copies of the Software, and to permit persons to whom the Software is</span></div><div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;<span class="comment"> * furnished to do so, subject to the following conditions:</span></div><div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160;<span class="comment"> * The above copyright notice and this permission notice shall be included in all</span></div><div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="comment"> * copies or substantial portions of the Software.</span></div><div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;<span class="comment"> * THE SOFTWARE IS PROVIDED &quot;AS IS&quot;, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR</span></div><div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="comment"> * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,</span></div><div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160;<span class="comment"> * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE</span></div><div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160;<span class="comment"> * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER</span></div><div class="line"><a name="l00020"></a><span class="lineno"> 20</span>&#160;<span class="comment"> * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,</span></div><div class="line"><a name="l00021"></a><span class="lineno"> 21</span>&#160;<span class="comment"> * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE</span></div><div class="line"><a name="l00022"></a><span class="lineno"> 22</span>&#160;<span class="comment"> * SOFTWARE.</span></div><div class="line"><a name="l00023"></a><span class="lineno"> 23</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;<span class="preprocessor">#ifndef ARM_COMPUTE_NELSTMLAYER_H</span></div><div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;<span class="preprocessor">#define ARM_COMPUTE_NELSTMLAYER_H</span></div><div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;</div><div class="line"><a name="l00027"></a><span class="lineno"> 27</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_activation_layer_kernel_8h.xhtml">arm_compute/core/NEON/kernels/NEActivationLayerKernel.h</a>&quot;</span></div><div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_arithmetic_addition_kernel_8h.xhtml">arm_compute/core/NEON/kernels/NEArithmeticAdditionKernel.h</a>&quot;</span></div><div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_arithmetic_subtraction_kernel_8h.xhtml">arm_compute/core/NEON/kernels/NEArithmeticSubtractionKernel.h</a>&quot;</span></div><div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_copy_kernel_8h.xhtml">arm_compute/core/NEON/kernels/NECopyKernel.h</a>&quot;</span></div><div class="line"><a name="l00031"></a><span class="lineno"> 31</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_pixel_wise_multiplication_kernel_8h.xhtml">arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h</a>&quot;</span></div><div class="line"><a name="l00032"></a><span class="lineno"> 32</span>&#160;</div><div class="line"><a name="l00033"></a><span class="lineno"> 33</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="arm__compute_2core_2_types_8h.xhtml">arm_compute/core/Types.h</a>&quot;</span></div><div class="line"><a name="l00034"></a><span class="lineno"> 34</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_arithmetic_addition_8h.xhtml">arm_compute/runtime/NEON/functions/NEArithmeticAddition.h</a>&quot;</span></div><div class="line"><a name="l00035"></a><span class="lineno"> 35</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_concatenate_layer_8h.xhtml">arm_compute/runtime/NEON/functions/NEConcatenateLayer.h</a>&quot;</span></div><div class="line"><a name="l00036"></a><span class="lineno"> 36</span>&#160;<span class="preprocessor">#include &quot;<a class="code" 
href="_n_e_fully_connected_layer_8h.xhtml">arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h</a>&quot;</span></div><div class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_g_e_m_m_8h.xhtml">arm_compute/runtime/NEON/functions/NEGEMM.h</a>&quot;</span></div><div class="line"><a name="l00038"></a><span class="lineno"> 38</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_n_e_mean_std_dev_normalization_layer_8h.xhtml">arm_compute/runtime/NEON/functions/NEMeanStdDevNormalizationLayer.h</a>&quot;</span></div><div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_l_s_t_m_params_8h.xhtml">arm_compute/runtime/common/LSTMParams.h</a>&quot;</span></div><div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160;</div><div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160;<span class="keyword">namespace </span><a class="code" href="namespacearm__compute.xhtml">arm_compute</a></div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160;{</div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160;<span class="comment">// Forward declarations</span></div><div class="line"><a name="l00044"></a><span class="lineno"> 44</span>&#160;<span class="keyword">class </span>ITensor;</div><div class="line"><a name="l00045"></a><span class="lineno"> 45</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00046"></a><span class="lineno"> 46</span>&#160;<span class="comment">/** Basic function to run @ref NELSTMLayer */</span></div><div class="line"><a name="l00047"></a><span class="lineno"><a class="line" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml"> 47</a></span>&#160;<span class="keyword">class </span><a class="code" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml">NELSTMLayer</a> : <span class="keyword">public</span> <a class="code" href="classarm__compute_1_1_i_function.xhtml">IFunction</a></div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160;{</div><div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160;<span class="keyword">public</span>:<span class="comment"></span></div><div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160;<span class="comment"> /** Default constructor */</span></div><div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#a8d8d5b5c66b732b3fc9494b0e743ed3f">NELSTMLayer</a>(std::shared_ptr&lt;IMemoryManager&gt; memory_manager = <span class="keyword">nullptr</span>);<span class="comment"></span></div><div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160;<span class="comment"> /** Initialize function&#39;s tensors.</span></div><div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160;<span class="comment"> * @param[in] input Source tensor. Input is a 2D tensor with dimensions [input_size, batch_size]. Data types supported: F16/F32.</span></div><div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160;<span class="comment"> * @param[in] input_to_forget_weights 2D weights tensor with dimensions [input_size, num_units]. 
Data type supported: Same as @p input.</span></div><div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160;<span class="comment"> * @param[in] input_to_cell_weights 2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160;<span class="comment"> * @param[in] input_to_output_weights 2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00058"></a><span class="lineno"> 58</span>&#160;<span class="comment"> * @param[in] recurrent_to_forget_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00059"></a><span class="lineno"> 59</span>&#160;<span class="comment"> * @param[in] recurrent_to_cell_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160;<span class="comment"> * @param[in] recurrent_to_output_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160;<span class="comment"> * @param[in] forget_gate_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160;<span class="comment"> * @param[in] cell_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160;<span class="comment"> * @param[in] output_gate_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160;<span class="comment"> * @param[in] output_state_in 2D weights tensor with dimensions [output_size, batch_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160;<span class="comment"> * @param[in] cell_state_in 2D tensor with dimensions [num_units, batch_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160;<span class="comment"> * @param[out] scratch_buffer 2D tensor with dimensions [num_units * 4, batch_size] with CIFG or [num_units * 3, batch_size] without CIGF. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160;<span class="comment"> * @param[out] output_state_out 2D weights tensor with dimensions [output_size, batch_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160;<span class="comment"> * @param[out] cell_state_out 2D tensor with dimensions [num_units, batch_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160;<span class="comment"> * @param[out] output Destination tensor. 
Output is a 2D tensor with dimensions [output_size, batch_size].</span></div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160;<span class="comment"> * Data types supported: Same as @p input.</span></div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160;<span class="comment"> * @param[in] lstm_params (Optional) Weights tensors used in peephole optimization:</span></div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160;<span class="comment"> * input_to_input_weights 2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160;<span class="comment"> * recurrent_to_input_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160;<span class="comment"> * cell_to_input_weights 1D weights tensor with dimensions [num_units]. Can be nullptr. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160;<span class="comment"> * cell_to_forget_weights 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160;<span class="comment"> * cell_to_output_weights 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160;<span class="comment"> * input_gate_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input</span></div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160;<span class="comment"> * projection_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160;<span class="comment"> * projection_bias 1D weights tensor with dimensions [output_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160;<span class="comment"> * input_layer_norm_coefficients 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160;<span class="comment"> * forget_layer_norm_coefficients 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160;<span class="comment"> * cell_layer_norm_coefficients 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160;<span class="comment"> * output_layer_norm_coefficients 1D weights tensor with dimensions [num_units]. 
Data type supported: Same as @p input.</span></div><div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160;<span class="comment"> * @param[in] activation_info Contains activation information described in @ref ActivationLayerInfo.</span></div><div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160;<span class="comment"> * @param[in] cell_threshold The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip]. If set to 0.0 then clipping is disabled.</span></div><div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160;<span class="comment"> * @param[in] projection_threshold The clipping threshold for the output from the projection layer, such that values are bound within [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.</span></div><div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160; <span class="keywordtype">void</span> <a class="code" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa899feaf94d69eb04afb0cd412869548">configure</a>(<span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>,</div><div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">input_to_forget_weights</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">input_to_cell_weights</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>,</div><div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">recurrent_to_forget_weights</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">recurrent_to_cell_weights</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>,</div><div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *cell_bias, <span class="keyword">const</span> <a class="code" 
href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>,</div><div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *output_state_in, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *cell_state_in,</div><div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160; <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *scratch_buffer, <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *output_state_out, <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *cell_state_out, <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *output,</div><div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml">LSTMParams&lt;ITensor&gt;</a> &amp;lstm_params, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a> &amp;activation_info, <span class="keywordtype">float</span> cell_threshold = 0.f, <span class="keywordtype">float</span> projection_threshold = 0.f);</div><div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160;<span class="comment"></span></div><div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160;<span class="comment"> /** Static function to check if given info will lead to a valid configuration of @ref NELSTMLayer</span></div><div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160;<span class="comment"> * @param[in] input Source tensor. Input is a 2D tensor with dimensions [input_size, batch_size]. Data types supported: F16/F32.</span></div><div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160;<span class="comment"> * @param[in] input_to_forget_weights 2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160;<span class="comment"> * @param[in] input_to_cell_weights 2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160;<span class="comment"> * @param[in] input_to_output_weights 2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160;<span class="comment"> * @param[in] recurrent_to_forget_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160;<span class="comment"> * @param[in] recurrent_to_cell_weights 2D weights tensor with dimensions [output_size, num_units]. 
Data type supported: Same as @p input.</span></div><div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160;<span class="comment"> * @param[in] recurrent_to_output_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160;<span class="comment"> * @param[in] forget_gate_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160;<span class="comment"> * @param[in] cell_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160;<span class="comment"> * @param[in] output_gate_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160;<span class="comment"> * @param[in] output_state_in 2D weights tensor with dimensions [output_size, batch_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160;<span class="comment"> * @param[in] cell_state_in 2D tensor with dimensions [num_units, batch_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160;<span class="comment"> * @param[in] scratch_buffer 2D tensor with dimensions [num_units * 4, batch_size] with CIFG or [num_units * 3, batch_size] without CIGF. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160;<span class="comment"> * @param[in] output_state_out 2D weights tensor with dimensions [output_size, batch_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160;<span class="comment"> * @param[in] cell_state_out 2D tensor with dimensions [num_units, batch_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160;<span class="comment"> * @param[in] output Destination tensor. Output is a 2D tensor with dimensions [output_size, batch_size].</span></div><div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160;<span class="comment"> * Data types supported: Same as @p input.</span></div><div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160;<span class="comment"> * @param[in] lstm_params (Optional) Weights tensors used in peephole optimization:</span></div><div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160;<span class="comment"> * input_to_input_weights 2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160;<span class="comment"> * recurrent_to_input_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160;<span class="comment"> * cell_to_input_weights 1D weights tensor with dimensions [num_units]. Can be nullptr. 
Data type supported: Same as @p input.</span></div><div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160;<span class="comment"> * cell_to_forget_weights 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160;<span class="comment"> * cell_to_output_weights 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160;<span class="comment"> * input_gate_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as @p input</span></div><div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160;<span class="comment"> * projection_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160;<span class="comment"> * projection_bias 1D weights tensor with dimensions [output_size]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160;<span class="comment"> * input_layer_norm_coefficients 1D weights tensor info with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160;<span class="comment"> * forget_layer_norm_coefficients 1D weights tensor info with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160;<span class="comment"> * cell_layer_norm_coefficients 1D weights tensor info with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160;<span class="comment"> * output_layer_norm_coefficients 1D weights tensor info with dimensions [num_units]. Data type supported: Same as @p input.</span></div><div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160;<span class="comment"> * @param[in] activation_info Contains activation information described in @ref ActivationLayerInfo.</span></div><div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160;<span class="comment"> * @param[in] cell_threshold The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip]. If set to 0.0 then clipping is disabled.</span></div><div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160;<span class="comment"> * @param[in] projection_threshold The clipping threshold for the output from the projection layer, such that values are bound within [-proj_clip, proj_clip]. 
If set to 0.0 then clipping is disabled.</span></div><div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160;<span class="comment"> * @return a status</span></div><div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160; <span class="keyword">static</span> <a class="code" href="classarm__compute_1_1_status.xhtml">Status</a> <a class="code" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa05bceba37ded272a464a90becd9cd99">validate</a>(<span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>,</div><div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">input_to_forget_weights</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">input_to_cell_weights</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>,</div><div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">recurrent_to_forget_weights</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">recurrent_to_cell_weights</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>,</div><div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *cell_bias, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>,</div><div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> 
*output_state_in, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *cell_state_in,</div><div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *scratch_buffer, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *output_state_out, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *cell_state_out, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *output,</div><div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml">LSTMParams&lt;ITensorInfo&gt;</a> &amp;lstm_params, <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a> &amp;activation_info, <span class="keywordtype">float</span> cell_threshold = 0.f, <span class="keywordtype">float</span> projection_threshold = 0.f);</div><div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160;</div><div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; <span class="comment">// Inherited methods overridden:</span></div><div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160; <span class="keywordtype">void</span> <a class="code" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>() <span class="keyword">override</span>;</div><div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160; <span class="keywordtype">void</span> <a class="code" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa9b93ef660fc3c5b4b19d3fc7b891b77">prepare</a>() <span class="keyword">override</span>;</div><div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160;</div><div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160;<span class="keyword">private</span>:</div><div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; <a class="code" href="classarm__compute_1_1_memory_group.xhtml">MemoryGroup</a> _memory_group;</div><div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml">NEFullyConnectedLayer</a> _fully_connected_input_gate;</div><div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml">NEArithmeticAddition</a> _accum_input_gate1;</div><div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_subtraction_kernel.xhtml">NEArithmeticSubtractionKernel</a> _subtract_input_gate;</div><div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> _pixelwise_mul_input_gate;</div><div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml">NEActivationLayerKernel</a> _activation_input_gate;</div><div class="line"><a 
name="l00153"></a><span class="lineno"> 153</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml">NEFullyConnectedLayer</a> _fully_connected_forget_gate;</div><div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml">NEArithmeticAddition</a> _accum_forget_gate1;</div><div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> _pixelwise_mul_forget_gate;</div><div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml">NEActivationLayerKernel</a> _activation_forget_gate;</div><div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml">NEFullyConnectedLayer</a> _fully_connected_cell_state;</div><div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_g_e_m_m.xhtml">NEGEMM</a> _gemm_cell_state1;</div><div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_transpose_kernel.xhtml">NETransposeKernel</a> _transpose_cell_state;</div><div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml">NEArithmeticAdditionKernel</a> _accum_cell_state1;</div><div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml">NEArithmeticAdditionKernel</a> _accum_cell_state2;</div><div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> _pixelwise_mul_cell_state1;</div><div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml">NEActivationLayerKernel</a> _activation_cell_state;</div><div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml">NEActivationLayerKernel</a> _cell_clip;</div><div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> _pixelwise_mul_cell_state2;</div><div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml">NEFullyConnectedLayer</a> _fully_connected_output;</div><div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> _pixelwise_mul_output_state1;</div><div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml">NEArithmeticAddition</a> _accum_output1;</div><div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160; <a class="code" 
href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml">NEActivationLayerKernel</a> _activation_output;</div><div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml">NEActivationLayerKernel</a> _activation_output_state;</div><div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> _pixelwise_mul_output_state2;</div><div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml">NEFullyConnectedLayer</a> _fully_connected_output_state;</div><div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml">NEActivationLayerKernel</a> _projection_clip;</div><div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_copy_kernel.xhtml">NECopyKernel</a> _copy_cell_state;</div><div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_copy_kernel.xhtml">NECopyKernel</a> _copy_output;</div><div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml">NEConcatenateLayer</a> _concat_scratch_buffer;</div><div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml">NEConcatenateLayer</a> _concat_inputs_forget_gate;</div><div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml">NEConcatenateLayer</a> _concat_weights_forget_gate;</div><div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml">NEConcatenateLayer</a> _concat_weights_input_gate;</div><div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml">NEConcatenateLayer</a> _concat_weights_output;</div><div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml">NEMeanStdDevNormalizationLayer</a> _mean_std_norm_input_gate;</div><div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> _pixelwise_mul_input_gate_coeff;</div><div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml">NEArithmeticAdditionKernel</a> _accum_input_gate_bias;</div><div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml">NEMeanStdDevNormalizationLayer</a> _mean_std_norm_forget_gate;</div><div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> 
_pixelwise_mul_forget_gate_coeff;</div><div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml">NEArithmeticAdditionKernel</a> _accum_forget_gate_bias;</div><div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml">NEMeanStdDevNormalizationLayer</a> _mean_std_norm_cell_gate;</div><div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> _pixelwise_mul_cell_gate_coeff;</div><div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml">NEArithmeticAdditionKernel</a> _accum_cell_gate_bias;</div><div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml">NEMeanStdDevNormalizationLayer</a> _mean_std_norm_output_gate;</div><div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">NEPixelWiseMultiplicationKernel</a> _pixelwise_mul_output_gate_coeff;</div><div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml">NEArithmeticAdditionKernel</a> _accum_output_gate_bias;</div><div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _input_gate_out1;</div><div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _input_gate_out2;</div><div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _input_gate_out3;</div><div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _input_gate_out4;</div><div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _forget_gate_out1;</div><div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _forget_gate_out2;</div><div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _forget_gate_out3;</div><div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _forget_gate_out4;</div><div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _forget_gate_out5;</div><div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _forget_gate_out6;</div><div class="line"><a name="l00203"></a><span class="lineno"> 203</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _cell_state_out1;</div><div class="line"><a 
name="l00204"></a><span class="lineno"> 204</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _cell_state_out2;</div><div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _cell_state_out3;</div><div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _cell_state_out4;</div><div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _cell_state_out5;</div><div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _output1;</div><div class="line"><a name="l00209"></a><span class="lineno"> 209</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _output2;</div><div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _output3;</div><div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _output4;</div><div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _cell_state_activation;</div><div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _output_state1;</div><div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _ones;</div><div class="line"><a name="l00215"></a><span class="lineno"> 215</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _input_layer_norm_out1;</div><div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _input_layer_norm_out2;</div><div class="line"><a name="l00217"></a><span class="lineno"> 217</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _forget_layer_norm_out1;</div><div class="line"><a name="l00218"></a><span class="lineno"> 218</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _forget_layer_norm_out2;</div><div class="line"><a name="l00219"></a><span class="lineno"> 219</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _cell_layer_norm_out1;</div><div class="line"><a name="l00220"></a><span class="lineno"> 220</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _cell_layer_norm_out2;</div><div class="line"><a name="l00221"></a><span class="lineno"> 221</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _output_layer_norm_out1;</div><div class="line"><a name="l00222"></a><span class="lineno"> 222</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> _output_layer_norm_out2;</div><div class="line"><a name="l00223"></a><span class="lineno"> 223</span>&#160; <span class="keywordtype">bool</span> _run_peephole_opt;</div><div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160; <span class="keywordtype">bool</span> _run_cifg_opt;</div><div class="line"><a name="l00225"></a><span 
class="lineno"> 225</span>&#160; <span class="keywordtype">bool</span> _perform_cell_clipping;</div><div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160; <span class="keywordtype">bool</span> _has_projection_weights;</div><div class="line"><a name="l00227"></a><span class="lineno"> 227</span>&#160; <span class="keywordtype">bool</span> _perform_projection_clipping;</div><div class="line"><a name="l00228"></a><span class="lineno"> 228</span>&#160; <span class="keywordtype">bool</span> _is_prepared;</div><div class="line"><a name="l00229"></a><span class="lineno"> 229</span>&#160; <span class="keywordtype">bool</span> _is_layer_norm_lstm;</div><div class="line"><a name="l00230"></a><span class="lineno"> 230</span>&#160;};</div><div class="line"><a name="l00231"></a><span class="lineno"> 231</span>&#160;} <span class="comment">// namespace arm_compute</span></div><div class="line"><a name="l00232"></a><span class="lineno"> 232</span>&#160;<span class="preprocessor">#endif </span><span class="comment">/* ARM_COMPUTE_NELSTMLAYER_H */</span><span class="preprocessor"></span></div><div class="ttc" id="classarm__compute_1_1_memory_group_xhtml"><div class="ttname"><a href="classarm__compute_1_1_memory_group.xhtml">arm_compute::MemoryGroup</a></div><div class="ttdoc">Memory group.</div><div class="ttdef"><b>Definition:</b> <a href="_memory_group_8h_source.xhtml#l00043">MemoryGroup.h:43</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a507bd7e4d98cb3e45d3e820d8bac422a"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">arm_compute::test::validation::output_gate_bias</a></div><div class="ttdeci">auto output_gate_bias</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00484">LSTMLayerQuantized.cpp:484</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_function_xhtml"><div class="ttname"><a href="classarm__compute_1_1_i_function.xhtml">arm_compute::IFunction</a></div><div class="ttdoc">Base class for all functions.</div><div class="ttdef"><b>Definition:</b> <a href="_i_function_8h_source.xhtml#l00030">IFunction.h:30</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_arithmetic_addition_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml">arm_compute::NEArithmeticAddition</a></div><div class="ttdoc">Basic function to run NEArithmeticAdditionKernel.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_arithmetic_addition_8h_source.xhtml#l00035">NEArithmeticAddition.h:35</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_g_e_m_m_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_g_e_m_m.xhtml">arm_compute::NEGEMM</a></div><div class="ttdoc">Basic function to execute GEMM on NEON.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_g_e_m_m_8h_source.xhtml#l00059">NEGEMM.h:59</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_l_s_t_m_layer_xhtml_aa05bceba37ded272a464a90becd9cd99"><div class="ttname"><a href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa05bceba37ded272a464a90becd9cd99">arm_compute::NELSTMLayer::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights, const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights, const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias, const ITensorInfo *output_state_in, const ITensorInfo *cell_state_in, const ITensorInfo *scratch_buffer, const ITensorInfo *output_state_out, const ITensorInfo *cell_state_out, const ITensorInfo *output, const LSTMParams&lt; ITensorInfo &gt; &amp;lstm_params, const ActivationLayerInfo &amp;activation_info, float cell_threshold=0.f, float projection_threshold=0.f)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NELSTMLayer.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00381">NELSTMLayer.cpp:381</a></div></div>
<div class="ttc" id="_n_e_g_e_m_m_8h_xhtml"><div class="ttname"><a href="_n_e_g_e_m_m_8h.xhtml">NEGEMM.h</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a55daaf57fb833fc416d779c28f7a3c85"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">arm_compute::test::validation::forget_gate_bias</a></div><div class="ttdeci">auto forget_gate_bias</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00482">LSTMLayerQuantized.cpp:482</a></div></div>
<div class="ttc" id="_n_e_pixel_wise_multiplication_kernel_8h_xhtml"><div class="ttname"><a href="_n_e_pixel_wise_multiplication_kernel_8h.xhtml">NEPixelWiseMultiplicationKernel.h</a></div></div>
<div class="ttc" id="_n_e_copy_kernel_8h_xhtml"><div class="ttname"><a href="_n_e_copy_kernel_8h.xhtml">NECopyKernel.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_l_s_t_m_layer_xhtml_aa899feaf94d69eb04afb0cd412869548"><div class="ttname"><a href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa899feaf94d69eb04afb0cd412869548">arm_compute::NELSTMLayer::configure</a></div><div class="ttdeci">void configure(const ITensor *input, const ITensor *input_to_forget_weights, const ITensor *input_to_cell_weights, const ITensor *input_to_output_weights, const ITensor *recurrent_to_forget_weights, const ITensor *recurrent_to_cell_weights, const ITensor *recurrent_to_output_weights, const ITensor *forget_gate_bias, const ITensor *cell_bias, const ITensor *output_gate_bias, const ITensor *output_state_in, const ITensor *cell_state_in, ITensor *scratch_buffer, ITensor *output_state_out, ITensor *cell_state_out, ITensor *output, const LSTMParams&lt; ITensor &gt; &amp;lstm_params, const ActivationLayerInfo &amp;activation_info, float cell_threshold=0.f, float projection_threshold=0.f)</div><div class="ttdoc">Initialize function's tensors.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00056">NELSTMLayer.cpp:56</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ac62dfdcc14798598d953342789c9927e"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">arm_compute::test::validation::recurrent_to_forget_weights</a></div><div class="ttdeci">auto recurrent_to_forget_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00478">LSTMLayerQuantized.cpp:478</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml">arm_compute::ITensorInfo</a></div><div class="ttdoc">Store the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="_i_tensor_info_8h_source.xhtml#l00040">ITensorInfo.h:40</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_activation_layer_kernel_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml">arm_compute::NEActivationLayerKernel</a></div><div class="ttdoc">Interface for the activation layer kernel.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_activation_layer_kernel_8h_source.xhtml#l00039">NEActivationLayerKernel.h:39</a></div></div>
<div class="ttc" id="_n_e_arithmetic_addition_kernel_8h_xhtml"><div class="ttname"><a href="_n_e_arithmetic_addition_kernel_8h.xhtml">NEArithmeticAdditionKernel.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_status_xhtml"><div class="ttname"><a href="classarm__compute_1_1_status.xhtml">arm_compute::Status</a></div><div class="ttdoc">Status class.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00052">Error.h:52</a></div></div>
<div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml">arm_compute::ActivationLayerInfo</a></div><div class="ttdoc">Activation Layer Information class.</div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l01615">Types.h:1615</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_xhtml"><div class="ttname"><a href="classarm__compute_1_1_i_tensor.xhtml">arm_compute::ITensor</a></div><div class="ttdoc">Interface for NEON tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_i_tensor_8h_source.xhtml#l00036">ITensor.h:36</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml"><div class="ttname"><a href="namespacearm__compute.xhtml">arm_compute</a></div><div class="ttdoc">Copyright (c) 2017-2020 ARM Limited.</div><div class="ttdef"><b>Definition:</b> <a href="00__introduction_8dox_source.xhtml#l00024">00_introduction.dox:24</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_l_s_t_m_layer_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml">arm_compute::NELSTMLayer</a></div><div class="ttdoc">Basic function to run NELSTMLayer.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_l_s_t_m_layer_8h_source.xhtml#l00047">NELSTMLayer.h:47</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ac547a66fe26967afb94760061ee0d0d1"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">arm_compute::test::validation::input_to_cell_weights</a></div><div class="ttdeci">auto input_to_cell_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00475">LSTMLayerQuantized.cpp:475</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a8fcf2ddd9a1d58b1b280f5c0aed71845"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">arm_compute::test::validation::input</a></div><div class="ttdeci">auto input</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00487">LSTMLayerQuantized.cpp:487</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_arithmetic_subtraction_kernel_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_arithmetic_subtraction_kernel.xhtml">arm_compute::NEArithmeticSubtractionKernel</a></div><div class="ttdoc">Interface for the kernel to perform subtraction between two tensors.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_arithmetic_subtraction_kernel_8h_source.xhtml#l00035">NEArithmeticSubtractionKernel.h:35</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_aab02df8a9ee45153f2fd76e934407fbd"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">arm_compute::test::validation::recurrent_to_output_weights</a></div><div class="ttdeci">auto recurrent_to_output_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00480">LSTMLayerQuantized.cpp:480</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_l_s_t_m_layer_xhtml_ad1717410afd0be936c6213a63c8005fb"><div class="ttname"><a href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">arm_compute::NELSTMLayer::run</a></div><div class="ttdeci">void run() override</div><div class="ttdoc">Run the kernels contained in the function.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00611">NELSTMLayer.cpp:611</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ace4dd633420fa8d8aa71f60ff730f01f"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">arm_compute::test::validation::input_to_output_weights</a></div><div class="ttdeci">auto input_to_output_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00476">LSTMLayerQuantized.cpp:476</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_transpose_kernel_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_transpose_kernel.xhtml">arm_compute::NETransposeKernel</a></div><div class="ttdoc">NEON kernel which transposes the elements of a matrix.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_transpose_kernel_8h_source.xhtml#l00038">NETransposeKernel.h:38</a></div></div>
<div class="ttc" id="_n_e_arithmetic_subtraction_kernel_8h_xhtml"><div class="ttname"><a href="_n_e_arithmetic_subtraction_kernel_8h.xhtml">NEArithmeticSubtractionKernel.h</a></div></div>
<div class="ttc" id="_n_e_arithmetic_addition_8h_xhtml"><div class="ttname"><a href="_n_e_arithmetic_addition_8h.xhtml">NEArithmeticAddition.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor.xhtml">arm_compute::Tensor</a></div><div class="ttdoc">Basic implementation of the tensor interface.</div><div class="ttdef"><b>Definition:</b> <a href="runtime_2_tensor_8h_source.xhtml#l00037">Tensor.h:37</a></div></div>
<div class="ttc" id="_l_s_t_m_params_8h_xhtml"><div class="ttname"><a href="_l_s_t_m_params_8h.xhtml">LSTMParams.h</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ac2236dfe2a3fc5fa4e125348829cbeb2"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">arm_compute::test::validation::recurrent_to_cell_weights</a></div><div class="ttdeci">auto recurrent_to_cell_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00479">LSTMLayerQuantized.cpp:479</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_l_s_t_m_layer_xhtml_a8d8d5b5c66b732b3fc9494b0e743ed3f"><div class="ttname"><a href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#a8d8d5b5c66b732b3fc9494b0e743ed3f">arm_compute::NELSTMLayer::NELSTMLayer</a></div><div class="ttdeci">NELSTMLayer(std::shared_ptr&lt; IMemoryManager &gt; memory_manager=nullptr)</div><div class="ttdoc">Default constructor.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00040">NELSTMLayer.cpp:40</a></div></div>
<div class="ttc" id="_n_e_activation_layer_kernel_8h_xhtml"><div class="ttname"><a href="_n_e_activation_layer_kernel_8h.xhtml">NEActivationLayerKernel.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_arithmetic_addition_kernel_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml">arm_compute::NEArithmeticAdditionKernel</a></div><div class="ttdoc">Interface for the kernel to perform addition between two tensors.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_arithmetic_addition_kernel_8h_source.xhtml#l00035">NEArithmeticAdditionKernel.h:35</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_l_s_t_m_layer_xhtml_aa9b93ef660fc3c5b4b19d3fc7b891b77"><div class="ttname"><a href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa9b93ef660fc3c5b4b19d3fc7b891b77">arm_compute::NELSTMLayer::prepare</a></div><div class="ttdeci">void prepare() override</div><div class="ttdoc">Prepare the function for executing.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00716">NELSTMLayer.cpp:716</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml">arm_compute::NEMeanStdDevNormalizationLayer</a></div><div class="ttdoc">Basic function to execute mean and standard deviation normalization by calling NEMeanStdDevNormalizat...</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_mean_std_dev_normalization_layer_8h_source.xhtml#l00035">NEMeanStdDevNormalizationLayer.h:35</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_fully_connected_layer_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml">arm_compute::NEFullyConnectedLayer</a></div><div class="ttdoc">Basic function to compute a Fully Connected layer on NEON.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_fully_connected_layer_8h_source.xhtml#l00114">NEFullyConnectedLayer.h:114</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_concatenate_layer_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_concatenate_layer.xhtml">arm_compute::NEConcatenateLayer</a></div><div class="ttdoc">Basic function to execute concatenate tensors along a given axis.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_concatenate_layer_8h_source.xhtml#l00050">NEConcatenateLayer.h:50</a></div></div>
<div class="ttc" id="_n_e_concatenate_layer_8h_xhtml"><div class="ttname"><a href="_n_e_concatenate_layer_8h.xhtml">NEConcatenateLayer.h</a></div></div>
<div class="ttc" id="_n_e_fully_connected_layer_8h_xhtml"><div class="ttname"><a href="_n_e_fully_connected_layer_8h.xhtml">NEFullyConnectedLayer.h</a></div></div>
<div class="ttc" id="arm__compute_2core_2_types_8h_xhtml"><div class="ttname"><a href="arm__compute_2core_2_types_8h.xhtml">Types.h</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a3b793c410cba57a1395184692a018356"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">arm_compute::test::validation::input_to_forget_weights</a></div><div class="ttdeci">auto input_to_forget_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00474">LSTMLayerQuantized.cpp:474</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_copy_kernel_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_copy_kernel.xhtml">arm_compute::NECopyKernel</a></div><div class="ttdoc">NEON kernel to perform a copy between two tensors.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_copy_kernel_8h_source.xhtml#l00035">NECopyKernel.h:35</a></div></div>
<div class="ttc" id="_n_e_mean_std_dev_normalization_layer_8h_xhtml"><div class="ttname"><a href="_n_e_mean_std_dev_normalization_layer_8h.xhtml">NEMeanStdDevNormalizationLayer.h</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml">arm_compute::LSTMParams</a></div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00038">LSTMParams.h:38</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel_xhtml"><div class="ttname"><a href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml">arm_compute::NEPixelWiseMultiplicationKernel</a></div><div class="ttdoc">Interface for the kernel to perform addition between two tensors.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_pixel_wise_multiplication_kernel_8h_source.xhtml#l00035">NEPixelWiseMultiplicationKernel.h:35</a></div></div>
</div><!-- fragment --></div><!-- contents -->
</div><!-- doc-content -->
</body>
</html>